This is the 5.4.164 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmGwZl8ACgkQONu9yGCS
aT54VRAAh73Y6oTTOoIwUtY+nWLaBLB+uQo9/xUO5lVKsammaqJDH9D/H6DvSA0T
AmBLou4TbMOvsWtAnXmlO/2O37FsT8xZDh0psFE7ROJRMu2QfxtDDn3jk5KzhHD2
Ze2AuQ8SbxPMxjOqRgGJKwOZCGKpG7ADYRMPSgfao8NtSULrjDZ8o40hik0RSaGB
2vkTuGJvQaIQ8fwzgL3LH7mQ2E+1Jja/eCzo4ArARS9f846HY0hq9It7mkEIjUpd
Ew91OWgmMmQCK2639zQI3J0F4agJgwSlMhm+NSbY4mNsINSzKTz4h9OoOMLjtCl8
jqxmHXENQSV0vHH2CS/n5uDKAe7GEv2l7aZaIQpcOOOpx94eyYzM1+8qHe7GxfX7
xlo265wjk8XB8L8/cuulQ3qJ4x9JXGJAWDfOjFaFBxZTsLOfnbGft3lXxCMYF7eQ
anRWgq237ekKYBaBV4u0RMq3xglQBgb3eNYksZvcZff5GNqY0gWYXyg4US3u/0tF
bsDVR/ZeNYU6WKNNWH6n6rpGfMpYGvJPefmXX7tcKWY9PkksBCqTRXClgp637/n0
0YP1tXeac3QwYNzxXzkn+0WwNWZPDg9R2T6/qj/K1/B1ezqPQkl6dNoxv6AM/Qap
5aQWYtJSTTF0X9b0sRoeGE0NGMZw9Zt0R7e3O0o/zzWF2Q/7UU0=
=wRqh
-----END PGP SIGNATURE-----

Merge 5.4.164 into android11-5.4-lts

Changes in 5.4.164
    NFSv42: Fix pagecache invalidation after COPY/CLONE
    of: clk: Make <linux/of_clk.h> self-contained
    arm64: dts: mcbin: support 2W SFP modules
    can: j1939: j1939_tp_cmd_recv(): check the dst address of TP.CM_BAM
    gfs2: Fix length of holes reported at end-of-file
    drm/sun4i: fix unmet dependency on RESET_CONTROLLER for PHY_SUN6I_MIPI_DPHY
    mac80211: do not access the IV when it was stripped
    net/smc: Transfer remaining wait queue entries during fallback
    atlantic: Fix OOB read and write in hw_atl_utils_fw_rpc_wait
    net: return correct error code
    platform/x86: thinkpad_acpi: Fix WWAN device disabled issue after S3 deep
    s390/setup: avoid using memblock_enforce_memory_limit
    btrfs: check-integrity: fix a warning on write caching disabled disk
    thermal: core: Reset previous low and high trip during thermal zone init
    scsi: iscsi: Unblock session then wake up error handler
    ata: ahci: Add Green Sardine vendor ID as board_ahci_mobile
    ethernet: hisilicon: hns: hns_dsaf_misc: fix a possible array overflow in hns_dsaf_ge_srst_by_port()
    net: tulip: de4x5: fix the problem that the array 'lp->phy[8]' may be out of bound
    net: ethernet: dec: tulip: de4x5: fix possible array overflows in type3_infoblock()
    perf hist: Fix memory leak of a perf_hpp_fmt
    perf report: Fix memory leaks around perf_tip()
    net/smc: Avoid warning of possible recursive locking
    vrf: Reset IPCB/IP6CB when processing outbound pkts in vrf dev xmit
    kprobes: Limit max data_size of the kretprobe instances
    rt2x00: do not mark device gone on EPROTO errors during start
    ipmi: Move remove_work to dedicated workqueue
    cpufreq: Fix get_cpu_device() failure in add_cpu_dev_symlink()
    s390/pci: move pseudo-MMIO to prevent MIO overlap
    fget: check that the fd still exists after getting a ref to it
    sata_fsl: fix UAF in sata_fsl_port_stop when rmmod sata_fsl
    sata_fsl: fix warning in remove_proc_entry when rmmod sata_fsl
    i2c: stm32f7: flush TX FIFO upon transfer errors
    i2c: stm32f7: recover the bus on access timeout
    i2c: stm32f7: stop dma transfer in case of NACK
    i2c: cbus-gpio: set atomic transfer callback
    natsemi: xtensa: fix section mismatch warnings
    net: qlogic: qlcnic: Fix a NULL pointer dereference in qlcnic_83xx_add_rings()
    net: mpls: Fix notifications when deleting a device
    siphash: use _unaligned version by default
    net/mlx4_en: Fix an use-after-free bug in mlx4_en_try_alloc_resources()
    selftests: net: Correct case name
    rxrpc: Fix rxrpc_local leak in rxrpc_lookup_peer()
    net: usb: lan78xx: lan78xx_phy_init(): use PHY_POLL instead of "0" if no IRQ is available
    net: marvell: mvpp2: Fix the computation of shared CPUs
    net: annotate data-races on txq->xmit_lock_owner
    ipv4: convert fib_num_tclassid_users to atomic_t
    net/rds: correct socket tunable error in rds_tcp_tune()
    net/smc: Keep smc_close_final rc during active close
    drm/msm: Do hw_init() before capturing GPU state
    ipv6: fix memory leak in fib6_rule_suppress
    KVM: x86/pmu: Fix reserved bits for AMD PerfEvtSeln register
    sched/uclamp: Fix rq->uclamp_max not set on first enqueue
    parisc: Fix KBUILD_IMAGE for self-extracting kernel
    parisc: Fix "make install" on newer debian releases
    vgacon: Propagate console boot parameters before calling `vc_resize'
    xhci: Fix command ring abort, write all 64 bits to CRCR register.
    USB: NO_LPM quirk Lenovo Powered USB-C Travel Hub
    usb: typec: tcpm: Wait in SNK_DEBOUNCED until disconnect
    x86/tsc: Add a timer to make sure TSC_adjust is always checked
    x86/tsc: Disable clocksource watchdog for TSC on qualified platforms
    x86/64/mm: Map all kernel memory into trampoline_pgd
    tty: serial: msm_serial: Deactivate RX DMA for polling support
    serial: pl011: Add ACPI SBSA UART match id
    serial: core: fix transmit-buffer reset and memleak
    serial: 8250_pci: Fix ACCES entries in pci_serial_quirks array
    serial: 8250_pci: rewrite pericom_do_set_divisor()
    iwlwifi: mvm: retry init flow if failed
    parisc: Mark cr16 CPU clocksource unstable on all SMP machines
    net/tls: Fix authentication failure in CCM mode
    ipmi: msghandler: Make symbol 'remove_work_wq' static
    Linux 5.4.164

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I11fd72fac5d0985b3b51c86a8b201d3bfd6be049
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 163
+SUBLEVEL = 164
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
@@ -71,6 +71,7 @@
     tx-fault-gpio = <&cp1_gpio1 26 GPIO_ACTIVE_HIGH>;
     pinctrl-names = "default";
     pinctrl-0 = <&cp1_sfpp0_pins>;
+    maximum-power-milliwatt = <2000>;
 };

 sfp_eth1: sfp-eth1 {
@@ -83,6 +84,7 @@
     tx-fault-gpio = <&cp0_gpio2 30 GPIO_ACTIVE_HIGH>;
     pinctrl-names = "default";
     pinctrl-0 = <&cp1_sfpp1_pins &cp0_sfpp1_pins>;
+    maximum-power-milliwatt = <2000>;
 };

 sfp_eth3: sfp-eth3 {
@@ -95,6 +97,7 @@
     tx-fault-gpio = <&cp0_gpio2 19 GPIO_ACTIVE_HIGH>;
     pinctrl-names = "default";
     pinctrl-0 = <&cp0_sfp_1g_pins &cp1_sfp_1g_pins>;
+    maximum-power-milliwatt = <2000>;
 };
 };
@@ -17,7 +17,12 @@
 # Mike Shaver, Helge Deller and Martin K. Petersen
 #

+ifdef CONFIG_PARISC_SELF_EXTRACT
+boot := arch/parisc/boot
+KBUILD_IMAGE := $(boot)/bzImage
+else
 KBUILD_IMAGE := vmlinuz
+endif

 NM = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS += -D__hppa__=1
@@ -39,6 +39,7 @@ verify "$3"
 if [ -n "${INSTALLKERNEL}" ]; then
   if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
   if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+  if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
 fi

 # Default install
@@ -245,27 +245,13 @@ void __init time_init(void)
 static int __init init_cr16_clocksource(void)
 {
     /*
-     * The cr16 interval timers are not syncronized across CPUs on
-     * different sockets, so mark them unstable and lower rating on
-     * multi-socket SMP systems.
+     * The cr16 interval timers are not syncronized across CPUs, even if
+     * they share the same socket.
      */
     if (num_online_cpus() > 1 && !running_on_qemu) {
-        int cpu;
-        unsigned long cpu0_loc;
-        cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
-
-        for_each_online_cpu(cpu) {
-            if (cpu == 0)
-                continue;
-            if ((cpu0_loc != 0) &&
-                (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
-                continue;
-
-            clocksource_cr16.name = "cr16_unstable";
-            clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
-            clocksource_cr16.rating = 0;
-            break;
-        }
+        clocksource_cr16.name = "cr16_unstable";
+        clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+        clocksource_cr16.rating = 0;
     }

     /* XXX: We may want to mark sched_clock stable here if cr16 clocks are
@@ -14,12 +14,13 @@

 /* I/O Map */
 #define ZPCI_IOMAP_SHIFT		48
-#define ZPCI_IOMAP_ADDR_BASE		0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_SHIFT		62
+#define ZPCI_IOMAP_ADDR_BASE		(1UL << ZPCI_IOMAP_ADDR_SHIFT)
 #define ZPCI_IOMAP_ADDR_OFF_MASK	((1UL << ZPCI_IOMAP_SHIFT) - 1)
 #define ZPCI_IOMAP_MAX_ENTRIES	\
-	((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+	(1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
 #define ZPCI_IOMAP_ADDR_IDX_MASK	\
-	(~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
+	((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)

 struct zpci_iomap_entry {
 	u32 fh;
@@ -841,9 +841,6 @@ static void __init setup_memory(void)
         storage_key_init_range(reg->base, reg->base + reg->size);
     }
     psw_set_key(PAGE_DEFAULT_KEY);
-
-    /* Only cosmetics */
-    memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }

 /*
@@ -1162,6 +1162,12 @@ void mark_tsc_unstable(char *reason)

 EXPORT_SYMBOL_GPL(mark_tsc_unstable);

+static void __init tsc_disable_clocksource_watchdog(void)
+{
+    clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+    clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+}
+
 static void __init check_system_tsc_reliable(void)
 {
 #if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
@@ -1178,6 +1184,23 @@ static void __init check_system_tsc_reliable(void)
 #endif
     if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
         tsc_clocksource_reliable = 1;
+
+    /*
+     * Disable the clocksource watchdog when the system has:
+     *  - TSC running at constant frequency
+     *  - TSC which does not stop in C-States
+     *  - the TSC_ADJUST register which allows to detect even minimal
+     *    modifications
+     *  - not more than two sockets. As the number of sockets cannot be
+     *    evaluated at the early boot stage where this has to be
+     *    invoked, check the number of online memory nodes as a
+     *    fallback solution which is an reasonable estimate.
+     */
+    if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+        boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+        boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
+        nr_online_nodes <= 2)
+        tsc_disable_clocksource_watchdog();
 }

 /*
@@ -1369,9 +1392,6 @@ static int __init init_tsc_clocksource(void)
     if (tsc_unstable)
         goto unreg;

-    if (tsc_clocksource_reliable || no_tsc_watchdog)
-        clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-
     if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
         clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

@@ -1506,7 +1526,7 @@ void __init tsc_init(void)
     }

-    if (tsc_clocksource_reliable || no_tsc_watchdog)
-        clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+    tsc_disable_clocksource_watchdog();

     clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
     detect_art();
@@ -30,6 +30,7 @@ struct tsc_adjust {
 };

 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;

 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
     }
 }

+/*
+ * Normally the tsc_sync will be checked every time system enters idle
+ * state, but there is still caveat that a system won't enter idle,
+ * either because it's too busy or configured purposely to not enter
+ * idle.
+ *
+ * So setup a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL		(HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+    int next_cpu;
+
+    tsc_verify_tsc_adjust(false);
+
+    /* Run the check for all onlined CPUs in turn */
+    next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+    if (next_cpu >= nr_cpu_ids)
+        next_cpu = cpumask_first(cpu_online_mask);
+
+    tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+    add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+    if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+        return 0;
+
+    timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+    tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+    add_timer(&tsc_sync_check_timer);
+
+    return 0;
+}
+late_initcall(start_sync_check_timer);
+
 static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
                    unsigned int cpu, bool bootcpu)
 {
@@ -266,7 +266,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
         pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

     pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
-    pmu->reserved_bits = 0xffffffff00200000ull;
+    pmu->reserved_bits = 0xfffffff000280000ull;
     pmu->version = 1;
     /* not applicable to AMD; but clean them to prevent any fall out */
     pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
@@ -50,6 +50,7 @@ static void __init setup_real_mode(void)
 #ifdef CONFIG_X86_64
     u64 *trampoline_pgd;
     u64 efer;
+    int i;
 #endif

     base = (unsigned char *)real_mode_header;
@@ -108,8 +109,17 @@ static void __init setup_real_mode(void)
         trampoline_header->flags |= TH_FLAGS_SME_ACTIVE;

     trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
-    trampoline_pgd[0] = trampoline_pgd_entry.pgd;
-    trampoline_pgd[511] = init_top_pgt[511].pgd;
+
+    /* Map the real mode stub as virtual == physical */
+    trampoline_pgd[0] = trampoline_pgd_entry.pgd;
+
+    /*
+     * Include the entirety of the kernel mapping into the trampoline
+     * PGD. This way, all mappings present in the normal kernel page
+     * tables are usable while running on trampoline_pgd.
+     */
+    for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+        trampoline_pgd[i] = init_top_pgt[i].pgd;
 #endif
 }

@@ -420,6 +420,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
     /* AMD */
     { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
     { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
+    { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
     /* AMD is using RAID class only for ahci controllers */
     { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
       PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
@@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
     return 0;
 }

+static void sata_fsl_host_stop(struct ata_host *host)
+{
+    struct sata_fsl_host_priv *host_priv = host->private_data;
+
+    iounmap(host_priv->hcr_base);
+    kfree(host_priv);
+}
+
 /*
  * scsi mid-layer and libata interface structures
  */
@@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = {
     .port_start = sata_fsl_port_start,
     .port_stop = sata_fsl_port_stop,

+    .host_stop = sata_fsl_host_stop,
+
     .pmp_attach = sata_fsl_pmp_attach,
     .pmp_detach = sata_fsl_pmp_detach,
 };
@@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
     host_priv->ssr_base = ssr_base;
     host_priv->csr_base = csr_base;

-    irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-    if (!irq) {
-        dev_err(&ofdev->dev, "invalid irq from platform\n");
+    irq = platform_get_irq(ofdev, 0);
+    if (irq < 0) {
+        retval = irq;
         goto error_exit_with_cleanup;
     }
     host_priv->irq = irq;
@@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)

     ata_host_detach(host);

-    irq_dispose_mapping(host_priv->irq);
-    iounmap(host_priv->hcr_base);
-    kfree(host_priv);
-
     return 0;
 }

@@ -220,6 +220,8 @@ struct ipmi_user {
     struct work_struct remove_work;
 };

+static struct workqueue_struct *remove_work_wq;
+
 static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
     __acquires(user->release_barrier)
 {
@@ -1286,7 +1288,7 @@ static void free_user(struct kref *ref)
     struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);

     /* SRCU cleanup must happen in task context. */
-    schedule_work(&user->remove_work);
+    queue_work(remove_work_wq, &user->remove_work);
 }

 static void _ipmi_destroy_user(struct ipmi_user *user)
@@ -5161,6 +5163,13 @@ static int ipmi_init_msghandler(void)

     atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

+    remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+    if (!remove_work_wq) {
+        pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
+        rv = -ENOMEM;
+        goto out;
+    }
+
     initialized = true;

 out:
@@ -5186,6 +5195,8 @@ static void __exit cleanup_ipmi(void)
     int count;

     if (initialized) {
+        destroy_workqueue(remove_work_wq);
+
         atomic_notifier_chain_unregister(&panic_notifier_list,
                          &panic_block);

@@ -1005,10 +1005,9 @@ static struct kobj_type ktype_cpufreq = {
     .release	= cpufreq_sysfs_release,
 };

-static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
+                struct device *dev)
 {
-    struct device *dev = get_cpu_device(cpu);
-
     if (unlikely(!dev))
         return;

@@ -1394,7 +1393,7 @@ static int cpufreq_online(unsigned int cpu)
     if (new_policy) {
         for_each_cpu(j, policy->related_cpus) {
             per_cpu(cpufreq_cpu_data, j) = policy;
-            add_cpu_dev_symlink(policy, j);
+            add_cpu_dev_symlink(policy, j, get_cpu_device(j));
         }

         policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
@@ -1558,7 +1557,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
     /* Create sysfs link on CPU registration */
     policy = per_cpu(cpufreq_cpu_data, cpu);
     if (policy)
-        add_cpu_dev_symlink(policy, cpu);
+        add_cpu_dev_symlink(policy, cpu, dev);

     return 0;
 }
@@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
         goto free_priv;

     pm_runtime_get_sync(&gpu->pdev->dev);
+    msm_gpu_hw_init(gpu);
     show_priv->state = gpu->funcs->gpu_state_get(gpu);
     pm_runtime_put_sync(&gpu->pdev->dev);

@@ -46,6 +46,7 @@ config DRM_SUN6I_DSI
 	default MACH_SUN8I
 	select CRC_CCITT
 	select DRM_MIPI_DSI
+	select RESET_CONTROLLER
 	select PHY_SUN6I_MIPI_DPHY
 	help
 	  Choose this option if you want have an Allwinner SoC with
@@ -195,8 +195,9 @@ static u32 cbus_i2c_func(struct i2c_adapter *adapter)
 }

 static const struct i2c_algorithm cbus_i2c_algo = {
-    .smbus_xfer	= cbus_i2c_smbus_xfer,
-    .functionality	= cbus_i2c_func,
+    .smbus_xfer		= cbus_i2c_smbus_xfer,
+    .smbus_xfer_atomic	= cbus_i2c_smbus_xfer,
+    .functionality	= cbus_i2c_func,
 };

 static int cbus_i2c_remove(struct platform_device *pdev)
@@ -1394,6 +1394,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
 {
     struct stm32f7_i2c_dev *i2c_dev = data;
     struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+    struct stm32_i2c_dma *dma = i2c_dev->dma;
     void __iomem *base = i2c_dev->base;
     u32 status, mask;
     int ret = IRQ_HANDLED;
@@ -1418,6 +1419,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
     if (status & STM32F7_I2C_ISR_NACKF) {
         dev_dbg(i2c_dev->dev, "<%s>: Receive NACK\n", __func__);
         writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
+        if (i2c_dev->use_dma) {
+            stm32f7_i2c_disable_dma_req(i2c_dev);
+            dmaengine_terminate_all(dma->chan_using);
+        }
         f7_msg->result = -ENXIO;
     }

@@ -1433,7 +1438,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
         /* Clear STOP flag */
         writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);

-        if (i2c_dev->use_dma) {
+        if (i2c_dev->use_dma && !f7_msg->result) {
             ret = IRQ_WAKE_THREAD;
         } else {
             i2c_dev->master_mode = false;
@@ -1446,7 +1451,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
         if (f7_msg->stop) {
             mask = STM32F7_I2C_CR2_STOP;
             stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
-        } else if (i2c_dev->use_dma) {
+        } else if (i2c_dev->use_dma && !f7_msg->result) {
             ret = IRQ_WAKE_THREAD;
         } else if (f7_msg->smbus) {
             stm32f7_i2c_smbus_rep_start(i2c_dev);
@@ -1586,12 +1591,23 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
     time_left = wait_for_completion_timeout(&i2c_dev->complete,
                         i2c_dev->adap.timeout);
     ret = f7_msg->result;
+    if (ret) {
+        /*
+         * It is possible that some unsent data have already been
+         * written into TXDR. To avoid sending old data in a
+         * further transfer, flush TXDR in case of any error
+         */
+        writel_relaxed(STM32F7_I2C_ISR_TXE,
+                   i2c_dev->base + STM32F7_I2C_ISR);
+        goto pm_free;
+    }

     if (!time_left) {
         dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n",
             i2c_dev->msg->addr);
+        if (i2c_dev->use_dma)
+            dmaengine_terminate_all(dma->chan_using);
+        stm32f7_i2c_wait_free_bus(i2c_dev);
         ret = -ETIMEDOUT;
     }

@@ -1634,13 +1650,22 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
     timeout = wait_for_completion_timeout(&i2c_dev->complete,
                           i2c_dev->adap.timeout);
     ret = f7_msg->result;
-    if (ret)
+    if (ret) {
+        /*
+         * It is possible that some unsent data have already been
+         * written into TXDR. To avoid sending old data in a
+         * further transfer, flush TXDR in case of any error
+         */
+        writel_relaxed(STM32F7_I2C_ISR_TXE,
+                   i2c_dev->base + STM32F7_I2C_ISR);
         goto pm_free;
+    }

     if (!timeout) {
         dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
+        if (i2c_dev->use_dma)
+            dmaengine_terminate_all(dma->chan_using);
+        stm32f7_i2c_wait_free_bus(i2c_dev);
         ret = -ETIMEDOUT;
         goto pm_free;
     }

@@ -481,6 +481,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
             goto err_exit;

         if (fw.len == 0xFFFFU) {
+            if (sw.len > sizeof(self->rpc)) {
+                printk(KERN_INFO "Invalid sw len: %x\n", sw.len);
+                err = -EINVAL;
+                goto err_exit;
+            }
             err = hw_atl_utils_fw_rpc_call(self, sw.len);
             if (err < 0)
                 goto err_exit;
@@ -489,6 +494,11 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,

     if (rpc) {
         if (fw.len) {
+            if (fw.len > sizeof(self->rpc)) {
+                printk(KERN_INFO "Invalid fw len: %x\n", fw.len);
+                err = -EINVAL;
+                goto err_exit;
+            }
             err =
                 hw_atl_utils_fw_downld_dwords(self,
                                   self->rpc_addr,
@@ -4708,6 +4708,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
     lp->ibn = 3;
     lp->active = *p++;
     if (MOTO_SROM_BUG) lp->active = 0;
+    /* if (MOTO_SROM_BUG) statement indicates lp->active could
+     * be 8 (i.e. the size of array lp->phy) */
+    if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
+        return -EINVAL;
     lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
     lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
     lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
@@ -4999,19 +5003,23 @@ mii_get_phy(struct net_device *dev)
     }
     if ((j == limit) && (i < DE4X5_MAX_MII)) {
         for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
-        lp->phy[k].addr = i;
-        lp->phy[k].id = id;
-        lp->phy[k].spd.reg = GENERIC_REG;      /* ANLPA register */
-        lp->phy[k].spd.mask = GENERIC_MASK;    /* 100Mb/s technologies */
-        lp->phy[k].spd.value = GENERIC_VALUE;  /* TX & T4, H/F Duplex */
-        lp->mii_cnt++;
-        lp->active++;
-        printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
-        j = de4x5_debug;
-        de4x5_debug |= DEBUG_MII;
-        de4x5_dbg_mii(dev, k);
-        de4x5_debug = j;
-        printk("\n");
+        if (k < DE4X5_MAX_PHY) {
+            lp->phy[k].addr = i;
+            lp->phy[k].id = id;
+            lp->phy[k].spd.reg = GENERIC_REG;      /* ANLPA register */
+            lp->phy[k].spd.mask = GENERIC_MASK;    /* 100Mb/s technologies */
+            lp->phy[k].spd.value = GENERIC_VALUE;  /* TX & T4, H/F Duplex */
+            lp->mii_cnt++;
+            lp->active++;
+            printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
+            j = de4x5_debug;
+            de4x5_debug |= DEBUG_MII;
+            de4x5_dbg_mii(dev, k);
+            de4x5_debug = j;
+            printk("\n");
+        } else {
+            goto purgatory;
+        }
     }
     }
     purgatory:
@@ -398,6 +398,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
         return;

     if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+        /* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8.
+           We need check to prevent array overflow */
+        if (port >= DSAF_MAX_PORT_NUM)
+            return;
         reg_val_1  = 0x1 << port;
         port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
         /* there is difference between V1 and V2 in register.*/
@@ -5773,7 +5773,7 @@ static int mvpp2_probe(struct platform_device *pdev)

     shared = num_present_cpus() - priv->nthreads;
     if (shared > 0)
-        bitmap_fill(&priv->lock_map,
+        bitmap_set(&priv->lock_map, 0,
                 min_t(int, shared, MVPP2_MAX_THREADS));

     for (i = 0; i < MVPP2_MAX_THREADS; i++) {
@@ -2281,9 +2281,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                 bool carry_xdp_prog)
 {
     struct bpf_prog *xdp_prog;
-    int i, t;
+    int i, t, ret;

-    mlx4_en_copy_priv(tmp, priv, prof);
+    ret = mlx4_en_copy_priv(tmp, priv, prof);
+    if (ret) {
+        en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
+            __func__);
+        return ret;
+    }

     if (mlx4_en_alloc_resources(tmp)) {
         en_warn(priv,
@@ -120,7 +120,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
     .ndo_set_mac_address	= eth_mac_addr,
 };

-static int __init sonic_probe1(struct net_device *dev)
+static int sonic_probe1(struct net_device *dev)
 {
     unsigned int silicon_revision;
     struct sonic_local *lp = netdev_priv(dev);
@@ -1079,8 +1079,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
     sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
     context_id = recv_ctx->context_id;
     num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
-    ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
-                    QLCNIC_CMD_ADD_RCV_RINGS);
+    err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+                      QLCNIC_CMD_ADD_RCV_RINGS);
+    if (err) {
+        dev_err(&adapter->pdev->dev,
+            "Failed to alloc mbx args %d\n", err);
+        return err;
+    }
     cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);

     /* set up status rings, mbx 2-81 */
@@ -2136,7 +2136,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
     if (dev->domain_data.phyirq > 0)
         phydev->irq = dev->domain_data.phyirq;
     else
-        phydev->irq = 0;
+        phydev->irq = PHY_POLL;
     netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

     /* set to AUTOMDIX */
@@ -221,6 +221,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
     /* strip the ethernet header added for pass through VRF device */
     __skb_pull(skb, skb_network_offset(skb));

+    memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
     ret = vrf_ip6_local_out(net, skb->sk, skb);
     if (unlikely(net_xmit_eval(ret)))
         dev->stats.tx_errors++;
@@ -304,6 +305,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
                            RT_SCOPE_LINK);
     }

+    memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
     ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
     if (unlikely(net_xmit_eval(ret)))
         vrf_dev->stats.tx_errors++;
@@ -1276,23 +1276,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
     const struct iwl_op_mode_ops *ops = op->ops;
     struct dentry *dbgfs_dir = NULL;
     struct iwl_op_mode *op_mode = NULL;
+    int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;

+    for (retry = 0; retry <= max_retry; retry++) {
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-    drv->dbgfs_op_mode = debugfs_create_dir(op->name,
-                        drv->dbgfs_drv);
-    dbgfs_dir = drv->dbgfs_op_mode;
+        drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+                            drv->dbgfs_drv);
+        dbgfs_dir = drv->dbgfs_op_mode;
 #endif

-    op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
+        op_mode = ops->start(drv->trans, drv->trans->cfg,
+                     &drv->fw, dbgfs_dir);
+
+        if (op_mode)
+            return op_mode;
+
+        IWL_ERR(drv, "retry init count %d\n", retry);

 #ifdef CONFIG_IWLWIFI_DEBUGFS
-    if (!op_mode) {
-        debugfs_remove_recursive(drv->dbgfs_op_mode);
-        drv->dbgfs_op_mode = NULL;
-    }
+        debugfs_remove_recursive(drv->dbgfs_op_mode);
+        drv->dbgfs_op_mode = NULL;
 #endif
+    }

-    return op_mode;
+    return NULL;
 }

 static void _iwl_op_mode_stop(struct iwl_drv *drv)
@@ -145,4 +145,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
 #define IWL_EXPORT_SYMBOL(sym)
 #endif

+/* max retry for init flow */
+#define IWL_MAX_INIT_RETRY 2
+
 #endif /* __iwl_drv_h__ */
@@ -71,6 +71,7 @@
 #include <net/ieee80211_radiotap.h>
 #include <net/tcp.h>

+#include "iwl-drv.h"
 #include "iwl-op-mode.h"
 #include "iwl-io.h"
 #include "mvm.h"
@@ -1129,9 +1130,30 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
 {
     struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
     int ret;
+    int retry, max_retry = 0;

     mutex_lock(&mvm->mutex);
-    ret = __iwl_mvm_mac_start(mvm);
+
+    /* we are starting the mac not in error flow, and restart is enabled */
+    if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
+        iwlwifi_mod_params.fw_restart) {
+        max_retry = IWL_MAX_INIT_RETRY;
+        /*
+         * This will prevent mac80211 recovery flows to trigger during
+         * init failures
+         */
+        set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+    }
+
+    for (retry = 0; retry <= max_retry; retry++) {
+        ret = __iwl_mvm_mac_start(mvm);
+        if (!ret)
+            break;
+
+        IWL_ERR(mvm, "mac start retry %d\n", retry);
+    }
+    clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+
     mutex_unlock(&mvm->mutex);

     return ret;
@@ -1167,6 +1167,8 @@ struct iwl_mvm {
  * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
  * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
  * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
+ * @IWL_MVM_STATUS_STARTING: starting mac,
+ *	used to disable restart flow while in STARTING state
  */
 enum iwl_mvm_status {
 	IWL_MVM_STATUS_HW_RFKILL,
@@ -1177,6 +1179,7 @@ enum iwl_mvm_status {
 	IWL_MVM_STATUS_ROC_AUX_RUNNING,
 	IWL_MVM_STATUS_FIRMWARE_RUNNING,
 	IWL_MVM_STATUS_NEED_FLUSH_P2P,
+	IWL_MVM_STATUS_STARTING,
 };

 /* Keep track of completed init configuration */
@@ -1288,6 +1288,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
      */
     if (!mvm->fw_restart && fw_error) {
         iwl_fw_error_collect(&mvm->fwrt);
+    } else if (test_bit(IWL_MVM_STATUS_STARTING,
+                &mvm->status)) {
+        IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
     } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
         struct iwl_mvm_reprobe *reprobe;

@@ -25,6 +25,9 @@ static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
     if (status == -ENODEV || status == -ENOENT)
         return true;

+    if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
+        return false;
+
     if (status == -EPROTO || status == -ETIMEDOUT)
         rt2x00dev->num_proto_errs++;
     else
@@ -1188,15 +1188,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk)
     return status;
 }

-/* Query FW and update rfkill sw state for all rfkill switches */
-static void tpacpi_rfk_update_swstate_all(void)
-{
-    unsigned int i;
-
-    for (i = 0; i < TPACPI_RFK_SW_MAX; i++)
-        tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]);
-}
-
 /*
  * Sync the HW-blocking state of all rfkill switches,
  * do notice it causes the rfkill core to schedule uevents
@@ -3135,9 +3126,6 @@ static void tpacpi_send_radiosw_update(void)
     if (wlsw == TPACPI_RFK_RADIO_OFF)
         tpacpi_rfk_update_hwblock_state(true);

-    /* Sync sw blocking state */
-    tpacpi_rfk_update_swstate_all();
-
     /* Sync hw blocking state last if it is hw-unblocked */
     if (wlsw == TPACPI_RFK_RADIO_ON)
         tpacpi_rfk_update_hwblock_state(false);
@@ -1894,12 +1894,12 @@ static void session_recovery_timedout(struct work_struct *work)
     }
     spin_unlock_irqrestore(&session->lock, flags);

-    if (session->transport->session_recovery_timedout)
-        session->transport->session_recovery_timedout(session);
-
     ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
     scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
     ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
+
+    if (session->transport->session_recovery_timedout)
+        session->transport->session_recovery_timedout(session);
 }

 static void __iscsi_unblock_session(struct work_struct *work)
@@ -460,6 +460,8 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
 {
     struct thermal_instance *pos;
     tz->temperature = THERMAL_TEMP_INVALID;
+    tz->prev_low_trip = -INT_MAX;
+    tz->prev_high_trip = INT_MAX;
     list_for_each_entry(pos, &tz->thermal_instances, tz_node)
         pos->initialized = false;
 }
@@ -1351,29 +1351,33 @@ pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
 {
     int scr;
     int lcr;
-    int actual_baud;
-    int tolerance;

-    for (scr = 5 ; scr <= 15 ; scr++) {
-        actual_baud = 921600 * 16 / scr;
-        tolerance = actual_baud / 50;
+    for (scr = 16; scr > 4; scr--) {
+        unsigned int maxrate = port->uartclk / scr;
+        unsigned int divisor = max(maxrate / baud, 1U);
+        int delta = maxrate / divisor - baud;

-        if ((baud < actual_baud + tolerance) &&
-            (baud > actual_baud - tolerance)) {
+        if (baud > maxrate + baud / 50)
+            continue;
+
+        if (delta > baud / 50)
+            divisor++;
+
+        if (divisor > 0xffff)
+            continue;
+
+        /* Update delta due to possible divisor change */
+        delta = maxrate / divisor - baud;
+        if (abs(delta) < baud / 50) {
             lcr = serial_port_in(port, UART_LCR);
             serial_port_out(port, UART_LCR, lcr | 0x80);
-
-            serial_port_out(port, UART_DLL, 1);
-            serial_port_out(port, UART_DLM, 0);
+            serial_port_out(port, UART_DLL, divisor & 0xff);
+            serial_port_out(port, UART_DLM, divisor >> 8 & 0xff);
             serial_port_out(port, 2, 16 - scr);
             serial_port_out(port, UART_LCR, lcr);
             return;
-        } else if (baud > actual_baud) {
-            break;
         }
     }
     serial8250_do_set_divisor(port, baud, quot, quot_frac);
 }

 static int pci_pericom_setup(struct serial_private *priv,
                  const struct pciserial_board *board,
@@ -2285,12 +2289,19 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
         .setup      = pci_pericom_setup_four_at_eight,
     },
     {
-        .vendor     = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+        .vendor     = PCI_VENDOR_ID_ACCESIO,
         .device     = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
         .subvendor  = PCI_ANY_ID,
         .subdevice  = PCI_ANY_ID,
         .setup      = pci_pericom_setup_four_at_eight,
     },
     {
+        .vendor     = PCI_VENDOR_ID_ACCESIO,
+        .device     = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
+        .subvendor  = PCI_ANY_ID,
+        .subdevice  = PCI_ANY_ID,
+        .setup      = pci_pericom_setup_four_at_eight,
+    },
+    {
         .vendor     = PCI_VENDOR_ID_ACCESIO,
         .device     = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
@@ -2770,6 +2770,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

 static const struct acpi_device_id sbsa_uart_acpi_match[] = {
     { "ARMH0011", 0 },
+    { "ARMHB000", 0 },
     {},
 };
 MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
@@ -603,6 +603,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
     u32 val;
     int ret;

+    if (IS_ENABLED(CONFIG_CONSOLE_POLL))
+        return;
+
     if (!dma->chan)
         return;

@@ -1566,6 +1566,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
 {
     struct uart_state *state = container_of(port, struct uart_state, port);
     struct uart_port *uport = uart_port_check(state);
+    char *buf;

     /*
      * At this point, we stop accepting input. To do this, we
@@ -1587,8 +1588,18 @@ static void uart_tty_port_shutdown(struct tty_port *port)
      */
     tty_port_set_suspended(port, 0);

-    uart_change_pm(state, UART_PM_STATE_OFF);
+    /*
+     * Free the transmit buffer.
+     */
+    spin_lock_irq(&uport->lock);
+    buf = state->xmit.buf;
+    state->xmit.buf = NULL;
+    spin_unlock_irq(&uport->lock);

+    if (buf)
+        free_page((unsigned long)buf);
+
+    uart_change_pm(state, UART_PM_STATE_OFF);
 }

 static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
@@ -435,6 +435,9 @@ static const struct usb_device_id usb_quirk_list[] = {
     { USB_DEVICE(0x1532, 0x0116), .driver_info =
             USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },

+    /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
+    { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
+
     /* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
     { USB_DEVICE(0x17ef, 0xa012), .driver_info =
             USB_QUIRK_DISCONNECT_SUSPEND },
@@ -339,7 +339,9 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
 /* Must be called with xhci->lock held, releases and aquires lock back */
 static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
 {
-    u32 temp_32;
+    struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
+    union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
+    u64 crcr;
     int ret;

     xhci_dbg(xhci, "Abort command ring\n");
@@ -348,13 +350,18 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)

     /*
      * The control bits like command stop, abort are located in lower
-     * dword of the command ring control register. Limit the write
-     * to the lower dword to avoid corrupting the command ring pointer
-     * in case if the command ring is stopped by the time upper dword
-     * is written.
+     * dword of the command ring control register.
+     * Some controllers require all 64 bits to be written to abort the ring.
+     * Make sure the upper dword is valid, pointing to the next command,
+     * avoiding corrupting the command ring pointer in case the command ring
+     * is stopped by the time the upper dword is written.
      */
-    temp_32 = readl(&xhci->op_regs->cmd_ring);
-    writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
+    next_trb(xhci, NULL, &new_seg, &new_deq);
+    if (trb_is_link(new_deq))
+        next_trb(xhci, NULL, &new_seg, &new_deq);
+
+    crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
+    xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

     /* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
      * completion of the Command Abort operation. If CRR is not negated in 5
@@ -3093,11 +3093,7 @@ static void run_state_machine(struct tcpm_port *port)
                        tcpm_try_src(port) ? SRC_TRY
                           : SNK_ATTACHED,
                        0);
-        else
-            /* Wait for VBUS, but not forever */
-            tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
         break;
-
     case SRC_TRY:
         port->try_src_count++;
         tcpm_set_cc(port, tcpm_rp_cc(port));
@@ -370,11 +370,17 @@ static void vgacon_init(struct vc_data *c, int init)
     struct uni_pagedir *p;

     /*
-     * We cannot be loaded as a module, therefore init is always 1,
-     * but vgacon_init can be called more than once, and init will
-     * not be 1.
+     * We cannot be loaded as a module, therefore init will be 1
+     * if we are the default console, however if we are a fallback
+     * console, for example if fbcon has failed registration, then
+     * init will be 0, so we need to make sure our boot parameters
+     * have been copied to the console structure for vgacon_resize
+     * ultimately called by vc_resize. Any subsequent calls to
+     * vgacon_init init will have init set to 0 too.
      */
     c->vc_can_do_color = vga_can_do_color;
+    c->vc_scan_lines = vga_scan_lines;
+    c->vc_font.height = c->vc_cell_height = vga_video_font_height;

     /* set dimensions manually if init != 0 since vc_resize() will fail */
     if (init) {
@@ -383,8 +389,6 @@ static void vgacon_init(struct vc_data *c, int init)
     } else
         vc_resize(c, vga_video_num_columns, vga_video_num_lines);

-    c->vc_scan_lines = vga_scan_lines;
-    c->vc_font.height = c->vc_cell_height = vga_video_font_height;
     c->vc_complement_mask = 0x7700;
     if (vga_512_chars)
         c->vc_hi_font_mask = 0x0800;
@@ -3636,11 +3636,23 @@ static void btrfs_end_empty_barrier(struct bio *bio)
  */
 static void write_dev_flush(struct btrfs_device *device)
 {
-    struct request_queue *q = bdev_get_queue(device->bdev);
     struct bio *bio = device->flush_bio;

+#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+    /*
+     * When a disk has write caching disabled, we skip submission of a bio
+     * with flush and sync requests before writing the superblock, since
+     * it's not needed. However when the integrity checker is enabled, this
+     * results in reports that there are metadata blocks referred by a
+     * superblock that were not properly flushed. So don't skip the bio
+     * submission only when the integrity checker is enabled for the sake
+     * of simplicity, since this is a debug tool and not meant for use in
+     * non-debug builds.
+     */
+    struct request_queue *q = bdev_get_queue(device->bdev);
     if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
         return;
+#endif

     bio_reset(bio);
     bio->bi_end_io = btrfs_end_empty_barrier;
@@ -723,6 +723,10 @@ static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
             file = NULL;
         else if (!get_file_rcu_many(file, refs))
             goto loop;
+        else if (__fcheck_files(files, fd) != file) {
+            fput_many(file, refs);
+            goto loop;
+        }
     }
     rcu_read_unlock();

@@ -940,7 +940,7 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
         else if (height == ip->i_height)
             ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
         else
-            iomap->length = size - pos;
+            iomap->length = size - iomap->offset;
     } else if (flags & IOMAP_WRITE) {
         u64 alloc_size;

@@ -295,8 +295,9 @@ static ssize_t _nfs42_proc_copy(struct file *src,
             goto out;
     }

-    truncate_pagecache_range(dst_inode, pos_dst,
-                 pos_dst + res->write_res.count);
+    WARN_ON_ONCE(invalidate_inode_pages2_range(dst_inode->i_mapping,
+                    pos_dst >> PAGE_SHIFT,
+                    (pos_dst + res->write_res.count - 1) >> PAGE_SHIFT));

     status = res->write_res.count;
 out:
@@ -155,6 +155,8 @@ struct kretprobe {
     raw_spinlock_t lock;
 };

+#define KRETPROBE_MAX_DATA_SIZE 4096
+
 struct kretprobe_instance {
     struct hlist_node hlist;
     struct kretprobe *rp;
@@ -3961,7 +3961,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
     spin_lock(&txq->_xmit_lock);
-    txq->xmit_lock_owner = cpu;
+    /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+    WRITE_ONCE(txq->xmit_lock_owner, cpu);
 }

 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3978,26 +3979,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
     spin_lock_bh(&txq->_xmit_lock);
-    txq->xmit_lock_owner = smp_processor_id();
+    /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+    WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
 }

 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
     bool ok = spin_trylock(&txq->_xmit_lock);
-    if (likely(ok))
-        txq->xmit_lock_owner = smp_processor_id();
+
+    if (likely(ok)) {
+        /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+        WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+    }
     return ok;
 }

 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-    txq->xmit_lock_owner = -1;
+    /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+    WRITE_ONCE(txq->xmit_lock_owner, -1);
     spin_unlock(&txq->_xmit_lock);
 }

 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-    txq->xmit_lock_owner = -1;
+    /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+    WRITE_ONCE(txq->xmit_lock_owner, -1);
     spin_unlock_bh(&txq->_xmit_lock);
 }

@@ -6,6 +6,9 @@
 #ifndef __LINUX_OF_CLK_H
 #define __LINUX_OF_CLK_H

+struct device_node;
+struct of_device_id;
+
 #if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF)

 unsigned int of_clk_get_parent_count(struct device_node *np);
@@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key)
 }

 u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif

 u64 siphash_1u64(const u64 a, const siphash_key_t *key);
 u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
 static inline u64 siphash(const void *data, size_t len,
               const siphash_key_t *key)
 {
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-    if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+    if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+        !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
         return __siphash_unaligned(data, len, key);
-#endif
     return ___siphash_aligned(data, len, key);
 }

@@ -96,10 +93,8 @@ typedef struct {

 u32 __hsiphash_aligned(const void *data, size_t len,
                const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
              const hsiphash_key_t *key);
-#endif

 u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
 u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
 static inline u32 hsiphash(const void *data, size_t len,
                const hsiphash_key_t *key)
 {
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-    if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+    if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+        !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
         return __hsiphash_unaligned(data, len, key);
-#endif
     return ___hsiphash_aligned(data, len, key);
 }

@@ -68,7 +68,7 @@ struct fib_rules_ops {
     int			(*action)(struct fib_rule *,
                       struct flowi *, int,
                       struct fib_lookup_arg *);
-    bool			(*suppress)(struct fib_rule *,
+    bool			(*suppress)(struct fib_rule *, int,
                         struct fib_lookup_arg *);
     int			(*match)(struct fib_rule *,
                      struct flowi *, int);
@@ -412,7 +412,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
 #ifdef CONFIG_IP_ROUTE_CLASSID
 static inline int fib_num_tclassid_users(struct net *net)
 {
-    return net->ipv4.fib_num_tclassid_users;
+    return atomic_read(&net->ipv4.fib_num_tclassid_users);
 }
 #else
 static inline int fib_num_tclassid_users(struct net *net)
@@ -61,7 +61,7 @@ struct netns_ipv4 {
 #endif
     bool			fib_has_custom_local_routes;
 #ifdef CONFIG_IP_ROUTE_CLASSID
-    int			fib_num_tclassid_users;
+    atomic_t		fib_num_tclassid_users;
 #endif
     struct hlist_head	*fib_table_hash;
     bool			fib_offload_disabled;
@@ -2003,6 +2003,9 @@ int register_kretprobe(struct kretprobe *rp)
         }
     }

+    if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
+        return -E2BIG;
+
     rp->kp.pre_handler = pre_handler_kretprobe;
     rp->kp.post_handler = NULL;
     rp->kp.fault_handler = NULL;
@@ -1347,7 +1347,7 @@ static void __init init_uclamp_rq(struct rq *rq)
         };
     }

-    rq->uclamp_flags = 0;
+    rq->uclamp_flags = UCLAMP_FLAG_IDLE;
 }

 static void __init init_uclamp(void)
@@ -49,6 +49,7 @@
 	SIPROUND; \
 	return (v0 ^ v1) ^ (v2 ^ v3);

+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
 {
 	const u8 *end = data + len - (len % sizeof(u64));
@@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
 	POSTAMBLE
 }
 EXPORT_SYMBOL(__siphash_aligned);
+#endif

-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
 {
 	const u8 *end = data + len - (len % sizeof(u64));
@@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
 	POSTAMBLE
 }
 EXPORT_SYMBOL(__siphash_unaligned);
-#endif

 /**
  * siphash_1u64 - compute 64-bit siphash PRF value of a u64
@@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32);
 	HSIPROUND; \
 	return (v0 ^ v1) ^ (v2 ^ v3);

+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 {
 	const u8 *end = data + len - (len % sizeof(u64));
@@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 	HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_aligned);
+#endif

-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
 			 const hsiphash_key_t *key)
 {
@@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
 	HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif

 /**
  * hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
@@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32);
 	HSIPROUND; \
 	return v1 ^ v3;

+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 {
 	const u8 *end = data + len - (len % sizeof(u32));
@@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 	HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_aligned);
+#endif

-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 u32 __hsiphash_unaligned(const void *data, size_t len,
 			 const hsiphash_key_t *key)
 {
@@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
 	HPOSTAMBLE
 }
 EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif

 /**
  * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
@@ -2004,6 +2004,12 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
         extd = J1939_ETP;
         /* fall through */
     case J1939_TP_CMD_BAM: /* fall through */
+        if (cmd == J1939_TP_CMD_BAM && !j1939_cb_is_broadcast(skcb)) {
+            netdev_err_once(priv->ndev, "%s: BAM to unicast (%02x), ignoring!\n",
+                    __func__, skcb->addr.sa);
+            return;
+        }
+        fallthrough;
     case J1939_TP_CMD_RTS: /* fall through */
         if (skcb->addr.type != extd)
             return;
@@ -3771,7 +3771,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
     if (dev->flags & IFF_UP) {
         int cpu = smp_processor_id(); /* ok because BHs are off */

-        if (txq->xmit_lock_owner != cpu) {
+        /* Other cpus might concurrently change txq->xmit_lock_owner
+         * to -1 or to their cpu id, but not to our id.
+         */
+        if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
             if (dev_xmit_recursion())
                 goto recursion_alert;

@@ -300,7 +300,7 @@ int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
         else
             err = ops->action(rule, fl, flags, arg);

-        if (!err && ops->suppress && ops->suppress(rule, arg))
+        if (!err && ops->suppress && ops->suppress(rule, flags, arg))
             continue;

         if (err != -EAGAIN) {
@@ -2585,7 +2585,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
 free:
     kfree(t);
 out:
-    return -ENOBUFS;
+    return -ENOMEM;
 }

 static void __devinet_sysctl_unregister(struct net *net,
@@ -1588,7 +1588,7 @@ static int __net_init fib_net_init(struct net *net)
     int error;

 #ifdef CONFIG_IP_ROUTE_CLASSID
-    net->ipv4.fib_num_tclassid_users = 0;
+    atomic_set(&net->ipv4.fib_num_tclassid_users, 0);
 #endif
     error = ip_fib_net_init(net);
     if (error < 0)
@@ -137,7 +137,7 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
     return err;
 }

-static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+static bool fib4_rule_suppress(struct fib_rule *rule, int flags, struct fib_lookup_arg *arg)
 {
     struct fib_result *result = (struct fib_result *) arg->result;
     struct net_device *dev = NULL;
@@ -258,7 +258,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
     if (tb[FRA_FLOW]) {
         rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
         if (rule4->tclassid)
-            net->ipv4.fib_num_tclassid_users++;
+            atomic_inc(&net->ipv4.fib_num_tclassid_users);
     }
 #endif

@@ -290,7 +290,7 @@ static int fib4_rule_delete(struct fib_rule *rule)

 #ifdef CONFIG_IP_ROUTE_CLASSID
     if (((struct fib4_rule *)rule)->tclassid)
-        net->ipv4.fib_num_tclassid_users--;
+        atomic_dec(&net->ipv4.fib_num_tclassid_users);
 #endif
     net->ipv4.fib_has_custom_rules = true;

--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -222,7 +222,7 @@ void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
 {
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	if (fib_nh->nh_tclassid)
-		net->ipv4.fib_num_tclassid_users--;
+		atomic_dec(&net->ipv4.fib_num_tclassid_users);
 #endif
 	fib_nh_common_release(&fib_nh->nh_common);
 }
@@ -624,7 +624,7 @@ int fib_nh_init(struct net *net, struct fib_nh *nh,
 #ifdef CONFIG_IP_ROUTE_CLASSID
 	nh->nh_tclassid = cfg->fc_flow;
 	if (nh->nh_tclassid)
-		net->ipv4.fib_num_tclassid_users++;
+		atomic_inc(&net->ipv4.fib_num_tclassid_users);
 #endif
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 	nh->fib_nh_weight = nh_weight;
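The fib hunks above and the devinet/fib_frontend ones before them are one logical change: fib_num_tclassid_users is updated from contexts that share no lock, so the plain ++/-- read-modify-write sequences could lose updates, and atomic_t makes each update indivisible. The same idea in self-contained C11 (stdatomic rather than the kernel's atomic_t API):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int tclassid_users;	/* analogue of the atomic_t counter */

    static void *worker(void *arg)
    {
    	(void)arg;
    	for (int i = 0; i < 100000; i++) {
    		atomic_fetch_add(&tclassid_users, 1);	/* like atomic_inc() */
    		atomic_fetch_sub(&tclassid_users, 1);	/* like atomic_dec() */
    	}
    	return NULL;
    }

    int main(void)
    {
    	pthread_t t[4];

    	for (int i = 0; i < 4; i++)
    		pthread_create(&t[i], NULL, worker, NULL);
    	for (int i = 0; i < 4; i++)
    		pthread_join(t[i], NULL);

    	/* With a plain int and ++/-- this could print anything;
    	 * with atomics it is always 0.
    	 */
    	printf("users=%d\n", atomic_load(&tclassid_users));
    	return 0;
    }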
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -260,7 +260,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
 	return __fib6_rule_action(rule, flp, flags, arg);
 }

-static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+static bool fib6_rule_suppress(struct fib_rule *rule, int flags, struct fib_lookup_arg *arg)
 {
 	struct fib6_result *res = arg->result;
 	struct rt6_info *rt = res->rt6;
@@ -287,8 +287,7 @@ static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg
 	return false;

 suppress_route:
-	if (!(arg->flags & FIB_LOOKUP_NOREF))
-		ip6_rt_put(rt);
+	ip6_rt_put_flags(rt, flags);
 	return true;
 }

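fib4_rule_suppress() and fib6_rule_suppress() now take the lookup flags as an explicit argument, and the IPv6 path replaces the open-coded FIB_LOOKUP_NOREF test with ip6_rt_put_flags(), which upstream is essentially the same test wrapped in a helper. A toy model of that guard, with every name invented for the demo except the FIB_LOOKUP_NOREF idea itself:

    #include <stdio.h>

    #define FIB_LOOKUP_NOREF 1	/* mirrors the kernel flag of the same name */

    struct rt { int refcnt; };

    static void rt_put(struct rt *rt)
    {
    	rt->refcnt--;
    }

    /* Models ip6_rt_put_flags(): only drop the reference when the
     * lookup actually took one, i.e. when NOREF was not requested.
     */
    static void rt_put_flags(struct rt *rt, int flags)
    {
    	if (!(flags & FIB_LOOKUP_NOREF))
    		rt_put(rt);
    }

    static int suppress(struct rt *rt, int flags)
    {
    	rt_put_flags(rt, flags);	/* flags passed in, not dug out of arg */
    	return 1;			/* suppress the route */
    }

    int main(void)
    {
    	struct rt rt = { .refcnt = 1 };

    	suppress(&rt, 0);		/* referenced lookup: ref is dropped */
    	printf("refcnt=%d\n", rt.refcnt);
    	return 0;
    }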
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1918,7 +1918,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
 		int keyid = rx->sta->ptk_idx;
 		sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);

-		if (ieee80211_has_protected(fc)) {
+		if (ieee80211_has_protected(fc) &&
+		    !(status->flag & RX_FLAG_IV_STRIPPED)) {
 			cs = rx->sta->cipher_scheme;
 			keyid = ieee80211_get_keyid(rx->skb, cs);

--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1491,22 +1491,52 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head)
 	kfree(mdev);
 }

-static void mpls_ifdown(struct net_device *dev, int event)
+static int mpls_ifdown(struct net_device *dev, int event)
 {
 	struct mpls_route __rcu **platform_label;
 	struct net *net = dev_net(dev);
-	u8 alive, deleted;
 	unsigned index;

 	platform_label = rtnl_dereference(net->mpls.platform_label);
 	for (index = 0; index < net->mpls.platform_labels; index++) {
 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+		bool nh_del = false;
+		u8 alive = 0;

 		if (!rt)
 			continue;

-		alive = 0;
-		deleted = 0;
+		if (event == NETDEV_UNREGISTER) {
+			u8 deleted = 0;
+
+			for_nexthops(rt) {
+				struct net_device *nh_dev =
+					rtnl_dereference(nh->nh_dev);
+
+				if (!nh_dev || nh_dev == dev)
+					deleted++;
+				if (nh_dev == dev)
+					nh_del = true;
+			} endfor_nexthops(rt);
+
+			/* if there are no more nexthops, delete the route */
+			if (deleted == rt->rt_nhn) {
+				mpls_route_update(net, index, NULL, NULL);
+				continue;
+			}
+
+			if (nh_del) {
+				size_t size = sizeof(*rt) + rt->rt_nhn *
+					rt->rt_nh_size;
+				struct mpls_route *orig = rt;
+
+				rt = kmalloc(size, GFP_KERNEL);
+				if (!rt)
+					return -ENOMEM;
+				memcpy(rt, orig, size);
+			}
+		}

 		change_nexthops(rt) {
 			unsigned int nh_flags = nh->nh_flags;
@@ -1530,16 +1560,15 @@ static void mpls_ifdown(struct net_device *dev, int event)
 next:
 			if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
 				alive++;
-			if (!rtnl_dereference(nh->nh_dev))
-				deleted++;
 		} endfor_nexthops(rt);

 		WRITE_ONCE(rt->rt_nhn_alive, alive);

-		/* if there are no more nexthops, delete the route */
-		if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
-			mpls_route_update(net, index, NULL, NULL);
+		if (nh_del)
+			mpls_route_update(net, index, rt, NULL);
 	}
+
+	return 0;
 }

 static void mpls_ifup(struct net_device *dev, unsigned int flags)
@@ -1607,8 +1636,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
 		return NOTIFY_OK;

 	switch (event) {
+		int err;
+
 	case NETDEV_DOWN:
-		mpls_ifdown(dev, event);
+		err = mpls_ifdown(dev, event);
+		if (err)
+			return notifier_from_errno(err);
 		break;
 	case NETDEV_UP:
 		flags = dev_get_flags(dev);
@@ -1619,13 +1652,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
 		break;
 	case NETDEV_CHANGE:
 		flags = dev_get_flags(dev);
-		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+		if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
 			mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
-		else
-			mpls_ifdown(dev, event);
+		} else {
+			err = mpls_ifdown(dev, event);
+			if (err)
+				return notifier_from_errno(err);
+		}
 		break;
 	case NETDEV_UNREGISTER:
-		mpls_ifdown(dev, event);
+		err = mpls_ifdown(dev, event);
+		if (err)
+			return notifier_from_errno(err);
 		mdev = mpls_dev_get(dev);
 		if (mdev) {
 			mpls_dev_sysctl_unregister(dev, mdev);
@@ -1636,8 +1674,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
 	case NETDEV_CHANGENAME:
 		mdev = mpls_dev_get(dev);
 		if (mdev) {
-			int err;
-
 			mpls_dev_sysctl_unregister(dev, mdev);
 			err = mpls_dev_sysctl_register(dev, mdev);
 			if (err)
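The reworked mpls_ifdown() no longer edits a route that readers may still be traversing when a nexthop device disappears on NETDEV_UNREGISTER: it clones the route, fixes up the copy, and publishes the clone through mpls_route_update(), returning -ENOMEM through the notifier if the clone cannot be allocated. A minimal userspace sketch of that copy-then-publish pattern (all names invented; the kernel pairs the publish with RCU, for which plain assignment only stands in):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct route {
    	int nh_alive;
    	/* a flexible nexthop payload would follow in the kernel */
    };

    static struct route *published;	/* stand-in for platform_label[index] */

    /* Edit a copy and swap it in, instead of mutating what readers see.
     * The kernel defers freeing the old copy via RCU; free() stands in.
     */
    static int route_update(void)
    {
    	struct route *old = published;
    	struct route *new = malloc(sizeof(*new));

    	if (!new)
    		return -1;	/* kernel: return -ENOMEM to the notifier */

    	memcpy(new, old, sizeof(*new));
    	new->nh_alive = 0;	/* the edit happens on the private copy */

    	published = new;	/* single publish step */
    	free(old);
    	return 0;
    }

    int main(void)
    {
    	published = calloc(1, sizeof(*published));
    	if (!published)
    		return 1;
    	published->nh_alive = 2;

    	if (route_update())
    		return 1;
    	printf("alive=%d\n", published->nh_alive);
    	free(published);
    	return 0;
    }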
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -510,7 +510,7 @@ void rds_tcp_tune(struct socket *sock)
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
 	}
 	if (rtn->rcvbuf_size > 0) {
-		sk->sk_sndbuf = rtn->rcvbuf_size;
+		sk->sk_rcvbuf = rtn->rcvbuf_size;
 		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
 	}
 	release_sock(sk);
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -297,6 +297,12 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
 	return peer;
 }

+static void rxrpc_free_peer(struct rxrpc_peer *peer)
+{
+	rxrpc_put_local(peer->local);
+	kfree_rcu(peer, rcu);
+}
+
 /*
  * Set up a new incoming peer. There shouldn't be any other matching peers
  * since we've already done a search in the list from the non-reentrant context
@@ -363,7 +369,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
 		spin_unlock_bh(&rxnet->peer_hash_lock);

 		if (peer)
-			kfree(candidate);
+			rxrpc_free_peer(candidate);
 		else
 			peer = candidate;
 	}
@@ -418,8 +424,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 	list_del_init(&peer->keepalive_link);
 	spin_unlock_bh(&rxnet->peer_hash_lock);

-	rxrpc_put_local(peer->local);
-	kfree_rcu(peer, rcu);
+	rxrpc_free_peer(peer);
 }

 /*
@@ -455,8 +460,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
 	if (n == 0) {
 		hash_del_rcu(&peer->hash_link);
 		list_del_init(&peer->keepalive_link);
-		rxrpc_put_local(peer->local);
-		kfree_rcu(peer, rcu);
+		rxrpc_free_peer(peer);
 	}
 }

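The rxrpc leak came from asymmetry: every teardown path did rxrpc_put_local() plus kfree_rcu() except the one in rxrpc_lookup_peer(), which discarded a losing candidate with bare kfree() and so leaked the embedded rxrpc_local reference. Hoisting both steps into rxrpc_free_peer() leaves one correct teardown shared by all callers. The shape of the refactor in plain C, with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct local { int refcnt; };
    struct peer  { struct local *local; };

    static void put_local(struct local *l)
    {
    	l->refcnt--;
    }

    /* One teardown helper shared by every path that discards a peer,
     * so no caller can forget the embedded ->local reference again.
     */
    static void free_peer(struct peer *p)
    {
    	put_local(p->local);	/* the step the bare kfree() path skipped */
    	free(p);
    }

    int main(void)
    {
    	struct local local = { .refcnt = 1 };
    	struct peer *candidate = malloc(sizeof(*candidate));

    	if (!candidate)
    		return 1;
    	candidate->local = &local;
    	local.refcnt++;		/* the candidate holds a reference */

    	free_peer(candidate);	/* losing candidate: both parts released */
    	printf("local refcnt=%d\n", local.refcnt);
    	return 0;
    }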
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -467,12 +467,26 @@ static void smc_link_save_peer_info(struct smc_link *link,

 static void smc_switch_to_fallback(struct smc_sock *smc)
 {
+	wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
+	wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk);
+	unsigned long flags;
+
 	smc->use_fallback = true;
 	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
 		smc->clcsock->file = smc->sk.sk_socket->file;
 		smc->clcsock->file->private_data = smc->clcsock;
 		smc->clcsock->wq.fasync_list =
 			smc->sk.sk_socket->wq.fasync_list;
+
+		/* There may be some entries remaining in
+		 * smc socket->wq, which should be removed
+		 * to clcsocket->wq during the fallback.
+		 */
+		spin_lock_irqsave(&smc_wait->lock, flags);
+		spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
+		list_splice_init(&smc_wait->head, &clc_wait->head);
+		spin_unlock(&clc_wait->lock);
+		spin_unlock_irqrestore(&smc_wait->lock, flags);
 	}
 }

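During SMC fallback, any waiters still parked on the smc socket's wait queue would otherwise never be woken, because future wakeups happen on the clcsock's queue; the hunk splices the entries across while holding both queue locks, with spin_lock_nested(SINGLE_DEPTH_NESTING) telling lockdep the second same-class lock is intentional. A runnable miniature of the splice-under-both-locks idea (pthread mutexes and a toy singly linked list; all names invented):

    #include <pthread.h>
    #include <stdio.h>

    struct waiter { struct waiter *next; int id; };

    struct wq {
    	pthread_mutex_t lock;
    	struct waiter *head;
    };

    /* Move every entry from src to dst while holding both locks, in a
     * fixed src-then-dst order (the kernel uses spin_lock_nested() with
     * SINGLE_DEPTH_NESTING to keep lockdep happy about the second lock).
     */
    static void wq_splice(struct wq *src, struct wq *dst)
    {
    	pthread_mutex_lock(&src->lock);
    	pthread_mutex_lock(&dst->lock);

    	struct waiter **tail = &dst->head;

    	while (*tail)			/* find the end of dst */
    		tail = &(*tail)->next;
    	*tail = src->head;		/* append all of src */
    	src->head = NULL;		/* src is now empty */

    	pthread_mutex_unlock(&dst->lock);
    	pthread_mutex_unlock(&src->lock);
    }

    int main(void)
    {
    	struct waiter w = { .next = NULL, .id = 42 };
    	struct wq smc = { PTHREAD_MUTEX_INITIALIZER, &w };
    	struct wq clc = { PTHREAD_MUTEX_INITIALIZER, NULL };

    	wq_splice(&smc, &clc);		/* the waiter follows the fallback */
    	printf("moved waiter %d\n", clc.head ? clc.head->id : -1);
    	return 0;
    }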
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -183,6 +183,7 @@ int smc_close_active(struct smc_sock *smc)
 	int old_state;
 	long timeout;
 	int rc = 0;
+	int rc1 = 0;

 	timeout = current->flags & PF_EXITING ?
 		  0 : sock_flag(sk, SOCK_LINGER) ?
@@ -222,8 +223,11 @@ int smc_close_active(struct smc_sock *smc)
 			/* actively shutdown clcsock before peer close it,
 			 * prevent peer from entering TIME_WAIT state.
 			 */
-			if (smc->clcsock && smc->clcsock->sk)
-				rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+			if (smc->clcsock && smc->clcsock->sk) {
+				rc1 = kernel_sock_shutdown(smc->clcsock,
+							   SHUT_RDWR);
+				rc = rc ? rc : rc1;
+			}
 		} else {
 			/* peer event has changed the state */
 			goto again;
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -512,7 +512,7 @@ static int tls_do_encryption(struct sock *sk,
 	memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
 	       prot->iv_size + prot->salt_size);

-	xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);
+	xor_iv_with_seq(prot->version, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);

 	sge->offset += prot->prepend_size;
 	sge->length -= prot->prepend_size;
@@ -1483,7 +1483,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
 	else
 		memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);

-	xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
+	xor_iv_with_seq(prot->version, iv + iv_offset, tls_ctx->rx.rec_seq);

 	/* Prepare AAD */
 	tls_make_aad(aad, rxm->full_len - prot->overhead_size +
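Both TLS hunks fix the same off-by-offset: the record sequence number must be XORed into the nonce at iv_offset, because in CCM mode the buffer carries a leading length byte before the salt and IV. A standalone sketch of the XOR step; the 4-byte salt / 8-byte IV layout mirrors TLS AES-GCM, and the constants here are illustrative only:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SALT_SIZE 4	/* implicit part of the nonce */
    #define IV_SIZE   8	/* explicit per-record part */

    /* XOR the 8-byte record sequence number into the tail of the nonce.
     * If the buffer carries a leading length byte (CCM), the caller must
     * point us past it via iv_offset; forgetting that was the bug fixed.
     */
    static void xor_iv_with_seq(uint8_t *iv, const uint8_t *seq)
    {
    	for (int i = 0; i < 8; i++)
    		iv[SALT_SIZE + i] ^= seq[i];
    }

    int main(void)
    {
    	uint8_t buf[1 + SALT_SIZE + IV_SIZE] = { 0 };	/* [0] = CCM length byte */
    	uint8_t seq[8] = { 0, 0, 0, 0, 0, 0, 0, 1 };	/* record #1 */
    	size_t iv_offset = 1;				/* skip the length byte */

    	xor_iv_with_seq(buf + iv_offset, seq);		/* offset applied */
    	printf("last nonce byte: %02x\n", buf[iv_offset + SALT_SIZE + 7]);
    	return 0;
    }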
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -569,14 +569,17 @@ static int report__browse_hists(struct report *rep)
 	int ret;
 	struct perf_session *session = rep->session;
 	struct evlist *evlist = session->evlist;
-	const char *help = perf_tip(system_path(TIPDIR));
+	char *help = NULL, *path = NULL;

-	if (help == NULL) {
+	path = system_path(TIPDIR);
+	if (perf_tip(&help, path) || help == NULL) {
 		/* fallback for people who don't install perf ;-) */
-		help = perf_tip(DOCDIR);
-		if (help == NULL)
-			help = "Cannot load tips.txt file, please install perf!";
+		free(path);
+		path = system_path(DOCDIR);
+		if (perf_tip(&help, path) || help == NULL)
+			help = strdup("Cannot load tips.txt file, please install perf!");
 	}
+	free(path);

 	switch (use_browser) {
 	case 1:
@@ -598,7 +601,7 @@ static int report__browse_hists(struct report *rep)
 		ret = perf_evlist__tty_browse_hists(evlist, rep, help);
 		break;
 	}
-
+	free(help);
 	return ret;
 }

--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -472,6 +472,18 @@ struct perf_hpp_list perf_hpp_list = {
 #undef __HPP_SORT_ACC_FN
 #undef __HPP_SORT_RAW_FN

+static void fmt_free(struct perf_hpp_fmt *fmt)
+{
+	/*
+	 * At this point fmt should be completely
+	 * unhooked, if not it's a bug.
+	 */
+	BUG_ON(!list_empty(&fmt->list));
+	BUG_ON(!list_empty(&fmt->sort_list));
+
+	if (fmt->free)
+		fmt->free(fmt);
+}
+
 void perf_hpp__init(void)
 {
@@ -535,9 +547,10 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
 	list_add(&format->sort_list, &list->sorts);
 }

-void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
+static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
 {
 	list_del_init(&format->list);
+	fmt_free(format);
 }

 void perf_hpp__cancel_cumulate(void)
@@ -609,19 +622,6 @@ void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
 }


-static void fmt_free(struct perf_hpp_fmt *fmt)
-{
-	/*
-	 * At this point fmt should be completely
-	 * unhooked, if not it's a bug.
-	 */
-	BUG_ON(!list_empty(&fmt->list));
-	BUG_ON(!list_empty(&fmt->sort_list));
-
-	if (fmt->free)
-		fmt->free(fmt);
-}
-
 void perf_hpp__reset_output_field(struct perf_hpp_list *list)
 {
 	struct perf_hpp_fmt *fmt, *tmp;
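After the hist.c change, unregistering a column both unlinks the format and funnels it through fmt_free(), whose BUG_ON()s insist the entry is already off every list before the optional ->free() callback runs; made static, the helper and the unregister function can no longer be misused from outside (hence the hist.h hunk that follows). The optional-destructor pattern in miniature, with assert() standing in for BUG_ON() and all names invented:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fmt {
    	struct fmt *next;		/* toy stand-in for the list linkage */
    	void (*free)(struct fmt *);	/* optional destructor */
    };

    static void fmt_free(struct fmt *f)
    {
    	assert(f->next == NULL);	/* kernel: BUG_ON(!list_empty(...)) */
    	if (f->free)			/* only some formats own memory */
    		f->free(f);
    }

    static void dyn_free(struct fmt *f)
    {
    	free(f);			/* heap-allocated formats release here */
    }

    int main(void)
    {
    	struct fmt *f = calloc(1, sizeof(*f));

    	if (!f)
    		return 1;
    	f->free = dyn_free;
    	/* unregister = unlink first, then free exactly once */
    	f->next = NULL;
    	fmt_free(f);
    	puts("freed once, no leak");
    	return 0;
    }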
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -361,7 +361,6 @@ enum {
 };

 void perf_hpp__init(void);
-void perf_hpp__column_unregister(struct perf_hpp_fmt *format);
 void perf_hpp__cancel_cumulate(void);
 void perf_hpp__setup_output_field(struct perf_hpp_list *list);
 void perf_hpp__reset_output_field(struct perf_hpp_list *list);
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -343,32 +343,32 @@ fetch_kernel_version(unsigned int *puint, char *str,
 	return 0;
 }

-const char *perf_tip(const char *dirpath)
+int perf_tip(char **strp, const char *dirpath)
 {
 	struct strlist *tips;
 	struct str_node *node;
-	char *tip = NULL;
 	struct strlist_config conf = {
 		.dirname = dirpath,
 		.file_only = true,
 	};
+	int ret = 0;

+	*strp = NULL;
 	tips = strlist__new("tips.txt", &conf);
 	if (tips == NULL)
-		return errno == ENOENT ? NULL :
-			"Tip: check path of tips.txt or get more memory! ;-p";
+		return -errno;

 	if (strlist__nr_entries(tips) == 0)
 		goto out;

 	node = strlist__entry(tips, random() % strlist__nr_entries(tips));
-	if (asprintf(&tip, "Tip: %s", node->s) < 0)
-		tip = (char *)"Tip: get more memory! ;-)";
+	if (asprintf(strp, "Tip: %s", node->s) < 0)
+		ret = -ENOMEM;

 out:
 	strlist__delete(tips);

-	return tip;
+	return ret;
 }

 char *perf_exe(char *buf, int len)
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -37,7 +37,7 @@ int fetch_kernel_version(unsigned int *puint,
 #define KVER_FMT "%d.%d.%d"
 #define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x)

-const char *perf_tip(const char *dirpath);
+int perf_tip(char **strp, const char *dirpath);

 #ifndef HAVE_SCHED_GETCPU_SUPPORT
 int sched_getcpu(void);
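The perf_tip() rework replaces mixed ownership (sometimes a heap string, sometimes a literal that must not be freed) with a uniform contract: an int of 0 or -errno, and an out parameter that is always either NULL or caller-owned heap memory. That is what lets report__browse_hists() unconditionally free() both path and help. A userspace sketch of the same contract, with an invented get_tip() standing in for perf_tip():

    #define _GNU_SOURCE	/* for asprintf() on glibc */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* New-style contract: 0 or -errno as the result, and *strp is either
     * NULL or heap memory the caller must free() -- never a literal.
     */
    static int get_tip(char **strp, const char *dirpath)
    {
    	*strp = NULL;
    	if (!dirpath)
    		return -ENOENT;
    	if (asprintf(strp, "Tip: read the docs in %s", dirpath) < 0)
    		return -ENOMEM;
    	return 0;
    }

    int main(void)
    {
    	char *help = NULL;

    	if (get_tip(&help, "/usr/share/doc") || help == NULL)
    		help = strdup("Cannot load tips.txt file, please install perf!");

    	printf("%s\n", help);
    	free(help);	/* uniform ownership: always safe to free */
    	return 0;
    }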
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -3450,8 +3450,8 @@ EOF
 ################################################################################
 # main

-TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_addr_bind ipv4_runtime ipv4_netfilter"
-TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_addr_bind ipv6_runtime ipv6_netfilter"
+TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_bind ipv4_runtime ipv4_netfilter"
+TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_bind ipv6_runtime ipv6_netfilter"
 TESTS_OTHER="use_cases"

 PAUSE_ON_FAIL=no