This is the 5.4.120 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmCkyEcACgkQONu9yGCS
 aT70Qg//Rv09McvLQ+8E0OilJ7TdT0UthXQFP+uPTu+/HPeHQkCO168cn1hbwD9K
 i0YfFYB7PqPe/wccHNsmWHSUYCzA9NnwExA84/jofjswkEMMc95x/bow5/xmLe/5
 ImkjODPVHuQWewgMfbSmNu7Br4wmQC5U/K4r7hp/Aa0FdTjcHMI6Zw40FGbJrWmq
 kiqhW9CeagKbxWrihQNLrSB4E5CdpNNkug/zVus2n9jlFT4tltNGSd7bPsxrp7LN
 EdTfayyPUVZeCoysTNA0WZgz47f+z47vAdIlDHzWCIOZcM1RnJXKA5kFXRf8Fnfa
 +hyvaHSDqYGdRgZxYMcXLL+/cS4foQ/8iQxZBCMomABM0MNUuoJ5tYR6GVetlRcR
 46ZC/5OAvNoKY2Kj4Ky4ROF7aMR3NkYCY6wHUVRcw8778bmuReeLJJPsWojAI+4F
 pWT08+7OUJYb3hRnGxxzKot6CPztdkpQXfXMy+wyNlNbRZ/ivs9/f/GhdblXy/6T
 j12LKIh1IOxpB/wi7GRfeABUuC4MU8xqx6FuPDrBgCTMfVig/wcwF27AUr//a0F5
 xrrzCrDFNAvuyD1WyYilaxWDHAe2o9ROT0JZ4VB3zu40w2VlTT77aqA174xfQa6b
 418Eykw3O11dmsY8AQPTt1HhkDCiewEe4K58CJcmCNEf/inFbvI=
 =kNQc
 -----END PGP SIGNATURE-----

Merge 5.4.120 into android11-5.4-lts

Changes in 5.4.120
	tpm: fix error return code in tpm2_get_cc_attrs_tbl()
	tpm, tpm_tis: Extend locality handling to TPM2 in tpm_tis_gen_interrupt()
	tpm, tpm_tis: Reserve locality in tpm_tis_resume()
	KVM: x86/mmu: Remove the defunct update_pte() paging hook
	PM: runtime: Fix unpaired parent child_count for force_resume
	fs: dlm: fix debugfs dump
	tipc: convert dest node's address to network order
	ASoC: Intel: bytcr_rt5640: Enable jack-detect support on Asus T100TAF
	net: stmmac: Set FIFO sizes for ipq806x
	ASoC: rsnd: core: Check convert rate in rsnd_hw_params
	i2c: bail out early when RDWR parameters are wrong
	ALSA: hdsp: don't disable if not enabled
	ALSA: hdspm: don't disable if not enabled
	ALSA: rme9652: don't disable if not enabled
	ALSA: bebob: enable to deliver MIDI messages for multiple ports
	Bluetooth: Set CONF_NOT_COMPLETE as l2cap_chan default
	Bluetooth: initialize skb_queue_head at l2cap_chan_create()
	net: bridge: when suppression is enabled exclude RARP packets
	Bluetooth: check for zapped sk before connecting
	ip6_vti: proper dev_{hold|put} in ndo_[un]init methods
	ASoC: Intel: bytcr_rt5640: Add quirk for the Chuwi Hi8 tablet
	i2c: Add I2C_AQ_NO_REP_START adapter quirk
	mac80211: clear the beacon's CRC after channel switch
	pinctrl: samsung: use 'int' for register masks in Exynos
	mt76: mt76x0: disable GTK offloading
	cuse: prevent clone
	ASoC: rsnd: call rsnd_ssi_master_clk_start() from rsnd_ssi_init()
	Revert "iommu/amd: Fix performance counter initialization"
	iommu/amd: Remove performance counter pre-initialization test
	drm/amd/display: Force vsync flip when reconfiguring MPCC
	selftests: Set CC to clang in lib.mk if LLVM is set
	kconfig: nconf: stop endless search loops
	ALSA: hda/hdmi: fix race in handling acomp ELD notification at resume
	sctp: Fix out-of-bounds warning in sctp_process_asconf_param()
	flow_dissector: Fix out-of-bounds warning in __skb_flow_bpf_to_target()
	powerpc/smp: Set numa node before updating mask
	ASoC: rt286: Generalize support for ALC3263 codec
	ethtool: ioctl: Fix out-of-bounds warning in store_link_ksettings_for_user()
	net: sched: tapr: prevent cycle_time == 0 in parse_taprio_schedule
	samples/bpf: Fix broken tracex1 due to kprobe argument change
	powerpc/pseries: Stop calling printk in rtas_stop_self()
	drm/amd/display: fixed divide by zero kernel crash during dsc enablement
	wl3501_cs: Fix out-of-bounds warnings in wl3501_send_pkt
	wl3501_cs: Fix out-of-bounds warnings in wl3501_mgmt_join
	qtnfmac: Fix possible buffer overflow in qtnf_event_handle_external_auth
	powerpc/iommu: Annotate nested lock for lockdep
	iavf: remove duplicate free resources calls
	net: ethernet: mtk_eth_soc: fix RX VLAN offload
	bnxt_en: Add PCI IDs for Hyper-V VF devices.
	ia64: module: fix symbolizer crash on fdescr
	ASoC: rt286: Make RT286_SET_GPIO_* readable and writable
	thermal: thermal_of: Fix error return code of thermal_of_populate_bind_params()
	f2fs: fix a redundant call to f2fs_balance_fs if an error occurs
	PCI: iproc: Fix return value of iproc_msi_irq_domain_alloc()
	PCI: Release OF node in pci_scan_device()'s error path
	ARM: 9064/1: hw_breakpoint: Do not directly check the event's overflow_handler hook
	rpmsg: qcom_glink_native: fix error return code of qcom_glink_rx_data()
	NFSv4.2: Always flush out writes in nfs42_proc_fallocate()
	NFS: Deal correctly with attribute generation counter overflow
	PCI: endpoint: Fix missing destroy_workqueue()
	pNFS/flexfiles: fix incorrect size check in decode_nfs_fh()
	NFSv4.2 fix handling of sr_eof in SEEK's reply
	rtc: fsl-ftm-alarm: add MODULE_TABLE()
	ceph: fix inode leak on getattr error in __fh_to_dentry
	rtc: ds1307: Fix wday settings for rx8130
	net: hns3: fix incorrect configuration for igu_egu_hw_err
	net: hns3: initialize the message content in hclge_get_link_mode()
	net: hns3: add check for HNS3_NIC_STATE_INITED in hns3_reset_notify_up_enet()
	net: hns3: fix for vxlan gpe tx checksum bug
	net: hns3: use netif_tx_disable to stop the transmit queue
	net: hns3: disable phy loopback setting in hclge_mac_start_phy
	sctp: do asoc update earlier in sctp_sf_do_dupcook_a
	RISC-V: Fix error code returned by riscv_hartid_to_cpuid()
	sunrpc: Fix misplaced barrier in call_decode
	ethernet:enic: Fix a use after free bug in enic_hard_start_xmit
	sctp: fix a SCTP_MIB_CURRESTAB leak in sctp_sf_do_dupcook_b
	netfilter: xt_SECMARK: add new revision to fix structure layout
	drm/radeon: Fix off-by-one power_state index heap overwrite
	drm/radeon: Avoid power table parsing memory leaks
	khugepaged: fix wrong result value for trace_mm_collapse_huge_page_isolate()
	mm/hugeltb: handle the error case in hugetlb_fix_reserve_counts()
	mm/migrate.c: fix potential indeterminate pte entry in migrate_vma_insert_page()
	ksm: fix potential missing rmap_item for stable_node
	net: fix nla_strcmp to handle more then one trailing null character
	smc: disallow TCP_ULP in smc_setsockopt()
	netfilter: nfnetlink_osf: Fix a missing skb_header_pointer() NULL check
	can: m_can: m_can_tx_work_queue(): fix tx_skb race condition
	sched: Fix out-of-bound access in uclamp
	sched/fair: Fix unfairness caused by missing load decay
	kernel: kexec_file: fix error return code of kexec_calculate_store_digests()
	netfilter: nftables: avoid overflows in nft_hash_buckets()
	i40e: Fix use-after-free in i40e_client_subtask()
	i40e: fix the restart auto-negotiation after FEC modified
	i40e: Fix PHY type identifiers for 2.5G and 5G adapters
	ARC: entry: fix off-by-one error in syscall number validation
	ARC: mm: PAE: use 40-bit physical page mask
	powerpc/64s: Fix crashes when toggling stf barrier
	powerpc/64s: Fix crashes when toggling entry flush barrier
	hfsplus: prevent corruption in shrinking truncate
	squashfs: fix divide error in calculate_skip()
	userfaultfd: release page in error path to avoid BUG_ON
	mm/hugetlb: fix F_SEAL_FUTURE_WRITE
	drm/radeon/dpm: Disable sclk switching on Oland when two 4K 60Hz monitors are connected
	drm/i915: Avoid div-by-zero on gen2
	iio: proximity: pulsedlight: Fix rumtime PM imbalance on error
	usb: fotg210-hcd: Fix an error message
	hwmon: (occ) Fix poll rate limiting
	ACPI: scan: Fix a memory leak in an error handling path
	kyber: fix out of bounds access when preempted
	nbd: Fix NULL pointer in flush_workqueue
	blk-mq: Swap two calls in blk_mq_exit_queue()
	iomap: fix sub-page uptodate handling
	usb: dwc3: omap: improve extcon initialization
	usb: dwc3: pci: Enable usb2-gadget-lpm-disable for Intel Merrifield
	usb: xhci: Increase timeout for HC halt
	usb: dwc2: Fix gadget DMA unmap direction
	usb: core: hub: fix race condition about TRSMRCY of resume
	usb: dwc3: gadget: Return success always for kick transfer in ep queue
	xhci: Do not use GFP_KERNEL in (potentially) atomic context
	xhci: Add reset resume quirk for AMD xhci controller.
	iio: gyro: mpu3050: Fix reported temperature value
	iio: tsl2583: Fix division by a zero lux_val
	cdc-wdm: untangle a circular dependency between callback and softint
	KVM: x86: Cancel pvclock_gtod_work on module removal
	mm: fix struct page layout on 32-bit systems
	FDDI: defxx: Make MMIO the configuration default except for EISA
	MIPS: Reinstate platform `__div64_32' handler
	MIPS: Avoid DIVU in `__div64_32' is result would be zero
	MIPS: Avoid handcoded DIVU in `__div64_32' altogether
	thermal/core/fair share: Lock the thermal zone while looping over instances
	f2fs: fix error handling in f2fs_end_enable_verity()
	ARM: 9011/1: centralize phys-to-virt conversion of DT/ATAGS address
	ARM: 9012/1: move device tree mapping out of linear region
	ARM: 9020/1: mm: use correct section size macro to describe the FDT virtual address
	ARM: 9027/1: head.S: explicitly map DT even if it lives in the first physical section
	usb: typec: tcpm: Fix error while calculating PPS out values
	kobject_uevent: remove warning in init_uevent_argv()
	netfilter: conntrack: Make global sysctls readonly in non-init netns
	clk: exynos7: Mark aclk_fsys1_200 as critical
	nvme: do not try to reconfigure APST when the controller is not live
	ASoC: rsnd: check all BUSIF status when error
	Linux 5.4.120

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Iab57c5f8164542fa2a5bdc2c9a8f516ccfd67b5a
commit 5317188981 by Greg Kroah-Hartman, 2021-05-19 10:41:47 +02:00
159 changed files with 1032 additions and 499 deletions

@ -45,9 +45,14 @@ fffe8000 fffeffff DTCM mapping area for platforms with
fffe0000 fffe7fff ITCM mapping area for platforms with
ITCM mounted inside the CPU.
ffc00000 ffefffff Fixmap mapping region. Addresses provided
ffc80000 ffefffff Fixmap mapping region. Addresses provided
by fix_to_virt() will be located here.
ffc00000 ffc7ffff Guard region
ff800000 ffbfffff Permanent, fixed read-only mapping of the
firmware provided DT blob
fee00000 feffffff Mapping of PCI I/O space. This is a static
mapping within the vmalloc space.

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 119
SUBLEVEL = 120
EXTRAVERSION =
NAME = Kleptomaniac Octopus

View File

@ -7,6 +7,18 @@
#include <uapi/asm/page.h>
#ifdef CONFIG_ARC_HAS_PAE40
#define MAX_POSSIBLE_PHYSMEM_BITS 40
#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK)
#else /* CONFIG_ARC_HAS_PAE40 */
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#define PAGE_MASK_PHYS PAGE_MASK
#endif /* CONFIG_ARC_HAS_PAE40 */
#ifndef __ASSEMBLY__
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
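
The point of PAGE_MASK_PHYS is easiest to see in isolation: on ARC, PAGE_MASK is built from a 32-bit unsigned long, so masking a 40-bit PAE physical address with it silently drops bits 32..39. A minimal userspace sketch of the before/after (addresses made up):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define PAGE_MASK       ((uint32_t)~0xfffu)             /* models ARC's 32-bit long mask */
#define PAGE_MASK_PHYS  (0xff00000000ull | PAGE_MASK)   /* keeps bits 32..39 */

int main(void)
{
        uint64_t paddr = 0x1234567890ull;               /* hypothetical 40-bit address */

        /* old behaviour: the top byte of the physical address is lost */
        printf("PAGE_MASK:      %#" PRIx64 "\n", paddr & PAGE_MASK);        /* 0x34567000 */
        /* patched behaviour: page offset cleared, high bits preserved */
        printf("PAGE_MASK_PHYS: %#" PRIx64 "\n", paddr & PAGE_MASK_PHYS);   /* 0x1234567000 */
        return 0;
}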

@ -108,8 +108,8 @@
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SPECIAL)
/* More Abbrevaited helpers */
#define PAGE_U_NONE __pgprot(___DEF)
#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
@ -133,13 +133,7 @@
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
#ifdef CONFIG_ARC_HAS_PAE40
#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
#define MAX_POSSIBLE_PHYSMEM_BITS 40
#else
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
/**************************************************************************
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)

@ -33,5 +33,4 @@
#define PAGE_MASK (~(PAGE_SIZE-1))
#endif /* _UAPI__ASM_ARC_PAGE_H */

@ -165,7 +165,7 @@ tracesys:
; Do the Sys Call as we normally would.
; Validate the Sys Call number
cmp r8, NR_syscalls
cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi tracesys_exit
@ -243,7 +243,7 @@ ENTRY(EV_Trap)
;============ Normal syscall case
; syscall num shd not exceed the total system calls avail
cmp r8, NR_syscalls
cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi .Lret_from_system_call
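
For reference, bhi branches when the unsigned comparison is strictly higher, so the old "cmp r8, NR_syscalls" check still accepted r8 == NR_syscalls and indexed one entry past the table. A C rendering of the check (toy table, not the kernel's):

#include <stdio.h>

#define NR_syscalls 3
static const char *sys_call_table[NR_syscalls] = { "read", "write", "open" };

static const char *dispatch(unsigned int num)
{
        /* old test was effectively "num > NR_syscalls", which still let
         * num == NR_syscalls through, one slot past the end */
        if (num > NR_syscalls - 1)      /* the patched test */
                return "ENOSYS";
        return sys_call_table[num];
}

int main(void)
{
        printf("%s\n", dispatch(NR_syscalls));  /* ENOSYS, not an out-of-bounds read */
        return 0;
}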

@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
unsigned long flags)
{
unsigned int off;
unsigned long vaddr;
struct vm_struct *area;
phys_addr_t off, end;
phys_addr_t end;
pgprot_t prot = __pgprot(flags);
/* Don't allow wraparound, zero size */
@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
/* Mappings have to be page-aligned */
off = paddr & ~PAGE_MASK;
paddr &= PAGE_MASK;
paddr &= PAGE_MASK_PHYS;
size = PAGE_ALIGN(end + 1) - paddr;
/*

@ -597,7 +597,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
pte_t *ptep)
{
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
struct page *page = pfn_to_page(pte_pfn(*ptep));
create_tlb(vma, vaddr, ptep);

@ -2,7 +2,7 @@
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#define FIXADDR_START 0xffc00000UL
#define FIXADDR_START 0xffc80000UL
#define FIXADDR_END 0xfff00000UL
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)

@ -67,6 +67,10 @@
*/
#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
#define FDT_FIXED_BASE UL(0xff800000)
#define FDT_FIXED_SIZE (2 * SECTION_SIZE)
#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* Allow 16MB-aligned ioremap pages
@ -107,6 +111,7 @@ extern unsigned long vectors_base;
#define MODULES_VADDR PAGE_OFFSET
#define XIP_VIRT_ADDR(physaddr) (physaddr)
#define FDT_VIRT_BASE(physbase) ((void *)(physbase))
#endif /* !CONFIG_MMU */
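
The arithmetic behind FDT_VIRT_BASE() deserves a worked example: the section holding the blob is mapped read-only at FDT_FIXED_BASE, so only the offset within that section survives into the virtual address. A standalone sketch with a made-up r2 value (1 MiB non-LPAE sections assumed):

#include <stdio.h>

#define SECTION_SIZE    0x100000ul      /* 1 MiB section, non-LPAE assumption */
#define SECTION_MASK    (~(SECTION_SIZE - 1))
#define FDT_FIXED_BASE  0xff800000ul
#define FDT_VIRT_BASE(physbase) (FDT_FIXED_BASE | ((physbase) % SECTION_SIZE))

int main(void)
{
        unsigned long atags_pointer = 0x42304000ul;     /* hypothetical DT physical address */

        printf("map phys %#lx at virt %#lx\n",
               atags_pointer & SECTION_MASK, FDT_FIXED_BASE);   /* 0x42300000 -> 0xff800000 */
        printf("DT virt %#lx\n", FDT_VIRT_BASE(atags_pointer)); /* 0xff804000 */
        return 0;
}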

@ -9,12 +9,12 @@
#ifdef CONFIG_OF
extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
extern const struct machine_desc *setup_machine_fdt(void *dt_virt);
extern void __init arm_dt_init_cpu_maps(void);
#else /* CONFIG_OF */
static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
static inline const struct machine_desc *setup_machine_fdt(void *dt_virt)
{
return NULL;
}

@ -2,11 +2,11 @@
void convert_to_tag_list(struct tag *tags);
#ifdef CONFIG_ATAGS
const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
const struct machine_desc *setup_machine_tags(void *__atags_vaddr,
unsigned int machine_nr);
#else
static inline const struct machine_desc * __init __noreturn
setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
setup_machine_tags(void *__atags_vaddr, unsigned int machine_nr)
{
early_print("no ATAGS support: can't continue\n");
while (true);

@ -176,7 +176,7 @@ static void __init squash_mem_tags(struct tag *tag)
}
const struct machine_desc * __init
setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
setup_machine_tags(void *atags_vaddr, unsigned int machine_nr)
{
struct tag *tags = (struct tag *)&default_tags;
const struct machine_desc *mdesc = NULL, *p;
@ -197,8 +197,8 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
if (!mdesc)
return NULL;
if (__atags_pointer)
tags = phys_to_virt(__atags_pointer);
if (atags_vaddr)
tags = atags_vaddr;
else if (mdesc->atag_offset)
tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);

@ -203,12 +203,12 @@ static const void * __init arch_get_next_mach(const char *const **match)
/**
* setup_machine_fdt - Machine setup when an dtb was passed to the kernel
* @dt_phys: physical address of dt blob
* @dt_virt: virtual address of dt blob
*
* If a dtb was passed to the kernel in r2, then use it to choose the
* correct machine_desc and to setup the system.
*/
const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
const struct machine_desc * __init setup_machine_fdt(void *dt_virt)
{
const struct machine_desc *mdesc, *mdesc_best = NULL;
@ -221,7 +221,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
mdesc_best = &__mach_desc_GENERIC_DT;
#endif
if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
if (!dt_virt || !early_init_dt_verify(dt_virt))
return NULL;
mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);

@ -274,11 +274,10 @@ __create_page_tables:
* We map 2 sections in case the ATAGs/DTB crosses a section boundary.
*/
mov r0, r2, lsr #SECTION_SHIFT
movs r0, r0, lsl #SECTION_SHIFT
subne r3, r0, r8
addne r3, r3, #PAGE_OFFSET
addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
orrne r6, r7, r0
cmp r2, #0
ldrne r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
addne r3, r3, r4
orrne r6, r7, r0, lsl #SECTION_SHIFT
strne r6, [r3], #1 << PMD_ORDER
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]

@ -883,7 +883,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
info->trigger = addr;
pr_debug("breakpoint fired: address = 0x%x\n", addr);
perf_bp_event(bp, regs);
if (!bp->overflow_handler)
if (is_default_overflow_handler(bp))
enable_single_step(bp, addr);
goto unlock;
}

@ -18,6 +18,7 @@
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
@ -1089,19 +1090,27 @@ static struct notifier_block arm_restart_nb = {
void __init setup_arch(char **cmdline_p)
{
const struct machine_desc *mdesc;
const struct machine_desc *mdesc = NULL;
void *atags_vaddr = NULL;
if (__atags_pointer)
atags_vaddr = FDT_VIRT_BASE(__atags_pointer);
setup_processor();
mdesc = setup_machine_fdt(__atags_pointer);
if (atags_vaddr) {
mdesc = setup_machine_fdt(atags_vaddr);
if (mdesc)
memblock_reserve(__atags_pointer,
fdt_totalsize(atags_vaddr));
}
if (!mdesc)
mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
if (!mdesc) {
early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
early_print(" r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
__atags_pointer);
if (__atags_pointer)
early_print(" r2[]=%*ph\n", 16,
phys_to_virt(__atags_pointer));
early_print(" r2[]=%*ph\n", 16, atags_vaddr);
dump_machine_table();
}

@ -274,7 +274,6 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
if (mdesc->reserve)
mdesc->reserve();
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
/* reserve memory for DMA contiguous allocations */

@ -39,6 +39,8 @@
#include "mm.h"
#include "tcm.h"
extern unsigned long __atags_pointer;
/*
* empty_zero_page is a special page that is used for
* zero-initialized data and COW.
@ -962,7 +964,7 @@ static void __init create_mapping(struct map_desc *md)
return;
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
if (md->type == MT_DEVICE &&
md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
@ -1352,6 +1354,15 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
if (__atags_pointer) {
/* create a read-only mapping of the device tree */
map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
map.virtual = FDT_FIXED_BASE;
map.length = FDT_FIXED_SIZE;
map.type = MT_ROM;
create_mapping(&map);
}
/*
* Map the kernel if it is XIP.
* It is always first in the modulearea.
@ -1512,8 +1523,7 @@ static void __init map_lowmem(void)
}
#ifdef CONFIG_ARM_PV_FIXUP
extern unsigned long __atags_pointer;
typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
typedef void pgtables_remap(long long offset, unsigned long pgd);
pgtables_remap lpae_pgtables_remap_asm;
/*
@ -1526,7 +1536,6 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
unsigned long pa_pgd;
unsigned int cr, ttbcr;
long long offset;
void *boot_data;
if (!mdesc->pv_fixup)
return;
@ -1543,7 +1552,6 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
*/
lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
pa_pgd = __pa(swapper_pg_dir);
boot_data = __va(__atags_pointer);
barrier();
pr_info("Switching physical address space to 0x%08llx\n",
@ -1579,7 +1587,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
* needs to be assembly. It's fairly simple, as we're using the
* temporary tables setup by the initial assembly code.
*/
lpae_pgtables_remap(offset, pa_pgd, boot_data);
lpae_pgtables_remap(offset, pa_pgd);
/* Re-enable the caches and cacheable TLB walks */
asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));

@ -39,8 +39,8 @@ ENTRY(lpae_pgtables_remap_asm)
/* Update level 2 entries for the boot data */
add r7, r2, #0x1000
add r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER
bic r7, r7, #(1 << L2_ORDER) - 1
movw r3, #FDT_FIXED_BASE >> (SECTION_SHIFT - L2_ORDER)
add r7, r7, r3
ldrd r4, r5, [r7]
adds r4, r4, r0
adc r5, r5, r1

@ -14,16 +14,20 @@
struct elf64_shdr; /* forward declration */
struct mod_arch_specific {
/* Used only at module load time. */
struct elf64_shdr *core_plt; /* core PLT section */
struct elf64_shdr *init_plt; /* init PLT section */
struct elf64_shdr *got; /* global offset table */
struct elf64_shdr *opd; /* official procedure descriptors */
struct elf64_shdr *unwind; /* unwind-table section */
unsigned long gp; /* global-pointer for module */
unsigned int next_got_entry; /* index of next available got entry */
/* Used at module run and cleanup time. */
void *core_unw_table; /* core unwind-table cookie returned by unwinder */
void *init_unw_table; /* init unwind-table cookie returned by unwinder */
unsigned int next_got_entry; /* index of next available got entry */
void *opd_addr; /* symbolize uses .opd to get to actual function */
unsigned long opd_size;
};
#define MODULE_PROC_FAMILY "ia64"

@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
struct mod_arch_specific *mas = &mod->arch;
DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
if (mod->arch.unwind)
if (mas->unwind)
register_unwind_table(mod);
/*
* ".opd" was already relocated to the final destination. Store
* it's address for use in symbolizer.
*/
mas->opd_addr = (void *)mas->opd->sh_addr;
mas->opd_size = mas->opd->sh_size;
/*
* Module relocation was already done at this point. Section
* headers are about to be deleted. Wipe out load-time context.
*/
mas->core_plt = NULL;
mas->init_plt = NULL;
mas->got = NULL;
mas->opd = NULL;
mas->unwind = NULL;
mas->gp = 0;
mas->next_got_entry = 0;
return 0;
}
@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
Elf64_Shdr *opd = mod->arch.opd;
struct mod_arch_specific *mas = &mod->arch;
if (ptr < (void *)opd->sh_addr ||
ptr >= (void *)(opd->sh_addr + opd->sh_size))
if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
return ptr;
return dereference_function_descriptor(ptr);

@ -1,5 +1,5 @@
/*
* Copyright (C) 2000, 2004 Maciej W. Rozycki
* Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki
* Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
*
* This file is subject to the terms and conditions of the GNU General Public
@ -9,25 +9,18 @@
#ifndef __ASM_DIV64_H
#define __ASM_DIV64_H
#include <asm-generic/div64.h>
#include <asm/bitsperlong.h>
#if BITS_PER_LONG == 64
#include <linux/types.h>
#if BITS_PER_LONG == 32
/*
* No traps on overflows for any of these...
*/
#define __div64_32(n, base) \
({ \
#define do_div64_32(res, high, low, base) ({ \
unsigned long __cf, __tmp, __tmp2, __i; \
unsigned long __quot32, __mod32; \
unsigned long __high, __low; \
unsigned long long __n; \
\
__high = *__n >> 32; \
__low = __n; \
__asm__( \
" .set push \n" \
" .set noat \n" \
@ -51,18 +44,48 @@
" subu %0, %0, %z6 \n" \
" addiu %2, %2, 1 \n" \
"3: \n" \
" bnez %4, 0b\n\t" \
" srl %5, %1, 0x1f\n\t" \
" bnez %4, 0b \n" \
" srl %5, %1, 0x1f \n" \
" .set pop" \
: "=&r" (__mod32), "=&r" (__tmp), \
"=&r" (__quot32), "=&r" (__cf), \
"=&r" (__i), "=&r" (__tmp2) \
: "Jr" (base), "0" (__high), "1" (__low)); \
: "Jr" (base), "0" (high), "1" (low)); \
\
(__n) = __quot32; \
(res) = __quot32; \
__mod32; \
})
#endif /* BITS_PER_LONG == 64 */
#define __div64_32(n, base) ({ \
unsigned long __upper, __low, __high, __radix; \
unsigned long long __quot; \
unsigned long long __div; \
unsigned long __mod; \
\
__div = (*n); \
__radix = (base); \
\
__high = __div >> 32; \
__low = __div; \
\
if (__high < __radix) { \
__upper = __high; \
__high = 0; \
} else { \
__upper = __high % __radix; \
__high /= __radix; \
} \
\
__mod = do_div64_32(__low, __upper, __low, __radix); \
\
__quot = __high; \
__quot = __quot << 32 | __low; \
(*n) = __quot; \
__mod; \
})
#endif /* BITS_PER_LONG == 32 */
#include <asm-generic/div64.h>
#endif /* __ASM_DIV64_H */
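
The restructured macro is classic two-step long division: divide the high word first (skipping the divide entirely when that quotient would be zero, the case that tripped the handcoded DIVU), then divide remainder:low. A plain-C model of the same algorithm, with 64-bit C standing in for the inline asm:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static uint32_t div64_32(uint64_t *n, uint32_t base)
{
        uint32_t high = *n >> 32, low = (uint32_t)*n, upper;
        uint64_t rest;

        if (high < base) {              /* high quotient would be zero: no divide */
                upper = high;
                high = 0;
        } else {
                upper = high % base;
                high /= base;
        }

        /* the kernel does this step in assembly; upper < base guarantees
         * the quotient below fits in 32 bits */
        rest = ((uint64_t)upper << 32) | low;
        *n = ((uint64_t)high << 32) | (uint32_t)(rest / base);
        return (uint32_t)(rest % base);
}

int main(void)
{
        uint64_t n = 0x0123456789abcdefull;
        uint32_t rem = div64_32(&n, 1000000007u);

        printf("q=%" PRIu64 " r=%" PRIu32 "\n", n, rem);
        return 0;
}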

@ -1057,7 +1057,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
spin_lock(&tbl->pools[i].lock);
spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
iommu_table_release_pages(tbl);
@ -1085,7 +1085,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
spin_lock(&tbl->pools[i].lock);
spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
memset(tbl->it_map, 0, sz);

@ -1254,6 +1254,9 @@ void start_secondary(void *unused)
vdso_getcpu_init();
#endif
set_numa_node(numa_cpu_lookup_table[cpu]);
set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
/* Update topology CPU masks */
add_cpu_to_masks(cpu);
@ -1266,9 +1269,6 @@ void start_secondary(void *unused)
if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
shared_caches = true;
set_numa_node(numa_cpu_lookup_table[cpu]);
set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
smp_wmb();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);

@ -14,6 +14,7 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
@ -221,11 +222,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
: "unknown");
}
static int __do_stf_barrier_fixups(void *data)
{
enum stf_barrier_type *types = data;
do_stf_entry_barrier_fixups(*types);
do_stf_exit_barrier_fixups(*types);
return 0;
}
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
do_stf_entry_barrier_fixups(types);
do_stf_exit_barrier_fixups(types);
/*
* The call to the fallback entry flush, and the fallback/sync-ori exit
* flush can not be safely patched in/out while other CPUs are executing
* them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
* spin in the stop machine core with interrupts hard disabled.
*/
stop_machine(__do_stf_barrier_fixups, &types, NULL);
}
void do_uaccess_flush_fixups(enum l1d_flush_type types)
@ -278,8 +293,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
: "unknown");
}
void do_entry_flush_fixups(enum l1d_flush_type types)
static int __do_entry_flush_fixups(void *data)
{
enum l1d_flush_type types = *(enum l1d_flush_type *)data;
unsigned int instrs[3], *dest;
long *start, *end;
int i;
@ -330,6 +346,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
: "ori type" :
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
: "unknown");
return 0;
}
void do_entry_flush_fixups(enum l1d_flush_type types)
{
/*
* The call to the fallback flush can not be safely patched in/out while
* other CPUs are executing it. So call __do_entry_flush_fixups() on one
* CPU while all other CPUs spin in the stop machine core with interrupts
* hard disabled.
*/
stop_machine(__do_entry_flush_fixups, &types, NULL);
}
void do_rfi_flush_fixups(enum l1d_flush_type types)
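
Both comments describe the same constraint: the fallback sequences can only be patched while no other CPU might be executing them, and stop_machine() provides exactly that window. A stripped-down sketch of the pattern (illustrative kernel-style fragment; real text patching also deals with write protection and cache maintenance):

#include <linux/stop_machine.h>

struct patch_args {
        unsigned int *site;     /* instruction word being rewritten */
        unsigned int insn;      /* replacement instruction */
};

static int __do_patch_one(void *data)
{
        struct patch_args *args = data;

        /* every other CPU is spinning with interrupts hard disabled here */
        *args->site = args->insn;
        return 0;
}

static void patch_one(unsigned int *site, unsigned int insn)
{
        struct patch_args args = { .site = site, .insn = insn };

        /* NULL cpumask: run the callback on one CPU, park the rest */
        stop_machine(__do_patch_one, &args, NULL);
}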

@ -91,9 +91,6 @@ static void rtas_stop_self(void)
BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
panic("Alas, I survived.\n");

@ -51,7 +51,7 @@ int riscv_hartid_to_cpuid(int hartid)
return i;
pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
return i;
return -ENOENT;
}
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)

@ -391,8 +391,6 @@ struct kvm_mmu {
int (*sync_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp);
void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte);
hpa_t root_hpa;
gpa_t root_cr3;
union kvm_mmu_role mmu_role;
@ -944,7 +942,6 @@ struct kvm_arch {
struct kvm_vm_stat {
ulong mmu_shadow_zapped;
ulong mmu_pte_write;
ulong mmu_pte_updated;
ulong mmu_pde_zapped;
ulong mmu_flooded;
ulong mmu_recycled;

@ -2243,13 +2243,6 @@ static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
{
}
static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
const void *pte)
{
WARN_ON(1);
}
#define KVM_PAGE_ARRAY_NR 16
struct kvm_mmu_pages {
@ -4356,7 +4349,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
context->root_level = 0;
context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = true;
@ -4935,7 +4927,6 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
context->gva_to_gpa = paging64_gva_to_gpa;
context->sync_page = paging64_sync_page;
context->invlpg = paging64_invlpg;
context->update_pte = paging64_update_pte;
context->shadow_root_level = level;
context->direct_map = false;
}
@ -4964,7 +4955,6 @@ static void paging32_init_context(struct kvm_vcpu *vcpu,
context->gva_to_gpa = paging32_gva_to_gpa;
context->sync_page = paging32_sync_page;
context->invlpg = paging32_invlpg;
context->update_pte = paging32_update_pte;
context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = false;
}
@ -5039,7 +5029,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->page_fault = tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
context->direct_map = true;
context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
@ -5172,7 +5161,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->gva_to_gpa = ept_gva_to_gpa;
context->sync_page = ept_sync_page;
context->invlpg = ept_invlpg;
context->update_pte = ept_update_pte;
context->root_level = PT64_ROOT_4LEVEL;
context->direct_map = false;
context->mmu_role.as_u64 = new_role.as_u64;
@ -5312,19 +5300,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
const void *new)
{
if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
++vcpu->kvm->stat.mmu_pde_zapped;
return;
}
++vcpu->kvm->stat.mmu_pte_updated;
vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
{
if (!is_shadow_present_pte(old))
@ -5490,14 +5465,10 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
local_flush = true;
while (npte--) {
u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
entry = *spte;
mmu_page_zap_pte(vcpu->kvm, sp, spte);
if (gentry &&
!((sp->role.word ^ base_role)
& mmu_base_role_mask.word) && rmap_can_add(vcpu))
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
if (gentry && sp->role.level != PG_LEVEL_4K)
++vcpu->kvm->stat.mmu_pde_zapped;
if (need_remote_flush(entry, *spte))
remote_flush = true;
++spte;

@ -208,7 +208,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "l1d_flush", VCPU_STAT(l1d_flush) },
{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
{ "mmu_flooded", VM_STAT(mmu_flooded) },
{ "mmu_recycled", VM_STAT(mmu_recycled) },
@ -7357,6 +7356,7 @@ void kvm_arch_exit(void)
cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
#ifdef CONFIG_X86_64
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
cancel_work_sync(&pvclock_gtod_work);
#endif
kvm_x86_ops = NULL;
kvm_mmu_module_exit();

@ -2210,10 +2210,9 @@ static void bfq_remove_request(struct request_queue *q,
}
static bool bfq_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
struct request_queue *q = hctx->queue;
struct bfq_data *bfqd = q->elevator->elevator_data;
struct request *free = NULL;
/*

@ -334,14 +334,16 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
struct blk_mq_ctx *ctx;
struct blk_mq_hw_ctx *hctx;
bool ret = false;
enum hctx_type type;
if (e && e->type->ops.bio_merge)
return e->type->ops.bio_merge(hctx, bio, nr_segs);
return e->type->ops.bio_merge(q, bio, nr_segs);
ctx = blk_mq_get_ctx(q);
hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
type = hctx->type;
if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
!list_empty_careful(&ctx->rq_lists[type])) {

@ -2970,10 +2970,12 @@ EXPORT_SYMBOL(blk_mq_init_allocated_queue);
/* tags can _not_ be used after returning from blk_mq_exit_queue */
void blk_mq_exit_queue(struct request_queue *q)
{
struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_tag_set *set = q->tag_set;
blk_mq_del_queue_tag_set(q);
/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
blk_mq_del_queue_tag_set(q);
}
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)

@ -562,11 +562,12 @@ static void kyber_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
}
}
static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
struct kyber_hctx_data *khd = hctx->sched_data;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
struct list_head *rq_list = &kcq->rq_list[sched_domain];

@ -459,10 +459,9 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
return ELEVATOR_NO_MERGE;
}
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio,
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs)
{
struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data;
struct request *free = NULL;
bool ret;

@ -706,6 +706,7 @@ int acpi_device_add(struct acpi_device *device,
result = acpi_device_set_name(device, acpi_device_bus_id);
if (result) {
kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
goto err_unlock;
}

@ -1610,6 +1610,7 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
dev->power.needs_force_resume = 0;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
@ -1777,10 +1778,12 @@ int pm_runtime_force_suspend(struct device *dev)
* its parent, but set its status to RPM_SUSPENDED anyway in case this
* function will be called again for it in the meantime.
*/
if (pm_runtime_need_not_resume(dev))
if (pm_runtime_need_not_resume(dev)) {
pm_runtime_set_suspended(dev);
else
} else {
__update_runtime_status(dev, RPM_SUSPENDED);
dev->power.needs_force_resume = 1;
}
return 0;
@ -1807,7 +1810,7 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
goto out;
/*
@ -1826,6 +1829,7 @@ int pm_runtime_force_resume(struct device *dev)
pm_runtime_mark_last_busy(dev);
out:
dev->power.needs_force_resume = 0;
pm_runtime_enable(dev);
return ret;
}

@ -2016,7 +2016,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
* config ref and try to destroy the workqueue from inside the work
* queue.
*/
flush_workqueue(nbd->recv_workq);
if (nbd->recv_workq)
flush_workqueue(nbd->recv_workq);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);

@ -962,6 +962,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
if (nr_commands !=
be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
rc = -EFAULT;
tpm_buf_destroy(&buf);
goto out;
}

@ -620,16 +620,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
cap_t cap;
int ret;
/* TPM 2.0 */
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
/* TPM 1.2 */
ret = request_locality(chip, 0);
if (ret < 0)
return ret;
ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
if (chip->flags & TPM_CHIP_FLAG_TPM2)
ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
else
ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
release_locality(chip, 0);
@ -1037,12 +1035,20 @@ int tpm_tis_resume(struct device *dev)
if (ret)
return ret;
/* TPM 1.2 requires self-test on resume. This function actually returns
/*
* TPM 1.2 requires self-test on resume. This function actually returns
* an error code but for unknown reason it isn't handled.
*/
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
ret = request_locality(chip, 0);
if (ret < 0)
return ret;
tpm1_do_selftest(chip);
release_locality(chip, 0);
}
return 0;
}
EXPORT_SYMBOL_GPL(tpm_tis_resume);

@ -537,8 +537,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
CLK_IS_CRITICAL, 0),
/*
* This clock is required for the CMU_FSYS1 registers access, keep it
* enabled permanently until proper runtime PM support is added.
*/
GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT |
CLK_IS_CRITICAL, 0),
GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
"dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,

@ -2050,6 +2050,10 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->triplebuffer_flips = true;
}
}
if (update_type == UPDATE_TYPE_FULL) {
/* force vsync flip when reconfiguring pipes to prevent underflow */
plane_state->flip_immediate = false;
}
}
}
#endif

@ -1,5 +1,5 @@
/*
* Copyright 2012-17 Advanced Micro Devices, Inc.
* Copyright 2012-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -179,11 +179,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
else
Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
value = 1;
} else
value = 0;
if (pipe_dest->htotal != 0) {
if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
value = 1;
} else
value = 0;
}
REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}

@ -181,7 +181,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
struct i915_ggtt_view view;
if (i915_gem_object_is_tiled(obj))
chunk = roundup(chunk, tile_row_pages(obj));
chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
view.type = I915_GGTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);

@ -1554,6 +1554,7 @@ struct radeon_dpm {
void *priv;
u32 new_active_crtcs;
int new_active_crtc_count;
int high_pixelclock_count;
u32 current_active_crtcs;
int current_active_crtc_count;
bool single_display;

@ -2136,11 +2136,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
rdev->pm.power_state[state_index].clock_info =
kcalloc(1, sizeof(struct radeon_pm_clock_info),
GFP_KERNEL);
/* avoid memory leaks from invalid modes or unknown frev. */
if (!rdev->pm.power_state[state_index].clock_info) {
rdev->pm.power_state[state_index].clock_info =
kzalloc(sizeof(struct radeon_pm_clock_info),
GFP_KERNEL);
}
if (!rdev->pm.power_state[state_index].clock_info)
return state_index;
goto out;
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
@ -2259,17 +2262,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
break;
}
}
out:
/* free any unused clock_info allocation. */
if (state_index && state_index < num_modes) {
kfree(rdev->pm.power_state[state_index].clock_info);
rdev->pm.power_state[state_index].clock_info = NULL;
}
/* last mode is usually default */
if (rdev->pm.default_power_state_index == -1) {
if (state_index && rdev->pm.default_power_state_index == -1) {
rdev->pm.power_state[state_index - 1].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state_index = state_index - 1;
rdev->pm.power_state[state_index - 1].default_clock_mode =
&rdev->pm.power_state[state_index - 1].clock_info[0];
rdev->pm.power_state[state_index].flags &=
rdev->pm.power_state[state_index - 1].flags &=
~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
rdev->pm.power_state[state_index].misc = 0;
rdev->pm.power_state[state_index].misc2 = 0;
rdev->pm.power_state[state_index - 1].misc = 0;
rdev->pm.power_state[state_index - 1].misc2 = 0;
}
return state_index;
}

@ -1720,6 +1720,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_connector *radeon_connector;
if (!rdev->pm.dpm_enabled)
return;
@ -1729,6 +1730,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
/* update active crtc counts */
rdev->pm.dpm.new_active_crtcs = 0;
rdev->pm.dpm.new_active_crtc_count = 0;
rdev->pm.dpm.high_pixelclock_count = 0;
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
@ -1736,6 +1738,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
if (crtc->enabled) {
rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
rdev->pm.dpm.new_active_crtc_count++;
if (!radeon_crtc->connector)
continue;
radeon_connector = to_radeon_connector(radeon_crtc->connector);
if (radeon_connector->pixelclock_for_modeset > 297000)
rdev->pm.dpm.high_pixelclock_count++;
}
}
}

@ -3002,6 +3002,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6605)) {
max_sclk = 75000;
}
if (rdev->pm.dpm.high_pixelclock_count > 1)
disable_sclk_switching = true;
}
if (rps->vce_active) {

@ -209,9 +209,9 @@ int occ_update_response(struct occ *occ)
return rc;
/* limit the maximum rate of polling the OCC */
if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
if (time_after(jiffies, occ->next_update)) {
rc = occ_poll(occ);
occ->last_update = jiffies;
occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
} else {
rc = occ->last_error;
}
@ -1089,6 +1089,7 @@ int occ_setup(struct occ *occ, const char *name)
return rc;
}
occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
occ_parse_poll_response(occ);
rc = occ_setup_sensor_attrs(occ);

@ -99,7 +99,7 @@ struct occ {
u8 poll_cmd_data; /* to perform OCC poll command */
int (*send_cmd)(struct occ *occ, u8 *cmd);
unsigned long last_update;
unsigned long next_update;
struct mutex lock; /* lock OCC access */
struct device *hwmon;
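
The subtlety being fixed is jiffies wraparound: last_update started at zero, and on the 32-bit BMCs this driver runs on, jiffies boots near the wrap point (INITIAL_JIFFIES), so time_after(jiffies, 0 + OCC_UPDATE_FREQUENCY) stayed false and early polls were wrongly rate-limited for roughly the first five minutes of uptime. Priming an explicit next_update deadline at probe avoids the stale zero. A userspace model of the comparison (HZ and the interval are illustrative):

#include <stdio.h>

#define HZ 100ul
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long jiffies = -(300 * HZ);    /* models INITIAL_JIFFIES: boot sits near wrap */
        unsigned long freq = 2 * HZ;
        unsigned long next_update = jiffies + freq;     /* primed at setup, as in the patch */

        /* old check with last_update == 0: claims "too soon" right after boot */
        printf("old: %d\n", time_after(jiffies, 0 + freq));                     /* 0 */

        /* new check: correctly waits out one interval, then fires */
        printf("new, before deadline: %d\n", time_after(jiffies, next_update));            /* 0 */
        printf("new, after deadline:  %d\n", time_after(jiffies + 2 * freq, next_update)); /* 1 */
        return 0;
}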

@ -440,8 +440,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
sizeof(rdwr_arg)))
return -EFAULT;
/* Put an arbitrary limit on the number of messages that can
* be sent at once */
if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
return -EINVAL;
/*
* Put an arbitrary limit on the number of messages that can
* be sent at once
*/
if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
return -EINVAL;
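
For context, this is the ioctl being guarded; a NULL msgs pointer or nmsgs == 0 from userspace now fails fast with -EINVAL instead of reaching the allocator. A sketch of the userspace side (bus path and chip address are made up):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
        unsigned char reg = 0x00, val = 0;
        struct i2c_msg msgs[2] = {
                { .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
        };
        struct i2c_rdwr_ioctl_data rdwr = { .msgs = msgs, .nmsgs = 2 };
        int fd = open("/dev/i2c-1", O_RDWR);    /* hypothetical bus */

        if (fd < 0)
                return 1;
        if (ioctl(fd, I2C_RDWR, &rdwr) < 0)     /* .msgs = NULL would now get -EINVAL */
                perror("I2C_RDWR");
        else
                printf("reg 0x00 = 0x%02x\n", val);
        close(fd);
        return 0;
}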

@ -271,7 +271,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_OFFSET:
switch (chan->type) {
case IIO_TEMP:
/* The temperature scaling is (x+23000)/280 Celsius */
/*
* The temperature scaling is (x+23000)/280 Celsius
* for the "best fit straight line" temperature range
* of -30C..85C. The 23000 includes room temperature
* offset of +35C, 280 is the precision scale and x is
* the 16-bit signed integer reported by hardware.
*
* Temperature value itself represents temperature of
* the sensor die.
*/
*val = 23000;
return IIO_VAL_INT;
default:
@ -328,7 +337,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
goto out_read_raw_unlock;
}
*val = be16_to_cpu(raw_val);
*val = (s16)be16_to_cpu(raw_val);
ret = IIO_VAL_INT;
goto out_read_raw_unlock;
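
The scaling comment invites a sanity check, and the same numbers show why the (s16) cast matters: without it a negative raw sample is read as a large unsigned value. Worked example (raw bits chosen for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t raw = 0xcc70;  /* example register bits: -13200 as a signed 16-bit value */

        /* unfixed: 52336 -> (23000 + 52336) / 280 = 269 degrees C, nonsense */
        printf("no cast:   %d\n", (23000 + raw) / 280);
        /* fixed: -13200 -> (23000 - 13200) / 280 = 35 degrees C of die temperature */
        printf("with cast: %d\n", (23000 + (int16_t)raw) / 280);
        return 0;
}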

@ -341,6 +341,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
return lux_val;
}
/* Avoid division by zero of lux_value later on */
if (lux_val == 0) {
dev_err(&chip->client->dev,
"%s: lux_val of 0 will produce out of range trim_value\n",
__func__);
return -ENODATA;
}
gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
* chip->als_settings.als_gain_trim) / lux_val);
if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {

@ -158,6 +158,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
if (ret < 0) {
dev_err(&client->dev, "cannot send start measurement command");
pm_runtime_put_noidle(&client->dev);
return ret;
}

@ -12,7 +12,6 @@
#include <linux/acpi.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/interrupt.h>
@ -254,8 +253,6 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
static int __init iommu_go_to_state(enum iommu_init_state state);
static void init_device_table_dma(void);
static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
u8 fxn, u64 *value, bool is_write);
static bool amd_iommu_pre_enabled = true;
@ -1675,53 +1672,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
return 0;
}
static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
static void init_iommu_perf_ctr(struct amd_iommu *iommu)
{
int retry;
u64 val;
struct pci_dev *pdev = iommu->dev;
u64 val = 0xabcd, val2 = 0, save_reg, save_src;
if (!iommu_feature(iommu, FEATURE_PC))
return;
amd_iommu_pc_present = true;
/* save the value to restore, if writable */
if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false) ||
iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, false))
goto pc_false;
/*
* Disable power gating by programing the performance counter
* source to 20 (i.e. counts the reads and writes from/to IOMMU
* Reserved Register [MMIO Offset 1FF8h] that are ignored.),
* which never get incremented during this init phase.
* (Note: The event is also deprecated.)
*/
val = 20;
if (iommu_pc_get_set_reg(iommu, 0, 0, 8, &val, true))
goto pc_false;
/* Check if the performance counters can be written to */
val = 0xabcd;
for (retry = 5; retry; retry--) {
if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true) ||
iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false) ||
val2)
break;
/* Wait about 20 msec for power gating to disable and retry. */
msleep(20);
}
/* restore */
if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true) ||
iommu_pc_get_set_reg(iommu, 0, 0, 8, &save_src, true))
goto pc_false;
if (val != val2)
goto pc_false;
pci_info(pdev, "IOMMU performance counters supported\n");
val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
@ -1729,11 +1689,6 @@ static void __init init_iommu_perf_ctr(struct amd_iommu *iommu)
iommu->max_counters = (u8) ((val >> 7) & 0xf);
return;
pc_false:
pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
amd_iommu_pc_present = false;
return;
}
static ssize_t amd_iommu_show_cap(struct device *dev,

@ -1418,6 +1418,8 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
int i;
int putidx;
cdev->tx_skb = NULL;
/* Generate ID field for TX buffer Element */
/* Common to all supported M_CAN versions */
if (cf->can_id & CAN_EFF_FLAG) {
@ -1534,7 +1536,6 @@ static void m_can_tx_work_queue(struct work_struct *ws)
tx_work);
m_can_tx_handler(cdev);
cdev->tx_skb = NULL;
}
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,

@ -125,7 +125,10 @@ enum board_idx {
NETXTREME_E_VF,
NETXTREME_C_VF,
NETXTREME_S_VF,
NETXTREME_C_VF_HV,
NETXTREME_E_VF_HV,
NETXTREME_E_P5_VF,
NETXTREME_E_P5_VF_HV,
};
/* indexed by enum above */
@ -173,7 +176,10 @@ static const struct {
[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@ -225,15 +231,25 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@ -263,7 +279,8 @@ static struct workqueue_struct *bnxt_pf_wq;
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)

@ -803,7 +803,7 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
return err;
}
static inline void enic_queue_wq_skb(struct enic *enic,
static inline int enic_queue_wq_skb(struct enic *enic,
struct vnic_wq *wq, struct sk_buff *skb)
{
unsigned int mss = skb_shinfo(skb)->gso_size;
@ -849,6 +849,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
wq->to_use = buf->next;
dev_kfree_skb(skb);
}
return err;
}
/* netif_tx_lock held, process context with BHs disabled, or BH */
@ -892,7 +893,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
enic_queue_wq_skb(enic, wq, skb);
if (enic_queue_wq_skb(enic, wq, skb))
goto error;
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
netif_tx_stop_queue(txq);
@ -900,6 +902,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (!netdev_xmit_more() || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
error:
spin_unlock(&enic->wq_lock[txq_map]);
return NETDEV_TX_OK;

@ -539,8 +539,8 @@ static int hns3_nic_net_stop(struct net_device *netdev)
if (h->ae_algo->ops->set_timer_task)
h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
hns3_nic_net_down(netdev);
@ -796,7 +796,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
* and it is udp packet, which has a dest port as the IANA assigned.
* the hardware is expected to do the checksum offload, but the
* hardware will not do the checksum offload when udp dest port is
* 4789 or 6081.
* 4789, 4790 or 6081.
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
@ -806,7 +806,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
if (!(!skb->encapsulation &&
(l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
l4.udp->dest == htons(GENEVE_UDP_PORT))))
l4.udp->dest == htons(GENEVE_UDP_PORT) ||
l4.udp->dest == htons(4790))))
return false;
skb_checksum_help(skb);
@ -4280,6 +4281,11 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
int ret = 0;
if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_err(kinfo->netdev, "device is not initialized yet\n");
return -EFAULT;
}
clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
if (netif_running(kinfo->netdev)) {
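
The double negation in hns3_tunnel_csum_bug() is easy to misread; untangled, the rule the comment states is: fall back to software checksumming for un-encapsulated UDP packets aimed at tunnel ports this hardware cannot offload. A restatement of just that predicate (helper name is ours, ports per the comment):

#include <stdbool.h>
#include <stdint.h>

/* VXLAN 4789, VXLAN-GPE 4790, GENEVE 6081 */
static bool needs_sw_csum(bool encapsulated, uint16_t udp_dest)
{
        if (encapsulated)
                return false;
        return udp_dest == 4789 || udp_dest == 4790 || udp_dest == 6081;
}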

@ -753,8 +753,9 @@ static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure IGU,EGU error interrupts */
hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE);
if (en)
desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);

@ -33,7 +33,8 @@
#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF
#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000
#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000
#define HCLGE_IGU_ERR_INT_EN 0x0000066F
#define HCLGE_IGU_ERR_INT_EN 0x0000000F
#define HCLGE_IGU_ERR_INT_TYPE 0x00000660
#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F
#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF
#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F

@ -455,7 +455,7 @@ static void hclge_get_link_mode(struct hclge_vport *vport,
unsigned long advertising;
unsigned long supported;
unsigned long send_data;
u8 msg_data[10];
u8 msg_data[10] = {};
u8 dest_vfid;
advertising = hdev->hw.mac.advertising[0];

@ -255,6 +255,8 @@ void hclge_mac_start_phy(struct hclge_dev *hdev)
if (!phydev)
return;
phy_loopback(phydev, false);
phy_start(phydev);
}

@ -1893,8 +1893,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_25GBASE_AOC = 0x23,
I40E_PHY_TYPE_25GBASE_ACC = 0x24,
I40E_PHY_TYPE_2_5GBASE_T = 0x30,
I40E_PHY_TYPE_5GBASE_T = 0x31,
I40E_PHY_TYPE_2_5GBASE_T = 0x26,
I40E_PHY_TYPE_5GBASE_T = 0x27,
I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS = 0x30,
I40E_PHY_TYPE_5GBASE_T_LINK_STATUS = 0x31,
I40E_PHY_TYPE_MAX,
I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
I40E_PHY_TYPE_EMPTY = 0xFE,

@@ -377,6 +377,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
 				clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
 					  &cdev->state);
 				i40e_client_del_instance(pf);
+				return;
 			}
 		}
 	}


@@ -1156,8 +1156,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
 		break;
 	case I40E_PHY_TYPE_100BASE_TX:
 	case I40E_PHY_TYPE_1000BASE_T:
-	case I40E_PHY_TYPE_2_5GBASE_T:
-	case I40E_PHY_TYPE_5GBASE_T:
+	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
+	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
 	case I40E_PHY_TYPE_10GBASE_T:
 		media = I40E_MEDIA_TYPE_BASET;
 		break;


@@ -839,8 +839,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
 							     10000baseT_Full);
 		break;
 	case I40E_PHY_TYPE_10GBASE_T:
-	case I40E_PHY_TYPE_5GBASE_T:
-	case I40E_PHY_TYPE_2_5GBASE_T:
+	case I40E_PHY_TYPE_5GBASE_T_LINK_STATUS:
+	case I40E_PHY_TYPE_2_5GBASE_T_LINK_STATUS:
 	case I40E_PHY_TYPE_1000BASE_T:
 	case I40E_PHY_TYPE_100BASE_TX:
 		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
@@ -1406,7 +1406,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
 		memset(&config, 0, sizeof(config));
 		config.phy_type = abilities.phy_type;
-		config.abilities = abilities.abilities;
+		config.abilities = abilities.abilities |
+				   I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
 		config.phy_type_ext = abilities.phy_type_ext;
 		config.link_speed = abilities.link_speed;
 		config.eee_capability = abilities.eee_capability;


@@ -253,11 +253,8 @@ struct i40e_phy_info {
 #define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
 					     I40E_PHY_TYPE_OFFSET)
 /* Offset for 2.5G/5G PHY Types value to bit number conversion */
-#define I40E_PHY_TYPE_OFFSET2 (-10)
-#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
-					     I40E_PHY_TYPE_OFFSET2)
-#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
-					     I40E_PHY_TYPE_OFFSET2)
+#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T)
+#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T)
 #define I40E_HW_CAP_MAX_GPIO			30
 /* Capabilities of a PF or a VF or the whole device */
 struct i40e_hw_capabilities {


@@ -3906,8 +3906,6 @@ static void iavf_remove(struct pci_dev *pdev)
 	iounmap(hw->hw_addr);
 	pci_release_regions(pdev);
-	iavf_free_all_tx_resources(adapter);
-	iavf_free_all_rx_resources(adapter);
 	iavf_free_queues(adapter);
 	kfree(adapter->vf_res);
 	spin_lock_bh(&adapter->mac_vlan_list_lock);


@@ -1315,7 +1315,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		skb->protocol = eth_type_trans(skb, netdev);
 
 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
-		    RX_DMA_VID(trxd.rxd3))
+		    (trxd.rxd2 & RX_DMA_VTAG))
 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 					       RX_DMA_VID(trxd.rxd3));
 		skb_record_rx_queue(skb, 0);


@@ -293,6 +293,7 @@
 #define RX_DMA_LSO		BIT(30)
 #define RX_DMA_PLEN0(_x)	(((_x) & 0x3fff) << 16)
 #define RX_DMA_GET_PLEN0(_x)	(((_x) >> 16) & 0x3fff)
+#define RX_DMA_VTAG		BIT(15)
 
 /* QDMA descriptor rxd3 */
 #define RX_DMA_VID(_x)		((_x) & 0xfff)


@@ -351,6 +351,8 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
 	plat_dat->bsp_priv = gmac;
 	plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
 	plat_dat->multicast_filter_bins = 0;
+	plat_dat->tx_fifo_size = 8192;
+	plat_dat->rx_fifo_size = 8192;
 
 	err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 	if (err)


@@ -40,17 +40,20 @@ config DEFXX
 config DEFXX_MMIO
 	bool
-	prompt "Use MMIO instead of PIO" if PCI || EISA
+	prompt "Use MMIO instead of IOP" if PCI || EISA
 	depends on DEFXX
-	default n if PCI || EISA
+	default n if EISA
 	default y
 	---help---
 	  This instructs the driver to use EISA or PCI memory-mapped I/O
-	  (MMIO) as appropriate instead of programmed I/O ports (PIO).
+	  (MMIO) as appropriate instead of programmed I/O ports (IOP).
 	  Enabling this gives an improvement in processing time in parts
-	  of the driver, but it may cause problems with EISA (DEFEA)
-	  adapters.  TURBOchannel does not have the concept of I/O ports,
-	  so MMIO is always used for these (DEFTA) adapters.
+	  of the driver, but it requires a memory window to be configured
+	  for EISA (DEFEA) adapters that may not always be available.
+	  Conversely some PCIe host bridges do not support IOP, so MMIO
+	  may be required to access PCI (DEFPA) adapters on downstream PCI
+	  buses with some systems.  TURBOchannel does not have the concept
+	  of I/O ports, so MMIO is always used for these (DEFTA) adapters.
 
 	  If unsure, say N.


@@ -450,6 +450,10 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
 		return -EOPNOTSUPP;
 
+	/* MT76x0 GTK offloading does not work with more than one VIF */
+	if (is_mt76x0(dev) && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		return -EOPNOTSUPP;
+
 	msta = sta ? (struct mt76x02_sta *)sta->drv_priv : NULL;
 	wcid = msta ? &msta->wcid : &mvif->group_wcid;


@@ -599,8 +599,10 @@ qtnf_event_handle_external_auth(struct qtnf_vif *vif,
 		return 0;
 
 	if (ev->ssid_len) {
-		memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len);
-		auth.ssid.ssid_len = ev->ssid_len;
+		int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN);
+
+		memcpy(auth.ssid.ssid, ev->ssid, len);
+		auth.ssid.ssid_len = len;
 	}
 
 	auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite);
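
The pattern here generalizes: a length field arriving from firmware or hardware is untrusted input and must be bounded by the destination buffer before memcpy(). A stand-alone sketch (userspace C; MAX_SSID_LEN stands in for IEEE80211_MAX_SSID_LEN, the clamp written out by hand):

    #include <stdio.h>
    #include <string.h>

    #define MAX_SSID_LEN 32            /* stand-in for IEEE80211_MAX_SSID_LEN */

    int main(void)
    {
        unsigned char dst[MAX_SSID_LEN];
        unsigned char src[64] = "ssid-from-firmware";
        size_t ev_len = 200;           /* device-controlled, deliberately bogus */
        size_t len = ev_len < MAX_SSID_LEN ? ev_len : MAX_SSID_LEN;

        memcpy(dst, src, len);         /* copy can no longer overrun dst */
        printf("copied %zu of %zu requested bytes\n", len, ev_len);
        return 0;
    }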


@@ -379,16 +379,7 @@ struct wl3501_get_confirm {
 	u8 mib_value[100];
 };
 
-struct wl3501_join_req {
-	u16 next_blk;
-	u8 sig_id;
-	u8 reserved;
-	struct iw_mgmt_data_rset operational_rset;
-	u16 reserved2;
-	u16 timeout;
-	u16 probe_delay;
-	u8 timestamp[8];
-	u8 local_time[8];
+struct wl3501_req {
 	u16 beacon_period;
 	u16 dtim_period;
 	u16 cap_info;
@@ -401,6 +392,19 @@ struct wl3501_join_req {
 	struct iw_mgmt_data_rset bss_basic_rset;
 };
 
+struct wl3501_join_req {
+	u16 next_blk;
+	u8 sig_id;
+	u8 reserved;
+	struct iw_mgmt_data_rset operational_rset;
+	u16 reserved2;
+	u16 timeout;
+	u16 probe_delay;
+	u8 timestamp[8];
+	u8 local_time[8];
+	struct wl3501_req req;
+};
+
 struct wl3501_join_confirm {
 	u16 next_blk;
 	u8 sig_id;
@@ -443,16 +447,7 @@ struct wl3501_scan_confirm {
 	u16 status;
 	char timestamp[8];
 	char localtime[8];
-	u16 beacon_period;
-	u16 dtim_period;
-	u16 cap_info;
-	u8 bss_type;
-	u8 bssid[ETH_ALEN];
-	struct iw_mgmt_essid_pset ssid;
-	struct iw_mgmt_ds_pset ds_pset;
-	struct iw_mgmt_cf_pset cf_pset;
-	struct iw_mgmt_ibss_pset ibss_pset;
-	struct iw_mgmt_data_rset bss_basic_rset;
+	struct wl3501_req req;
 	u8 rssi;
 };
 
@@ -471,8 +466,10 @@ struct wl3501_md_req {
 	u16 size;
 	u8 pri;
 	u8 service_class;
-	u8 daddr[ETH_ALEN];
-	u8 saddr[ETH_ALEN];
+	struct {
+		u8 daddr[ETH_ALEN];
+		u8 saddr[ETH_ALEN];
+	} addr;
 };
 
 struct wl3501_md_ind {
@@ -484,8 +481,10 @@ struct wl3501_md_ind {
 	u8 reception;
 	u8 pri;
 	u8 service_class;
-	u8 daddr[ETH_ALEN];
-	u8 saddr[ETH_ALEN];
+	struct {
+		u8 daddr[ETH_ALEN];
+		u8 saddr[ETH_ALEN];
+	} addr;
 };
 
 struct wl3501_md_confirm {


@@ -469,6 +469,7 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
 	struct wl3501_md_req sig = {
 		.sig_id = WL3501_SIG_MD_REQ,
 	};
+	size_t sig_addr_len = sizeof(sig.addr);
 	u8 *pdata = (char *)data;
 	int rc = -EIO;
 
@@ -484,9 +485,9 @@ static int wl3501_send_pkt(struct wl3501_card *this, u8 *data, u16 len)
 			goto out;
 		}
 		rc = 0;
-		memcpy(&sig.daddr[0], pdata, 12);
-		pktlen = len - 12;
-		pdata += 12;
+		memcpy(&sig.addr, pdata, sig_addr_len);
+		pktlen = len - sig_addr_len;
+		pdata += sig_addr_len;
 		sig.data = bf;
 		if (((*pdata) * 256 + (*(pdata + 1))) > 1500) {
 			u8 addr4[ETH_ALEN] = {
@@ -589,7 +590,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
 	struct wl3501_join_req sig = {
 		.sig_id = WL3501_SIG_JOIN_REQ,
 		.timeout = 10,
-		.ds_pset = {
+		.req.ds_pset = {
 			.el = {
 				.id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET,
 				.len = 1,
@@ -598,7 +599,7 @@ static int wl3501_mgmt_join(struct wl3501_card *this, u16 stas)
 		},
 	};
 
-	memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72);
+	memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req));
 	return wl3501_esbq_exec(this, &sig, sizeof(sig));
 }
 
@@ -666,35 +667,37 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
 	if (sig.status == WL3501_STATUS_SUCCESS) {
 		pr_debug("success");
 		if ((this->net_type == IW_MODE_INFRA &&
-		     (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
+		     (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) ||
 		    (this->net_type == IW_MODE_ADHOC &&
-		     (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
+		     (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) ||
 		    this->net_type == IW_MODE_AUTO) {
 			if (!this->essid.el.len)
 				matchflag = 1;
 			else if (this->essid.el.len == 3 &&
 				 !memcmp(this->essid.essid, "ANY", 3))
 				matchflag = 1;
-			else if (this->essid.el.len != sig.ssid.el.len)
+			else if (this->essid.el.len != sig.req.ssid.el.len)
 				matchflag = 0;
-			else if (memcmp(this->essid.essid, sig.ssid.essid,
+			else if (memcmp(this->essid.essid, sig.req.ssid.essid,
 					this->essid.el.len))
 				matchflag = 0;
 			else
 				matchflag = 1;
 			if (matchflag) {
 				for (i = 0; i < this->bss_cnt; i++) {
-					if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) {
+					if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid,
+								       sig.req.bssid)) {
 						matchflag = 0;
 						break;
 					}
 				}
 			}
 			if (matchflag && (i < 20)) {
-				memcpy(&this->bss_set[i].beacon_period,
-				       &sig.beacon_period, 73);
+				memcpy(&this->bss_set[i].req,
+				       &sig.req, sizeof(sig.req));
 				this->bss_cnt++;
 				this->rssi = sig.rssi;
+				this->bss_set[i].rssi = sig.rssi;
 			}
 		}
 	} else if (sig.status == WL3501_STATUS_TIMEOUT) {
@@ -886,19 +889,19 @@ static void wl3501_mgmt_join_confirm(struct net_device *dev, u16 addr)
 		if (this->join_sta_bss < this->bss_cnt) {
 			const int i = this->join_sta_bss;
 
 			memcpy(this->bssid,
-			       this->bss_set[i].bssid, ETH_ALEN);
-			this->chan = this->bss_set[i].ds_pset.chan;
+			       this->bss_set[i].req.bssid, ETH_ALEN);
+			this->chan = this->bss_set[i].req.ds_pset.chan;
 			iw_copy_mgmt_info_element(&this->keep_essid.el,
-						  &this->bss_set[i].ssid.el);
+						  &this->bss_set[i].req.ssid.el);
 			wl3501_mgmt_auth(this);
 		}
 	} else {
 		const int i = this->join_sta_bss;
 
-		memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN);
-		this->chan = this->bss_set[i].ds_pset.chan;
+		memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN);
+		this->chan = this->bss_set[i].req.ds_pset.chan;
 		iw_copy_mgmt_info_element(&this->keep_essid.el,
-					  &this->bss_set[i].ssid.el);
+					  &this->bss_set[i].req.ssid.el);
 		wl3501_online(dev);
 	}
 } else {
@@ -980,7 +983,8 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev,
 	} else {
 		skb->dev = dev;
 		skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */
-		skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12);
+		skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr,
+					sizeof(sig.addr));
 		wl3501_receive(this, skb->data, pkt_len);
 		skb_put(skb, pkt_len);
 		skb->protocol = eth_type_trans(skb, dev);
@@ -1573,30 +1577,30 @@ static int wl3501_get_scan(struct net_device *dev, struct iw_request_info *info,
 	for (i = 0; i < this->bss_cnt; ++i) {
 		iwe.cmd = SIOCGIWAP;
 		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
-		memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN);
+		memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN);
 		current_ev = iwe_stream_add_event(info, current_ev,
 						  extra + IW_SCAN_MAX_DATA,
 						  &iwe, IW_EV_ADDR_LEN);
 		iwe.cmd = SIOCGIWESSID;
 		iwe.u.data.flags = 1;
-		iwe.u.data.length = this->bss_set[i].ssid.el.len;
+		iwe.u.data.length = this->bss_set[i].req.ssid.el.len;
 		current_ev = iwe_stream_add_point(info, current_ev,
 						  extra + IW_SCAN_MAX_DATA,
 						  &iwe,
-						  this->bss_set[i].ssid.essid);
+						  this->bss_set[i].req.ssid.essid);
 		iwe.cmd = SIOCGIWMODE;
-		iwe.u.mode = this->bss_set[i].bss_type;
+		iwe.u.mode = this->bss_set[i].req.bss_type;
 		current_ev = iwe_stream_add_event(info, current_ev,
 						  extra + IW_SCAN_MAX_DATA,
 						  &iwe, IW_EV_UINT_LEN);
 		iwe.cmd = SIOCGIWFREQ;
-		iwe.u.freq.m = this->bss_set[i].ds_pset.chan;
+		iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan;
 		iwe.u.freq.e = 0;
 		current_ev = iwe_stream_add_event(info, current_ev,
 						  extra + IW_SCAN_MAX_DATA,
 						  &iwe, IW_EV_FREQ_LEN);
 		iwe.cmd = SIOCGIWENCODE;
-		if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
+		if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY)
 			iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
 		else
 			iwe.u.data.flags = IW_ENCODE_DISABLED;
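
All of the wl3501 churn above is one refactor: fields that were copied as a unit with hand-counted lengths (12, 72, 73 bytes) now live in named structs (wl3501_req, the addr pair), so every copy is bounded by sizeof() and visible to the compiler's bounds checking. The shape of the refactor in miniature (userspace C; struct names are stand-ins, not the driver's definitions):

    #include <stdio.h>
    #include <string.h>

    struct req {                       /* stands in for struct wl3501_req */
        unsigned short beacon_period;
        unsigned short dtim_period;
        unsigned char  bssid[6];
    };

    struct scan_confirm {              /* stands in for wl3501_scan_confirm */
        struct req    req;
        unsigned char rssi;
    };

    int main(void)
    {
        struct scan_confirm sig = { .req = { .beacon_period = 100 }, .rssi = 42 };
        struct scan_confirm slot;

        memcpy(&slot.req, &sig.req, sizeof(slot.req)); /* bounded by the type */
        slot.rssi = sig.rssi;
        printf("beacon=%d rssi=%d\n", slot.req.beacon_period, slot.rssi);
        return 0;
    }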


@@ -2414,7 +2414,8 @@ static void nvme_set_latency_tolerance(struct device *dev, s32 val)
 
 	if (ctrl->ps_max_latency_us != latency) {
 		ctrl->ps_max_latency_us = latency;
-		nvme_configure_apst(ctrl);
+		if (ctrl->state == NVME_CTRL_LIVE)
+			nvme_configure_apst(ctrl);
 	}
 }


@@ -271,7 +271,7 @@ static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
 				    NULL, NULL);
 	}
 
-	return hwirq;
+	return 0;
 }
 
 static void iproc_msi_irq_domain_free(struct irq_domain *domain,


@@ -604,6 +604,7 @@ static int __init pci_epf_test_init(void)
 	ret = pci_epf_register_driver(&test_driver);
 	if (ret) {
+		destroy_workqueue(kpcitest_workqueue);
 		pr_err("Failed to register pci epf test driver --> %d\n", ret);
 		return ret;
 	}
@@ -614,6 +615,8 @@ module_init(pci_epf_test_init);
 
 static void __exit pci_epf_test_exit(void)
 {
+	if (kpcitest_workqueue)
+		destroy_workqueue(kpcitest_workqueue);
 	pci_epf_unregister_driver(&test_driver);
 }
 module_exit(pci_epf_test_exit);


@@ -2299,6 +2299,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
 	pci_set_of_node(dev);
 
 	if (pci_setup_device(dev)) {
+		pci_release_of_node(dev);
 		pci_bus_put(dev->bus);
 		kfree(dev);
 		return NULL;


@@ -55,7 +55,7 @@ static void exynos_irq_mask(struct irq_data *irqd)
 	struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
 	unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
-	unsigned long mask;
+	unsigned int mask;
 	unsigned long flags;
 
 	spin_lock_irqsave(&bank->slock, flags);
@@ -83,7 +83,7 @@ static void exynos_irq_unmask(struct irq_data *irqd)
 	struct exynos_irq_chip *our_chip = to_exynos_irq_chip(chip);
 	struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(irqd);
 	unsigned long reg_mask = our_chip->eint_mask + bank->eint_offset;
-	unsigned long mask;
+	unsigned int mask;
 	unsigned long flags;
 
 	/*
@@ -474,7 +474,7 @@ static void exynos_irq_eint0_15(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
-static inline void exynos_irq_demux_eint(unsigned long pend,
+static inline void exynos_irq_demux_eint(unsigned int pend,
 					 struct irq_domain *domain)
 {
 	unsigned int irq;
@@ -491,8 +491,8 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
-	unsigned long pend;
-	unsigned long mask;
+	unsigned int pend;
+	unsigned int mask;
 	int i;
 
 	chained_irq_enter(chip, desc);
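
The point of the type change: these EINT registers are 32 bits wide, while unsigned long is 64 bits on arm64, so expressions like ~mask carried 32 extra set bits into what readl()/writel() treat as a 32-bit quantity. A quick demonstration (userspace C):

    #include <stdio.h>

    int main(void)
    {
        unsigned long wide   = 1UL << 3;   /* 64-bit on arm64/x86-64 kernels */
        unsigned int  narrow = 1U << 3;    /* matches a 32-bit EINT register */

        printf("~wide   = 0x%lx\n", ~wide);    /* 0xfffffffffffffff7 */
        printf("~narrow = 0x%x\n",  ~narrow);  /* 0xfffffff7 */
        return 0;
    }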


@@ -857,6 +857,7 @@ static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
 			dev_err(glink->dev,
 				"no intent found for channel %s intent %d",
 				channel->name, liid);
+			ret = -ENOENT;
 			goto advance_rx;
 		}
 	}


@@ -265,7 +265,11 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t)
 	t->tm_min = bcd2bin(regs[DS1307_REG_MIN] & 0x7f);
 	tmp = regs[DS1307_REG_HOUR] & 0x3f;
 	t->tm_hour = bcd2bin(tmp);
-	t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
+	/* rx8130 is bit position, not BCD */
+	if (ds1307->type == rx_8130)
+		t->tm_wday = fls(regs[DS1307_REG_WDAY] & 0x7f);
+	else
+		t->tm_wday = bcd2bin(regs[DS1307_REG_WDAY] & 0x07) - 1;
 	t->tm_mday = bcd2bin(regs[DS1307_REG_MDAY] & 0x3f);
 	tmp = regs[DS1307_REG_MONTH] & 0x1f;
 	t->tm_mon = bcd2bin(tmp) - 1;
@@ -312,7 +316,11 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t)
 	regs[DS1307_REG_SECS] = bin2bcd(t->tm_sec);
 	regs[DS1307_REG_MIN] = bin2bcd(t->tm_min);
 	regs[DS1307_REG_HOUR] = bin2bcd(t->tm_hour);
-	regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
+	/* rx8130 is bit position, not BCD */
+	if (ds1307->type == rx_8130)
+		regs[DS1307_REG_WDAY] = 1 << t->tm_wday;
+	else
+		regs[DS1307_REG_WDAY] = bin2bcd(t->tm_wday + 1);
 	regs[DS1307_REG_MDAY] = bin2bcd(t->tm_mday);
 	regs[DS1307_REG_MONTH] = bin2bcd(t->tm_mon + 1);
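
The two encodings the branches distinguish, side by side: most DS13xx parts keep the weekday as BCD 1..7, while the RX8130 keeps it as a single bit at the weekday's position. A runnable comparison (userspace C; bin2bcd reimplemented here for illustration):

    #include <stdio.h>

    static unsigned int bin2bcd(unsigned int v)
    {
        return ((v / 10) << 4) | (v % 10);
    }

    int main(void)
    {
        for (int wday = 0; wday < 7; wday++)   /* tm_wday: 0 = Sunday */
            printf("tm_wday=%d  bcd=0x%02x  rx8130=0x%02x\n",
                   wday, bin2bcd(wday + 1), 1u << wday);
        return 0;
    }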


@@ -316,6 +316,7 @@ static const struct of_device_id ftm_rtc_match[] = {
 	{ .compatible = "fsl,lx2160a-ftm-alarm", },
 	{ },
 };
+MODULE_DEVICE_TABLE(of, ftm_rtc_match);
 
 static struct platform_driver ftm_rtc_driver = {
 	.probe = ftm_rtc_probe,


@@ -82,6 +82,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
 	int total_instance = 0;
 	int cur_trip_level = get_trip_level(tz);
 
+	mutex_lock(&tz->lock);
+
 	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
 		if (instance->trip != trip)
 			continue;
@@ -110,6 +112,8 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip)
 		mutex_unlock(&instance->cdev->lock);
 		thermal_cdev_update(cdev);
 	}
 
+	mutex_unlock(&tz->lock);
+
 	return 0;
 }


@@ -712,14 +712,17 @@ static int thermal_of_populate_bind_params(struct device_node *np,
 	count = of_count_phandle_with_args(np, "cooling-device",
 					   "#cooling-cells");
-	if (!count) {
+	if (count <= 0) {
 		pr_err("Add a cooling_device property with at least one device\n");
+		ret = -ENOENT;
 		goto end;
 	}
 
 	__tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
-	if (!__tcbp)
+	if (!__tcbp) {
+		ret = -ENOMEM;
 		goto end;
+	}
 
 	for (i = 0; i < count; i++) {
 		ret = of_parse_phandle_with_args(np, "cooling-device",


@@ -321,12 +321,23 @@ static void wdm_int_callback(struct urb *urb)
 }
 
-static void kill_urbs(struct wdm_device *desc)
+static void poison_urbs(struct wdm_device *desc)
 {
 	/* the order here is essential */
-	usb_kill_urb(desc->command);
-	usb_kill_urb(desc->validity);
-	usb_kill_urb(desc->response);
+	usb_poison_urb(desc->command);
+	usb_poison_urb(desc->validity);
+	usb_poison_urb(desc->response);
+}
+
+static void unpoison_urbs(struct wdm_device *desc)
+{
+	/*
+	 * the order here is not essential
+	 * it is symmetrical just to be nice
+	 */
+	usb_unpoison_urb(desc->response);
+	usb_unpoison_urb(desc->validity);
+	usb_unpoison_urb(desc->command);
 }
 
 static void free_urbs(struct wdm_device *desc)
@@ -741,11 +752,12 @@ static int wdm_release(struct inode *inode, struct file *file)
 	if (!desc->count) {
 		if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
 			dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
-			kill_urbs(desc);
+			poison_urbs(desc);
 			spin_lock_irq(&desc->iuspin);
 			desc->resp_count = 0;
 			spin_unlock_irq(&desc->iuspin);
 			desc->manage_power(desc->intf, 0);
+			unpoison_urbs(desc);
 		} else {
 			/* must avoid dev_printk here as desc->intf is invalid */
 			pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
@@ -1036,9 +1048,9 @@ static void wdm_disconnect(struct usb_interface *intf)
 	wake_up_all(&desc->wait);
 	mutex_lock(&desc->rlock);
 	mutex_lock(&desc->wlock);
+	poison_urbs(desc);
 	cancel_work_sync(&desc->rxwork);
 	cancel_work_sync(&desc->service_outs_intr);
-	kill_urbs(desc);
 	mutex_unlock(&desc->wlock);
 	mutex_unlock(&desc->rlock);
 
@@ -1079,9 +1091,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
 		set_bit(WDM_SUSPENDING, &desc->flags);
 		spin_unlock_irq(&desc->iuspin);
 		/* callback submits work - order is essential */
-		kill_urbs(desc);
+		poison_urbs(desc);
 		cancel_work_sync(&desc->rxwork);
 		cancel_work_sync(&desc->service_outs_intr);
+		unpoison_urbs(desc);
 	}
 	if (!PMSG_IS_AUTO(message)) {
 		mutex_unlock(&desc->wlock);
@@ -1139,7 +1152,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
 	wake_up_all(&desc->wait);
 	mutex_lock(&desc->rlock);
 	mutex_lock(&desc->wlock);
-	kill_urbs(desc);
+	poison_urbs(desc);
 	cancel_work_sync(&desc->rxwork);
 	cancel_work_sync(&desc->service_outs_intr);
 	return 0;
@@ -1150,6 +1163,7 @@ static int wdm_post_reset(struct usb_interface *intf)
 	struct wdm_device *desc = wdm_find_device(intf);
 	int rv;
 
+	unpoison_urbs(desc);
 	clear_bit(WDM_OVERFLOW, &desc->flags);
 	clear_bit(WDM_RESETTING, &desc->flags);
 	rv = recover_from_urb_loss(desc);
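
Why poison rather than kill: usb_kill_urb() only cancels what is currently in flight, and a completion handler or queued work item may legally resubmit afterwards, while usb_poison_urb() additionally makes later submissions fail until usb_unpoison_urb() is called, which is what closes the race across release, suspend, reset and disconnect. The idea in miniature (userspace C; a toy flag, not the USB core API):

    #include <stdio.h>
    #include <stdbool.h>

    struct fake_urb { bool poisoned; };

    static int submit(struct fake_urb *u)
    {
        return u->poisoned ? -1 : 0;   /* poisoned: submission refused */
    }

    int main(void)
    {
        struct fake_urb u = { .poisoned = false };

        u.poisoned = true;             /* "poison": cancel and block resubmit */
        printf("while poisoned: %d\n", submit(&u));
        u.poisoned = false;            /* "unpoison" on resume/release */
        printf("after unpoison: %d\n", submit(&u));
        return 0;
    }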


@@ -3575,9 +3575,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 		 * sequence.
 		 */
 		status = hub_port_status(hub, port1, &portstatus, &portchange);
-
-		/* TRSMRCY = 10 msec */
-		msleep(10);
 	}
 
  SuspendCleared:
@@ -3592,6 +3589,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
 			usb_clear_port_feature(hub->hdev, port1,
 					USB_PORT_FEAT_C_SUSPEND);
 		}
+
+		/* TRSMRCY = 10 msec */
+		msleep(10);
 	}
 
 	if (udev->persist_enabled)


@@ -112,6 +112,7 @@ struct dwc2_hsotg_req;
  * @debugfs: File entry for debugfs file for this endpoint.
  * @dir_in: Set to true if this endpoint is of the IN direction, which
  *          means that it is sending data to the Host.
+ * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
  * @index: The index for the endpoint registers.
  * @mc: Multi Count - number of transactions per microframe
  * @interval: Interval for periodic endpoints, in frames or microframes.
@@ -161,6 +162,7 @@ struct dwc2_hsotg_ep {
 	unsigned short		fifo_index;
 
 	unsigned char		dir_in;
+	unsigned char		map_dir;
 	unsigned char		index;
 	unsigned char		mc;
 	u16			interval;


@@ -421,7 +421,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
 {
 	struct usb_request *req = &hs_req->req;
 
-	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
+	usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
 }
 
 /*
@@ -1242,6 +1242,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
 {
 	int ret;
 
+	hs_ep->map_dir = hs_ep->dir_in;
 	ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
 	if (ret)
 		goto dma_error;
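
The bug being fixed: on the control endpoint the direction can flip between mapping and unmapping a request, so unmapping with the current dir_in could use the wrong DMA direction. Recording the direction at map time and replaying it at unmap time looks like this in miniature (userspace C, illustrative names):

    #include <stdio.h>

    struct ep { int dir_in; int map_dir; };

    static void map_buf(struct ep *ep)   { ep->map_dir = ep->dir_in; }
    static void unmap_buf(struct ep *ep) { printf("unmap dir=%d\n", ep->map_dir); }

    int main(void)
    {
        struct ep ep0 = { .dir_in = 1 };

        map_buf(&ep0);
        ep0.dir_in = 0;   /* ep0 turns around before the unmap happens */
        unmap_buf(&ep0);  /* still unmaps with the direction used to map */
        return 0;
    }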


@@ -437,8 +437,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
 		if (extcon_get_state(edev, EXTCON_USB) == true)
 			dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+		else
+			dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
 		if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
 			dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+		else
+			dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
 
 		omap->edev = edev;
 	}


@@ -138,6 +138,7 @@ static const struct property_entry dwc3_pci_amd_properties[] = {
 	PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
 	PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
 	PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+	PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
 	PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
 	{}
 };


@@ -1566,7 +1566,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 		}
 	}
 
-	return __dwc3_gadget_kick_transfer(dep);
+	__dwc3_gadget_kick_transfer(dep);
+
+	return 0;
 }
 
 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,


@@ -5571,7 +5571,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
 	struct usb_hcd *hcd;
 	struct resource *res;
 	int irq;
-	int retval = -ENODEV;
+	int retval;
 	struct fotg210_hcd *fotg210;
 
 	if (usb_disabled())
@@ -5591,7 +5591,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
 	hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
 			dev_name(dev));
 	if (!hcd) {
-		dev_err(dev, "failed to create hcd with err %d\n", retval);
+		dev_err(dev, "failed to create hcd\n");
 		retval = -ENOMEM;
 		goto fail_create_hcd;
 	}


@@ -7,8 +7,9 @@
  * Author: Sarah Sharp
  * Some code borrowed from the Linux EHCI driver.
  */
 
-/* Up to 16 ms to halt an HC */
-#define XHCI_MAX_HALT_USEC	(16*1000)
+/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
+#define XHCI_MAX_HALT_USEC	(32 * 1000)
+
 /* HC not running - set to 1 when run/stop bit is cleared. */
 #define XHCI_STS_HALT		(1<<0)


@@ -152,8 +152,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 	    (pdev->device == 0x15e0 || pdev->device == 0x15e1))
 		xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
 
-	if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
+	if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
 		xhci->quirks |= XHCI_DISABLE_SPARSE;
+		xhci->quirks |= XHCI_RESET_ON_RESUME;
+	}
 
 	if (pdev->vendor == PCI_VENDOR_ID_AMD)
 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;


@@ -1397,7 +1397,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
  * we need to issue an evaluate context command and wait on it.
  */
 static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
-		unsigned int ep_index, struct urb *urb)
+		unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
 {
 	struct xhci_container_ctx *out_ctx;
 	struct xhci_input_control_ctx *ctrl_ctx;
@@ -1428,7 +1428,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
 		 * changes max packet sizes.
 		 */
 
-		command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+		command = xhci_alloc_command(xhci, true, mem_flags);
 		if (!command)
 			return -ENOMEM;
@@ -1524,7 +1524,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
 		 */
 		if (urb->dev->speed == USB_SPEED_FULL) {
 			ret = xhci_check_maxpacket(xhci, slot_id,
-					ep_index, urb);
+					ep_index, urb, mem_flags);
 			if (ret < 0) {
 				xhci_urb_free_priv(urb_priv);
 				urb->hcpriv = NULL;


@@ -178,8 +178,10 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
 		return ERR_CAST(inode);
 	/* We need LINK caps to reliably check i_nlink */
 	err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false);
-	if (err)
+	if (err) {
+		iput(inode);
 		return ERR_PTR(err);
+	}
 	/* -ESTALE if inode as been unlinked and no file is open */
 	if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) {
 		iput(inode);
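
The rule behind the ceph hunk: once the lookup has returned a referenced inode, every exit path, including the new error return, must either drop that reference or hand it onward. A toy model of the invariant (userspace C, illustrative names):

    #include <stdio.h>

    struct obj { int refcount; };

    static void put(struct obj *o) { o->refcount--; }

    /* Returns the object (reference transferred to caller) or NULL on error. */
    static struct obj *lookup(struct obj *o, int err)
    {
        o->refcount++;            /* reference taken by the lookup */
        if (err) {
            put(o);               /* the fix: the error exit drops it too */
            return NULL;
        }
        return o;
    }

    int main(void)
    {
        struct obj o = { .refcount = 0 };

        if (!lookup(&o, -1))
            printf("error path, refcount=%d (balanced)\n", o.refcount);
        return 0;
    }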
