Merge 5.10.17 into android12-5.10

Changes in 5.10.17
	objtool: Fix seg fault with Clang non-section symbols
	Revert "dts: phy: add GPIO number and active state used for phy reset"
	gpio: mxs: GPIO_MXS should not default to y unconditionally
	gpio: ep93xx: fix BUG_ON port F usage
	gpio: ep93xx: Fix single irqchip with multi gpiochips
	tracing: Do not count ftrace events in top level enable output
	tracing: Check length before giving out the filter buffer
	drm/i915: Fix overlay frontbuffer tracking
	arm/xen: Don't probe xenbus as part of an early initcall
	cgroup: fix psi monitor for root cgroup
	Revert "drm/amd/display: Update NV1x SR latency values"
	drm/i915/tgl+: Make sure TypeC FIA is powered up when initializing it
	drm/dp_mst: Don't report ports connected if nothing is attached to them
	dmaengine: move channel device_node deletion to driver
	tmpfs: disallow CONFIG_TMPFS_INODE64 on s390
	tmpfs: disallow CONFIG_TMPFS_INODE64 on alpha
	soc: ti: omap-prm: Fix boot time errors for rst_map_012 bits 0 and 1
	arm64: dts: rockchip: Fix PCIe DT properties on rk3399
	arm64: dts: qcom: sdm845: Reserve LPASS clocks in gcc
	ARM: OMAP2+: Fix suspcious RCU usage splats for omap_enter_idle_coupled
	arm64: dts: rockchip: remove interrupt-names property from rk3399 vdec node
	platform/x86: hp-wmi: Disable tablet-mode reporting by default
	arm64: dts: rockchip: Disable display for NanoPi R2S
	ovl: perform vfs_getxattr() with mounter creds
	cap: fix conversions on getxattr
	ovl: skip getxattr of security labels
	scsi: lpfc: Fix EEH encountering oops with NVMe traffic
	x86/split_lock: Enable the split lock feature on another Alder Lake CPU
	nvme-pci: ignore the subsysem NQN on Phison E16
	drm/amd/display: Fix DPCD translation for LTTPR AUX_RD_INTERVAL
	drm/amd/display: Add more Clock Sources to DCN2.1
	drm/amd/display: Release DSC before acquiring
	drm/amd/display: Fix dc_sink kref count in emulated_link_detect
	drm/amd/display: Free atomic state after drm_atomic_commit
	drm/amd/display: Decrement refcount of dc_sink before reassignment
	riscv: virt_addr_valid must check the address belongs to linear mapping
	bfq-iosched: Revert "bfq: Fix computation of shallow depth"
	ARM: dts: lpc32xx: Revert set default clock rate of HCLK PLL
	kallsyms: fix nonconverging kallsyms table with lld
	ARM: ensure the signal page contains defined contents
	ARM: kexec: fix oops after TLB are invalidated
	ubsan: implement __ubsan_handle_alignment_assumption
	Revert "lib: Restrict cpumask_local_spread to houskeeping CPUs"
	x86/efi: Remove EFI PGD build time checks
	lkdtm: don't move ctors to .rodata
	KVM: x86: cleanup CR3 reserved bits checks
	cgroup-v1: add disabled controller check in cgroup1_parse_param()
	dmaengine: idxd: fix misc interrupt completion
	ath9k: fix build error with LEDS_CLASS=m
	mt76: dma: fix a possible memory leak in mt76_add_fragment()
	drm/vc4: hvs: Fix buffer overflow with the dlist handling
	dmaengine: idxd: check device state before issue command
	bpf: Unbreak BPF_PROG_TYPE_KPROBE when kprobe is called via do_int3
	bpf: Check for integer overflow when using roundup_pow_of_two()
	netfilter: xt_recent: Fix attempt to update deleted entry
	selftests: netfilter: fix current year
	netfilter: nftables: fix possible UAF over chains from packet path in netns
	netfilter: flowtable: fix tcp and udp header checksum update
	xen/netback: avoid race in xenvif_rx_ring_slots_available()
	net: hdlc_x25: Return meaningful error code in x25_open
	net: ipa: set error code in gsi_channel_setup()
	hv_netvsc: Reset the RSC count if NVSP_STAT_FAIL in netvsc_receive()
	net: enetc: initialize the RFS and RSS memories
	selftests: txtimestamp: fix compilation issue
	net: stmmac: set TxQ mode back to DCB after disabling CBS
	ibmvnic: Clear failover_pending if unable to schedule
	netfilter: conntrack: skip identical origin tuple in same zone only
	scsi: scsi_debug: Fix a memory leak
	x86/build: Disable CET instrumentation in the kernel for 32-bit too
	net: dsa: felix: implement port flushing on .phylink_mac_link_down
	net: hns3: add a check for queue_id in hclge_reset_vf_queue()
	net: hns3: add a check for tqp_index in hclge_get_ring_chain_from_mbx()
	net: hns3: add a check for index in hclge_get_rss_key()
	firmware_loader: align .builtin_fw to 8
	drm/sun4i: tcon: set sync polarity for tcon1 channel
	drm/sun4i: dw-hdmi: always set clock rate
	drm/sun4i: Fix H6 HDMI PHY configuration
	drm/sun4i: dw-hdmi: Fix max. frequency for H6
	clk: sunxi-ng: mp: fix parent rate change flag check
	i2c: stm32f7: fix configuration of the digital filter
	h8300: fix PREEMPTION build, TI_PRE_COUNT undefined
	scripts: set proper OpenSSL include dir also for sign-file
	x86/pci: Create PCI/MSI irqdomain after x86_init.pci.arch_init()
	arm64: mte: Allow PTRACE_PEEKMTETAGS access to the zero page
	rxrpc: Fix clearance of Tx/Rx ring when releasing a call
	udp: fix skb_copy_and_csum_datagram with odd segment sizes
	net: dsa: call teardown method on probe failure
	cpufreq: ACPI: Extend frequency tables to cover boost frequencies
	cpufreq: ACPI: Update arch scale-invariance max perf ratio if CPPC is not there
	net: gro: do not keep too many GRO packets in napi->rx_list
	net: fix iteration for sctp transport seq_files
	net/vmw_vsock: fix NULL pointer dereference
	net/vmw_vsock: improve locking in vsock_connect_timeout()
	net: watchdog: hold device global xmit lock during tx disable
	bridge: mrp: Fix the usage of br_mrp_port_switchdev_set_state
	switchdev: mrp: Remove SWITCHDEV_ATTR_ID_MRP_PORT_STAT
	vsock/virtio: update credit only if socket is not closed
	vsock: fix locking in vsock_shutdown()
	net/rds: restrict iovecs length for RDS_CMSG_RDMA_ARGS
	net/qrtr: restrict user-controlled length in qrtr_tun_write_iter()
	ovl: expand warning in ovl_d_real()
	kcov, usb: only collect coverage from __usb_hcd_giveback_urb in softirq
	Linux 5.10.17

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Id0300681f52b51d3f466f1e66ec3a6c25f65f4d3
Greg Kroah-Hartman, 2021-02-17 16:45:08 +01:00, commit b129c98dc6
125 changed files with 1071 additions and 511 deletions

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 16
SUBLEVEL = 17
EXTRAVERSION =
NAME = Kleptomaniac Octopus

@@ -329,9 +329,6 @@ clk: clock-controller@0 {
clocks = <&xtal_32k>, <&xtal>;
clock-names = "xtal_32k", "xtal";
assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
assigned-clock-rates = <208000000>;
};
};

@@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARM_KEXEC_INTERNAL_H
#define _ARM_KEXEC_INTERNAL_H
struct kexec_relocate_data {
unsigned long kexec_start_address;
unsigned long kexec_indirection_page;
unsigned long kexec_mach_type;
unsigned long kexec_r2;
};
#endif

@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/kexec-internal.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
@@ -170,5 +171,9 @@ int main(void)
DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
#endif
DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type));
DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2));
return 0;
}

@@ -13,6 +13,7 @@
#include <linux/of_fdt.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/kexec-internal.h>
#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
@@ -22,11 +23,6 @@
extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
extern unsigned long kexec_mach_type;
extern unsigned long kexec_boot_atags;
static atomic_t waiting_for_crash_ipi;
/*
@@ -159,6 +155,7 @@ void (*kexec_reinit)(void);
void machine_kexec(struct kimage *image)
{
unsigned long page_list, reboot_entry_phys;
struct kexec_relocate_data *data;
void (*reboot_entry)(void);
void *reboot_code_buffer;
@@ -174,18 +171,17 @@ void machine_kexec(struct kimage *image)
reboot_code_buffer = page_address(image->control_code_page);
/* Prepare parameters for reboot_code_buffer*/
set_kernel_text_rw();
kexec_start_address = image->start;
kexec_indirection_page = page_list;
kexec_mach_type = machine_arch_type;
kexec_boot_atags = image->arch.kernel_r2;
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
&relocate_new_kernel,
relocate_new_kernel_size);
data = reboot_code_buffer + relocate_new_kernel_size;
data->kexec_start_address = image->start;
data->kexec_indirection_page = page_list;
data->kexec_mach_type = machine_arch_type;
data->kexec_r2 = image->arch.kernel_r2;
/* get the identity mapping physical address for the reboot code */
reboot_entry_phys = virt_to_idmap(reboot_entry);

@@ -5,14 +5,16 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/kexec.h>
.align 3 /* not needed for this code, but keeps fncpy() happy */
ENTRY(relocate_new_kernel)
ldr r0,kexec_indirection_page
ldr r1,kexec_start_address
adr r7, relocate_new_kernel_end
ldr r0, [r7, #KEXEC_INDIR_PAGE]
ldr r1, [r7, #KEXEC_START_ADDR]
/*
* If there is no indirection page (we are doing crashdumps)
@@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel)
2:
/* Jump to relocated kernel */
mov lr,r1
mov r0,#0
ldr r1,kexec_mach_type
ldr r2,kexec_boot_atags
ARM( ret lr )
THUMB( bx lr )
.align
.globl kexec_start_address
kexec_start_address:
.long 0x0
.globl kexec_indirection_page
kexec_indirection_page:
.long 0x0
.globl kexec_mach_type
kexec_mach_type:
.long 0x0
/* phy addr of the atags for the new kernel */
.globl kexec_boot_atags
kexec_boot_atags:
.long 0x0
mov lr, r1
mov r0, #0
ldr r1, [r7, #KEXEC_MACH_TYPE]
ldr r2, [r7, #KEXEC_R2]
ARM( ret lr )
THUMB( bx lr )
ENDPROC(relocate_new_kernel)
.align 3
relocate_new_kernel_end:
.globl relocate_new_kernel_size

@@ -693,18 +693,20 @@ struct page *get_signal_page(void)
addr = page_address(page);
/* Poison the entire page */
memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
PAGE_SIZE / sizeof(u32));
/* Give the signal return code some randomness */
offset = 0x200 + (get_random_int() & 0x7fc);
signal_return_offset = offset;
/*
* Copy signal return handlers into the vector page, and
* set sigreturn to be a pointer to these.
*/
/* Copy signal return handlers into the page */
memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
ptr = (unsigned long)addr + offset;
flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
/* Flush out all instructions in this page */
ptr = (unsigned long)addr;
flush_icache_range(ptr, ptr + PAGE_SIZE);
return page;
}

@@ -151,10 +151,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
(cx->mpu_logic_state == PWRDM_POWER_OFF);
/* Enter broadcast mode for periodic timers */
tick_broadcast_enable();
RCU_NONIDLE(tick_broadcast_enable());
/* Enter broadcast mode for one-shot timers */
tick_broadcast_enter();
RCU_NONIDLE(tick_broadcast_enter());
/*
* Call idle CPU PM enter notifier chain so that
@@ -166,7 +166,7 @@
if (dev->cpu == 0) {
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
/*
* Call idle CPU cluster PM enter notifier chain
@@ -178,7 +178,7 @@
index = 0;
cx = state_ptr + index;
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
mpuss_can_lose_context = 0;
}
}
@@ -194,9 +194,9 @@
mpuss_can_lose_context)
gic_dist_disable();
clkdm_deny_idle(cpu_clkdm[1]);
omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
clkdm_allow_idle(cpu_clkdm[1]);
RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));
if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
mpuss_can_lose_context) {
@@ -222,7 +222,7 @@
cpu_pm_exit();
cpu_pm_out:
tick_broadcast_exit();
RCU_NONIDLE(tick_broadcast_exit());
fail:
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);

@@ -378,8 +378,6 @@ static int __init xen_guest_init(void)
return -ENOMEM;
}
gnttab_init();
if (!xen_initial_domain())
xenbus_probe();
/*
* Making sure board specific code will not set up ops for

@@ -415,7 +415,9 @@ &dsi0_phy {
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
<GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
<GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
<GCC_LPASS_Q6_AXI_CLK>,
<GCC_LPASS_SWAY_CLK>;
};
&gpu {

@@ -245,7 +245,9 @@ &cdsp_pas {
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
<GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
<GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
<GCC_LPASS_Q6_AXI_CLK>,
<GCC_LPASS_SWAY_CLK>;
};
&gpu {

@@ -114,6 +114,10 @@ &cpu3 {
cpu-supply = <&vdd_arm>;
};
&display_subsystem {
status = "disabled";
};
&gmac2io {
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;

@@ -234,6 +234,7 @@ pcie0: pcie@f8000000 {
reg = <0x0 0xf8000000 0x0 0x2000000>,
<0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
@@ -252,7 +253,6 @@ pcie0: pcie@f8000000 {
<0 0 0 2 &pcie0_intc 1>,
<0 0 0 3 &pcie0_intc 2>,
<0 0 0 4 &pcie0_intc 3>;
linux,pci-domain = <0>;
max-link-speed = <1>;
msi-map = <0x0 &its 0x0 0x1000>;
phys = <&pcie_phy 0>, <&pcie_phy 1>,
@@ -1278,7 +1278,6 @@ vdec: video-codec@ff660000 {
compatible = "rockchip,rk3399-vdec";
reg = <0x0 0xff660000 0x0 0x400>;
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "vdpu";
clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
<&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
clock-names = "axi", "ahb", "cabac", "core";

@@ -1883,16 +1883,12 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
#ifdef CONFIG_ARM64_MTE
static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
{
static bool cleared_zero_page = false;
/*
* Clear the tags in the zero page. This needs to be done via the
* linear map which has the Tagged attribute.
*/
if (!cleared_zero_page) {
cleared_zero_page = true;
if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
mte_clear_page_tags(lm_alias(empty_zero_page));
}
kasan_init_hw_tags_cpu();
}

@@ -295,11 +295,12 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
* would cause the existing tags to be cleared if the page
* was never mapped with PROT_MTE.
*/
if (!test_bit(PG_mte_tagged, &page->flags)) {
if (!(vma->vm_flags & VM_MTE)) {
ret = -EOPNOTSUPP;
put_page(page);
break;
}
WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
/* limit access to the end of the page */
offset = offset_in_page(addr);

@@ -63,6 +63,9 @@ int main(void)
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE, thread_info, preempt_count);
#ifdef CONFIG_PREEMPTION
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
#endif
return 0;
}

@@ -90,7 +90,6 @@ &eth0 {
phy0: ethernet-phy@0 {
compatible = "ethernet-phy-id0007.0771";
reg = <0>;
reset-gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
};
};

@@ -135,7 +135,10 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
#define virt_addr_valid(vaddr) ({ \
unsigned long _addr = (unsigned long)vaddr; \
(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
})
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC

@@ -57,6 +57,9 @@ export BITS
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
# Intel CET isn't enabled in the kernel
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
ifeq ($(CONFIG_X86_32),y)
BITS := 32
UTS_MACHINE := i386
@@ -127,9 +130,6 @@ else
KBUILD_CFLAGS += -mno-red-zone
KBUILD_CFLAGS += -mcmodel=kernel
# Intel CET isn't enabled in the kernel
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
endif
ifdef CONFIG_X86_X32

@@ -1159,6 +1159,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 1),
{}
};

@@ -1829,6 +1829,7 @@ void arch_set_max_freq_ratio(bool turbo_disabled)
arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
arch_turbo_freq_ratio;
}
EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);
static bool turbo_disabled(void)
{

@@ -231,6 +231,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
bool vmcb12_lma;
if ((vmcb12->save.efer & EFER_SVME) == 0)
@@ -244,18 +245,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
if (!vmcb12_lma) {
if (vmcb12->save.cr4 & X86_CR4_PAE) {
if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
return false;
} else {
if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
return false;
}
} else {
if (vmcb12_lma) {
if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
!(vmcb12->save.cr0 & X86_CR0_PE) ||
(vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
(vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
return false;
}
if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))

@@ -346,9 +346,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
}
/* svm.c */
#define MSR_CR3_LEGACY_RESERVED_MASK 0xfe7U
#define MSR_CR3_LEGACY_PAE_RESERVED_MASK 0x7U
#define MSR_CR3_LONG_MBZ_MASK 0xfff0000000000000U
#define MSR_INVALID 0xffffffffU
u32 svm_msrpm_offset(u32 msr);

@@ -9558,6 +9558,8 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
if (!(sregs->cr4 & X86_CR4_PAE)
|| !(sregs->efer & EFER_LMA))
return -EINVAL;
if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
return -EINVAL;
} else {
/*
* Not in 64-bit mode: EFER.LMA is clear and the code

@@ -9,16 +9,23 @@
in the right sequence from here. */
static __init int pci_arch_init(void)
{
int type;
x86_create_pci_msi_domain();
int type, pcbios = 1;
type = pci_direct_probe();
if (!(pci_probe & PCI_PROBE_NOEARLY))
pci_mmcfg_early_init();
if (x86_init.pci.arch_init && !x86_init.pci.arch_init())
if (x86_init.pci.arch_init)
pcbios = x86_init.pci.arch_init();
/*
* Must happen after x86_init.pci.arch_init(). Xen sets up the
* x86_init.irqs.create_pci_msi_domain there.
*/
x86_create_pci_msi_domain();
if (!pcbios)
return 0;
pci_pcbios_init();

@@ -115,31 +115,12 @@ void efi_sync_low_kernel_mappings(void)
pud_t *pud_k, *pud_efi;
pgd_t *efi_pgd = efi_mm.pgd;
/*
* We can share all PGD entries apart from the one entry that
* covers the EFI runtime mapping space.
*
* Make sure the EFI runtime region mappings are guaranteed to
* only span a single PGD entry and that the entry also maps
* other important kernel regions.
*/
MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
(EFI_VA_END & PGDIR_MASK));
pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
pgd_k = pgd_offset_k(PAGE_OFFSET);
num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
/*
* As with PGDs, we share all P4D entries apart from the one entry
* that covers the EFI runtime mapping space.
*/
BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));
pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
pgd_k = pgd_offset_k(EFI_VA_END);
p4d_efi = p4d_offset(pgd_efi, 0);

@@ -6332,13 +6332,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* limit 'something'.
*/
/* no more than 50% of tags for async I/O */
bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
/*
* no more than 75% of tags for sync writes (25% extra tags
* w.r.t. async I/O, to prevent async I/O from starving sync
* writes)
*/
bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
/*
* In-word depths in case some bfq_queue is being weight-
@@ -6348,9 +6348,9 @@
* shortage.
*/
/* no more than ~18% of tags for async I/O */
bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
/* no more than ~37% of tags for sync writes (~20% extra tags) */
bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)

@@ -108,7 +108,7 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
max_m = cmp->m.max ?: 1 << cmp->m.width;
max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) {
ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
rate = *parent_rate / p / m;
} else {

@@ -26,6 +26,7 @@
#include <linux/uaccess.h>
#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -53,6 +54,7 @@ struct acpi_cpufreq_data {
unsigned int resume;
unsigned int cpu_feature;
unsigned int acpi_perf_cpu;
unsigned int first_perf_state;
cpumask_var_t freqdomain_cpus;
void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
@@ -221,10 +223,10 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
perf = to_perf_data(data);
cpufreq_for_each_entry(pos, policy->freq_table)
cpufreq_for_each_entry(pos, policy->freq_table + data->first_perf_state)
if (msr == perf->states[pos->driver_data].status)
return pos->frequency;
return policy->freq_table[0].frequency;
return policy->freq_table[data->first_perf_state].frequency;
}
static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
@@ -363,6 +365,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
struct cpufreq_policy *policy;
unsigned int freq;
unsigned int cached_freq;
unsigned int state;
pr_debug("%s (%d)\n", __func__, cpu);
@@ -374,7 +377,11 @@
if (unlikely(!data || !policy->freq_table))
return 0;
cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
state = to_perf_data(data)->state;
if (state < data->first_perf_state)
state = data->first_perf_state;
cached_freq = policy->freq_table[state].frequency;
freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
if (freq != cached_freq) {
/*
@@ -628,16 +635,54 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
}
#endif
#ifdef CONFIG_ACPI_CPPC_LIB
static u64 get_max_boost_ratio(unsigned int cpu)
{
struct cppc_perf_caps perf_caps;
u64 highest_perf, nominal_perf;
int ret;
if (acpi_pstate_strict)
return 0;
ret = cppc_get_perf_caps(cpu, &perf_caps);
if (ret) {
pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
cpu, ret);
return 0;
}
highest_perf = perf_caps.highest_perf;
nominal_perf = perf_caps.nominal_perf;
if (!highest_perf || !nominal_perf) {
pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
return 0;
}
if (highest_perf < nominal_perf) {
pr_debug("CPU%d: nominal performance above highest\n", cpu);
return 0;
}
return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
}
#else
static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
#endif
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
unsigned int i;
unsigned int valid_states = 0;
unsigned int cpu = policy->cpu;
struct acpi_cpufreq_data *data;
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
struct acpi_processor_performance *perf;
struct cpufreq_frequency_table *freq_table;
struct acpi_processor_performance *perf;
struct acpi_cpufreq_data *data;
unsigned int cpu = policy->cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
unsigned int valid_states = 0;
unsigned int result = 0;
unsigned int state_count;
u64 max_boost_ratio;
unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
#endif
@@ -750,8 +795,28 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_unreg;
}
freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
GFP_KERNEL);
state_count = perf->state_count + 1;
max_boost_ratio = get_max_boost_ratio(cpu);
if (max_boost_ratio) {
/*
* Make a room for one more entry to represent the highest
* available "boost" frequency.
*/
state_count++;
valid_states++;
data->first_perf_state = valid_states;
} else {
/*
* If the maximum "boost" frequency is unknown, ask the arch
* scale-invariance code to use the "nominal" performance for
* CPU utilization scaling so as to prevent the schedutil
* governor from selecting inadequate CPU frequencies.
*/
arch_set_max_freq_ratio(true);
}
freq_table = kcalloc(state_count, sizeof(*freq_table), GFP_KERNEL);
if (!freq_table) {
result = -ENOMEM;
goto err_unreg;
@@ -785,6 +850,30 @@
valid_states++;
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
if (max_boost_ratio) {
unsigned int state = data->first_perf_state;
unsigned int freq = freq_table[state].frequency;
/*
* Because the loop above sorts the freq_table entries in the
* descending order, freq is the maximum frequency in the table.
* Assume that it corresponds to the CPPC nominal frequency and
* use it to populate the frequency field of the extra "boost"
* frequency entry.
*/
freq_table[0].frequency = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
/*
* The purpose of the extra "boost" frequency entry is to make
* the rest of cpufreq aware of the real maximum frequency, but
* the way to request it is the same as for the first_perf_state
* entry that is expected to cover the entire range of "boost"
* frequencies of the CPU, so copy the driver_data value from
* that entry.
*/
freq_table[0].driver_data = freq_table[state].driver_data;
}
policy->freq_table = freq_table;
perf->state = 0;
@@ -858,8 +947,10 @@ static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
{
struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
policy->cpu);
struct acpi_cpufreq_data *data = policy->driver_data;
unsigned int freq = policy->freq_table[data->first_perf_state].frequency;
if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
if (perf->states[0].core_frequency * 1000 != freq)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
}

@@ -1110,7 +1110,6 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
"%s called while %d clients hold a reference\n",
__func__, chan->client_count);
mutex_lock(&dma_list_mutex);
list_del(&chan->device_node);
device->chancnt--;
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);

@@ -325,17 +325,31 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd)
return false;
}
static inline bool idxd_device_is_halted(struct idxd_device *idxd)
{
union gensts_reg gensts;
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
return (gensts.state == IDXD_DEVICE_STATE_HALT);
}
/*
* This is function is only used for reset during probe and will
* poll for completion. Once the device is setup with interrupts,
* all commands will be done via interrupt completion.
*/
void idxd_device_init_reset(struct idxd_device *idxd)
int idxd_device_init_reset(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
union idxd_command_reg cmd;
unsigned long flags;
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
return -ENXIO;
}
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = IDXD_CMD_RESET_DEVICE;
dev_dbg(dev, "%s: sending reset for init.\n", __func__);
@@ -346,6 +360,7 @@ void idxd_device_init_reset(struct idxd_device *idxd)
IDXD_CMDSTS_ACTIVE)
cpu_relax();
spin_unlock_irqrestore(&idxd->dev_lock, flags);
return 0;
}
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
@@ -355,6 +370,12 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
DECLARE_COMPLETION_ONSTACK(done);
unsigned long flags;
if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
*status = IDXD_CMDSTS_HW_ERR;
return;
}
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = cmd_code;
cmd.operand = operand;

@@ -214,5 +214,8 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
struct dma_chan *chan = &wq->dma_chan;
dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
list_del(&chan->device_node);
}

@@ -281,7 +281,7 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
void idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);

@@ -289,7 +289,10 @@ static int idxd_probe(struct idxd_device *idxd)
int rc;
dev_dbg(dev, "%s entered and resetting device\n", __func__);
idxd_device_init_reset(idxd);
rc = idxd_device_init_reset(idxd);
if (rc < 0)
return rc;
dev_dbg(dev, "IDXD reset complete\n");
idxd_read_caps(idxd);

@@ -53,19 +53,14 @@ irqreturn_t idxd_irq_handler(int vec, void *data)
return IRQ_WAKE_THREAD;
}
irqreturn_t idxd_misc_thread(int vec, void *data)
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
struct idxd_irq_entry *irq_entry = data;
struct idxd_device *idxd = irq_entry->idxd;
struct device *dev = &idxd->pdev->dev;
union gensts_reg gensts;
u32 cause, val = 0;
u32 val = 0;
int i;
bool err = false;
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (cause & IDXD_INTC_ERR) {
spin_lock_bh(&idxd->dev_lock);
for (i = 0; i < 4; i++)
@@ -123,7 +118,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
val);
if (!err)
goto out;
return 0;
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
if (gensts.state == IDXD_DEVICE_STATE_HALT) {
@@ -144,10 +139,33 @@
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
spin_unlock_bh(&idxd->dev_lock);
return -ENXIO;
}
}
out:
return 0;
}
irqreturn_t idxd_misc_thread(int vec, void *data)
{
struct idxd_irq_entry *irq_entry = data;
struct idxd_device *idxd = irq_entry->idxd;
int rc;
u32 cause;
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (cause)
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
while (cause) {
rc = process_misc_interrupts(idxd, cause);
if (rc < 0)
break;
cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (cause)
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
}
idxd_unmask_msix_vector(idxd, irq_entry->id);
return IRQ_HANDLED;
}

@@ -428,8 +428,9 @@ config GPIO_MXC
select GENERIC_IRQ_CHIP
config GPIO_MXS
def_bool y
bool "Freescale MXS GPIO support" if COMPILE_TEST
depends on ARCH_MXS || COMPILE_TEST
default y if ARCH_MXS
select GPIO_GENERIC
select GENERIC_IRQ_CHIP

@@ -25,6 +25,9 @@
/* Maximum value for gpio line identifiers */
#define EP93XX_GPIO_LINE_MAX 63
/* Number of GPIO chips in EP93XX */
#define EP93XX_GPIO_CHIP_NUM 8
/* Maximum value for irq capable line identifiers */
#define EP93XX_GPIO_LINE_MAX_IRQ 23
@@ -34,74 +37,75 @@
*/
#define EP93XX_GPIO_F_IRQ_BASE 80
struct ep93xx_gpio_irq_chip {
struct irq_chip ic;
u8 irq_offset;
u8 int_unmasked;
u8 int_enabled;
u8 int_type1;
u8 int_type2;
u8 int_debounce;
};
struct ep93xx_gpio_chip {
struct gpio_chip gc;
struct ep93xx_gpio_irq_chip *eic;
};
struct ep93xx_gpio {
void __iomem *base;
struct gpio_chip gc[8];
struct ep93xx_gpio_chip gc[EP93XX_GPIO_CHIP_NUM];
};
#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
{
struct ep93xx_gpio_chip *egc = to_ep93xx_gpio_chip(gc);
return egc->eic;
}
/*************************************************************************
* Interrupt handling for EP93xx on-chip GPIOs
*************************************************************************/
static unsigned char gpio_int_unmasked[3];
static unsigned char gpio_int_enabled[3];
static unsigned char gpio_int_type1[3];
static unsigned char gpio_int_type2[3];
static unsigned char gpio_int_debounce[3];
#define EP93XX_INT_TYPE1_OFFSET 0x00
#define EP93XX_INT_TYPE2_OFFSET 0x04
#define EP93XX_INT_EOI_OFFSET 0x08
#define EP93XX_INT_EN_OFFSET 0x0c
#define EP93XX_INT_STATUS_OFFSET 0x10
#define EP93XX_INT_RAW_STATUS_OFFSET 0x14
#define EP93XX_INT_DEBOUNCE_OFFSET 0x18
/* Port ordering is: A B F */
static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c };
static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 };
static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 };
static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 };
static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 };
static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, unsigned port)
static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg,
struct ep93xx_gpio_irq_chip *eic)
{
BUG_ON(port > 2);
writeb_relaxed(0, epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
writeb_relaxed(0, epg->base + int_en_register_offset[port]);
writeb_relaxed(eic->int_type2,
epg->base + eic->irq_offset + EP93XX_INT_TYPE2_OFFSET);
writeb_relaxed(gpio_int_type2[port],
epg->base + int_type2_register_offset[port]);
writeb_relaxed(eic->int_type1,
epg->base + eic->irq_offset + EP93XX_INT_TYPE1_OFFSET);
writeb_relaxed(gpio_int_type1[port],
epg->base + int_type1_register_offset[port]);
writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
epg->base + int_en_register_offset[port]);
}
static int ep93xx_gpio_port(struct gpio_chip *gc)
{
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
int port = 0;
while (port < ARRAY_SIZE(epg->gc) && gc != &epg->gc[port])
port++;
/* This should not happen but is there as a last safeguard */
if (port == ARRAY_SIZE(epg->gc)) {
pr_crit("can't find the GPIO port\n");
return 0;
}
return port;
writeb_relaxed(eic->int_unmasked & eic->int_enabled,
epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
}
static void ep93xx_gpio_int_debounce(struct gpio_chip *gc,
unsigned int offset, bool enable)
{
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
int port = ep93xx_gpio_port(gc);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
int port_mask = BIT(offset);
if (enable)
gpio_int_debounce[port] |= port_mask;
eic->int_debounce |= port_mask;
else
gpio_int_debounce[port] &= ~port_mask;
eic->int_debounce &= ~port_mask;
writeb(gpio_int_debounce[port],
epg->base + int_debounce_register_offset[port]);
writeb(eic->int_debounce,
epg->base + eic->irq_offset + EP93XX_INT_DEBOUNCE_OFFSET);
}
static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
@@ -122,12 +126,12 @@ static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
*/
stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS);
for_each_set_bit(offset, &stat, 8)
generic_handle_irq(irq_find_mapping(epg->gc[0].irq.domain,
generic_handle_irq(irq_find_mapping(epg->gc[0].gc.irq.domain,
offset));
stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS);
for_each_set_bit(offset, &stat, 8)
generic_handle_irq(irq_find_mapping(epg->gc[1].irq.domain,
generic_handle_irq(irq_find_mapping(epg->gc[1].gc.irq.domain,
offset));
chained_irq_exit(irqchip, desc);
@@ -153,52 +157,52 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
static void ep93xx_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
int port = ep93xx_gpio_port(gc);
int port_mask = BIT(d->irq & 7);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
gpio_int_type2[port] ^= port_mask; /* switch edge direction */
ep93xx_gpio_update_int_params(epg, port);
eic->int_type2 ^= port_mask; /* switch edge direction */
ep93xx_gpio_update_int_params(epg, eic);
}
writeb(port_mask, epg->base + eoi_register_offset[port]);
writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
}
static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
int port = ep93xx_gpio_port(gc);
int port_mask = BIT(d->irq & 7);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
gpio_int_type2[port] ^= port_mask; /* switch edge direction */
eic->int_type2 ^= port_mask; /* switch edge direction */
gpio_int_unmasked[port] &= ~port_mask;
ep93xx_gpio_update_int_params(epg, port);
eic->int_unmasked &= ~port_mask;
ep93xx_gpio_update_int_params(epg, eic);
writeb(port_mask, epg->base + eoi_register_offset[port]);
writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
}
static void ep93xx_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
int port = ep93xx_gpio_port(gc);
gpio_int_unmasked[port] &= ~BIT(d->irq & 7);
ep93xx_gpio_update_int_params(epg, port);
eic->int_unmasked &= ~BIT(d->irq & 7);
ep93xx_gpio_update_int_params(epg, eic);
}
static void ep93xx_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
int port = ep93xx_gpio_port(gc);
gpio_int_unmasked[port] |= BIT(d->irq & 7);
ep93xx_gpio_update_int_params(epg, port);
eic->int_unmasked |= BIT(d->irq & 7);
ep93xx_gpio_update_int_params(epg, eic);
}
/*
@@ -209,8 +213,8 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d)
static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
int port = ep93xx_gpio_port(gc);
int offset = d->irq & 7;
int port_mask = BIT(offset);
irq_flow_handler_t handler;
@@ -219,32 +223,32 @@
switch (type) {
case IRQ_TYPE_EDGE_RISING:
gpio_int_type1[port] |= port_mask;
gpio_int_type2[port] |= port_mask;
eic->int_type1 |= port_mask;
eic->int_type2 |= port_mask;
handler = handle_edge_irq;
break;
case IRQ_TYPE_EDGE_FALLING:
gpio_int_type1[port] |= port_mask;
gpio_int_type2[port] &= ~port_mask;
eic->int_type1 |= port_mask;
eic->int_type2 &= ~port_mask;
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
gpio_int_type1[port] &= ~port_mask;
gpio_int_type2[port] |= port_mask;
eic->int_type1 &= ~port_mask;
eic->int_type2 |= port_mask;
handler = handle_level_irq;
break;
case IRQ_TYPE_LEVEL_LOW:
gpio_int_type1[port] &= ~port_mask;
gpio_int_type2[port] &= ~port_mask;
eic->int_type1 &= ~port_mask;
eic->int_type2 &= ~port_mask;
handler = handle_level_irq;
break;
case IRQ_TYPE_EDGE_BOTH:
gpio_int_type1[port] |= port_mask;
eic->int_type1 |= port_mask;
/* set initial polarity based on current input level */
if (gc->get(gc, offset))
gpio_int_type2[port] &= ~port_mask; /* falling */
eic->int_type2 &= ~port_mask; /* falling */
else
gpio_int_type2[port] |= port_mask; /* rising */
eic->int_type2 |= port_mask; /* rising */
handler = handle_edge_irq;
break;
default:
@@ -253,22 +257,13 @@
irq_set_handler_locked(d, handler);
gpio_int_enabled[port] |= port_mask;
eic->int_enabled |= port_mask;
ep93xx_gpio_update_int_params(epg, port);
ep93xx_gpio_update_int_params(epg, eic);
return 0;
}
static struct irq_chip ep93xx_gpio_irq_chip = {
.name = "GPIO",
.irq_ack = ep93xx_gpio_irq_ack,
.irq_mask_ack = ep93xx_gpio_irq_mask_ack,
.irq_mask = ep93xx_gpio_irq_mask,
.irq_unmask = ep93xx_gpio_irq_unmask,
.irq_set_type = ep93xx_gpio_irq_type,
};
/*************************************************************************
* gpiolib interface for EP93xx on-chip GPIOs
*************************************************************************/
@@ -276,17 +271,19 @@ struct ep93xx_gpio_bank {
const char *label;
int data;
int dir;
int irq;
int base;
bool has_irq;
bool has_hierarchical_irq;
unsigned int irq_base;
};
#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq, _has_hier, _irq_base) \
#define EP93XX_GPIO_BANK(_label, _data, _dir, _irq, _base, _has_irq, _has_hier, _irq_base) \
{ \
.label = _label, \
.data = _data, \
.dir = _dir, \
.irq = _irq, \
.base = _base, \
.has_irq = _has_irq, \
.has_hierarchical_irq = _has_hier, \
@@ -295,16 +292,16 @@ struct ep93xx_gpio_bank {
static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
/* Bank A has 8 IRQs */
EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true, false, 64),
EP93XX_GPIO_BANK("A", 0x00, 0x10, 0x90, 0, true, false, 64),
/* Bank B has 8 IRQs */
EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true, false, 72),
EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false, false, 0),
EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false, false, 0),
EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false, false, 0),
EP93XX_GPIO_BANK("B", 0x04, 0x14, 0xac, 8, true, false, 72),
EP93XX_GPIO_BANK("C", 0x08, 0x18, 0x00, 40, false, false, 0),
EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 0x00, 24, false, false, 0),
EP93XX_GPIO_BANK("E", 0x20, 0x24, 0x00, 32, false, false, 0),
/* Bank F has 8 IRQs */
EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, false, true, 0),
EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false, false, 0),
EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false, false, 0),
EP93XX_GPIO_BANK("F", 0x30, 0x34, 0x4c, 16, false, true, 0),
EP93XX_GPIO_BANK("G", 0x38, 0x3c, 0x00, 48, false, false, 0),
EP93XX_GPIO_BANK("H", 0x40, 0x44, 0x00, 56, false, false, 0),
};
static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
@@ -326,13 +323,23 @@ static int ep93xx_gpio_f_to_irq(struct gpio_chip *gc, unsigned offset)
return EP93XX_GPIO_F_IRQ_BASE + offset;
}
static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
static void ep93xx_init_irq_chip(struct device *dev, struct irq_chip *ic)
{
ic->irq_ack = ep93xx_gpio_irq_ack;
ic->irq_mask_ack = ep93xx_gpio_irq_mask_ack;
ic->irq_mask = ep93xx_gpio_irq_mask;
ic->irq_unmask = ep93xx_gpio_irq_unmask;
ic->irq_set_type = ep93xx_gpio_irq_type;
}
static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
struct platform_device *pdev,
struct ep93xx_gpio *epg,
struct ep93xx_gpio_bank *bank)
{
void __iomem *data = epg->base + bank->data;
void __iomem *dir = epg->base + bank->dir;
struct gpio_chip *gc = &egc->gc;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
int err;
@@ -346,8 +353,21 @@
girq = &gc->irq;
if (bank->has_irq || bank->has_hierarchical_irq) {
struct irq_chip *ic;
gc->set_config = ep93xx_gpio_set_config;
girq->chip = &ep93xx_gpio_irq_chip;
egc->eic = devm_kcalloc(dev, 1,
sizeof(*egc->eic),
GFP_KERNEL);
if (!egc->eic)
return -ENOMEM;
egc->eic->irq_offset = bank->irq;
ic = &egc->eic->ic;
ic->name = devm_kasprintf(dev, GFP_KERNEL, "gpio-irq-%s", bank->label);
if (!ic->name)
return -ENOMEM;
ep93xx_init_irq_chip(dev, ic);
girq->chip = ic;
}
if (bank->has_irq) {
@@ -389,7 +409,7 @@
gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i;
irq_set_chip_data(gpio_irq, &epg->gc[5]);
irq_set_chip_and_handler(gpio_irq,
&ep93xx_gpio_irq_chip,
girq->chip,
handle_level_irq);
irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
}
@@ -415,7 +435,7 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
return PTR_ERR(epg->base);
for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
struct gpio_chip *gc = &epg->gc[i];
struct ep93xx_gpio_chip *gc = &epg->gc[i];
struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
if (ep93xx_gpio_add_bank(gc, pdev, epg, bank))

@@ -1792,8 +1792,8 @@ static void emulated_link_detect(struct dc_link *link)
link->type = dc_connection_none;
prev_sink = link->local_sink;
if (prev_sink != NULL)
dc_sink_retain(prev_sink);
if (prev_sink)
dc_sink_release(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
@@ -2261,8 +2261,10 @@ void amdgpu_dm_update_connector_after_detect(
* TODO: check if we still need the S3 mode update workaround.
* If yes, put it here.
*/
if (aconnector->dc_sink)
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
dc_sink_release(aconnector->dc_sink);
}
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
@@ -7870,14 +7872,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(conn_state);
if (ret)
goto err;
goto out;
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
goto err;
goto out;
/* force a restore */
crtc_state->mode_changed = true;
@@ -7887,17 +7889,15 @@
ret = PTR_ERR_OR_ZERO(plane_state);
if (ret)
goto err;
goto out;
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
if (!ret)
return 0;
err:
DRM_ERROR("Restoring old state failed with %i\n", ret);
out:
drm_atomic_state_put(state);
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
return ret;
}

@@ -828,6 +828,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
return false;
mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
mutex_unlock(&aconnector->mst_mgr.lock);
@@ -845,7 +848,8 @@
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
return false;
}
return true;

@@ -877,14 +877,14 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
switch (dpcd_aux_read_interval) {
case 0x01:
aux_rd_interval_us = 400;
break;
case 0x02:
aux_rd_interval_us = 4000;
break;
case 0x03:
case 0x02:
aux_rd_interval_us = 8000;
break;
case 0x03:
aux_rd_interval_us = 12000;
break;
case 0x04:
aux_rd_interval_us = 16000;
break;

@@ -297,8 +297,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
},
},
.num_states = 5,
.sr_exit_time_us = 11.6,
.sr_enter_plus_exit_time_us = 13.9,
.sr_exit_time_us = 8.6,
.sr_enter_plus_exit_time_us = 10.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,

@@ -902,6 +902,8 @@ enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
DCN20_CLK_SRC_PLL3,
DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_TOTAL_DCN21
};
@@ -1880,6 +1882,14 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL3,
&clk_src_regs[3], false);
pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL4,
&clk_src_regs[4], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;

@@ -4224,6 +4224,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
break;
case DP_PEER_DEVICE_MST_BRANCHING:
if (!port->mcs)
ret = connector_status_connected;

@@ -182,6 +182,7 @@ struct intel_overlay {
struct intel_crtc *crtc;
struct i915_vma *vma;
struct i915_vma *old_vma;
struct intel_frontbuffer *frontbuffer;
bool active;
bool pfit_active;
u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
@@ -282,21 +283,19 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
enum pipe pipe = overlay->crtc->pipe;
struct intel_frontbuffer *from = NULL, *to = NULL;
struct intel_frontbuffer *frontbuffer = NULL;
drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
if (overlay->vma)
from = intel_frontbuffer_get(overlay->vma->obj);
if (vma)
to = intel_frontbuffer_get(vma->obj);
frontbuffer = intel_frontbuffer_get(vma->obj);
intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
intel_frontbuffer_track(overlay->frontbuffer, frontbuffer,
INTEL_FRONTBUFFER_OVERLAY(pipe));
if (to)
intel_frontbuffer_put(to);
if (from)
intel_frontbuffer_put(from);
if (overlay->frontbuffer)
intel_frontbuffer_put(overlay->frontbuffer);
overlay->frontbuffer = frontbuffer;
intel_frontbuffer_flip_prepare(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(pipe));

@@ -23,36 +23,6 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
return names[mode];
}
static void
tc_port_load_fia_params(struct drm_i915_private *i915,
struct intel_digital_port *dig_port)
{
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(i915, port);
u32 modular_fia;
if (INTEL_INFO(i915)->display.has_modular_fia) {
modular_fia = intel_uncore_read(&i915->uncore,
PORT_TX_DFLEXDPSP(FIA1));
drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
modular_fia &= MODULAR_FIA_MASK;
} else {
modular_fia = 0;
}
/*
* Each Modular FIA instance houses 2 TC ports. In SOC that has more
* than two TC ports, there are multiple instances of Modular FIA.
*/
if (modular_fia) {
dig_port->tc_phy_fia = tc_port / 2;
dig_port->tc_phy_fia_idx = tc_port % 2;
} else {
dig_port->tc_phy_fia = FIA1;
dig_port->tc_phy_fia_idx = tc_port;
}
}
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
@@ -646,6 +616,43 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
mutex_unlock(&dig_port->tc_lock);
}
static bool
tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
intel_wakeref_t wakeref;
u32 val;
if (!INTEL_INFO(i915)->display.has_modular_fia)
return false;
wakeref = tc_cold_block(dig_port);
val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
tc_cold_unblock(dig_port, wakeref);
drm_WARN_ON(&i915->drm, val == 0xffffffff);
return val & MODULAR_FIA_MASK;
}
static void
tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(i915, port);
/*
* Each Modular FIA instance houses 2 TC ports. In SOC that has more
* than two TC ports, there are multiple instances of Modular FIA.
*/
if (tc_has_modular_fia(i915, dig_port)) {
dig_port->tc_phy_fia = tc_port / 2;
dig_port->tc_phy_fia_idx = tc_port % 2;
} else {
dig_port->tc_phy_fia = FIA1;
dig_port->tc_phy_fia_idx = tc_port;
}
}
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

@@ -689,6 +689,30 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
SUN4I_TCON1_BASIC5_V_SYNC(vsync) |
SUN4I_TCON1_BASIC5_H_SYNC(hsync));
/* Setup the polarity of multiple signals */
if (tcon->quirks->polarity_in_ch0) {
val = 0;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
} else {
/* according to vendor driver, this bit must be always set */
val = SUN4I_TCON1_IO_POL_UNKNOWN;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE;
regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val);
}
/* Map output pins to channel 1 */
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_IOMAP_MASK,
@@ -1517,6 +1541,7 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
.has_channel_1 = true,
.polarity_in_ch0 = true,
.set_mux = sun8i_r40_tcon_tv_set_mux,
};

@@ -153,6 +153,11 @@
#define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff)
#define SUN4I_TCON1_IO_POL_REG 0xf0
/* there is no documentation about this bit */
#define SUN4I_TCON1_IO_POL_UNKNOWN BIT(26)
#define SUN4I_TCON1_IO_POL_HSYNC_POSITIVE BIT(25)
#define SUN4I_TCON1_IO_POL_VSYNC_POSITIVE BIT(24)
#define SUN4I_TCON1_IO_TRI_REG 0xf4
#define SUN4I_TCON_ECC_FIFO_REG 0xf8
@@ -235,6 +240,7 @@ struct sun4i_tcon_quirks {
bool needs_de_be_mux; /* sun6i needs mux to select backend */
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
bool supports_lvds; /* Does the TCON support an LVDS output? */
bool polarity_in_ch0; /* some tcon1 channels have polarity bits in tcon0 pol register */
u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
/* callback to handle tcon muxing options */

@@ -21,8 +21,7 @@ static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
{
struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder);
if (hdmi->quirks->set_rate)
clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
}
static const struct drm_encoder_helper_funcs
@@ -48,11 +47,9 @@ sun8i_dw_hdmi_mode_valid_h6(struct dw_hdmi *hdmi, void *data,
{
/*
* Controller support maximum of 594 MHz, which correlates to
* 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
* 340 MHz scrambling has to be enabled. Because scrambling is
* not yet implemented, just limit to 340 MHz for now.
* 4K@60Hz 4:4:4 or RGB.
*/
if (mode->clock > 340000)
if (mode->clock > 594000)
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -295,7 +292,6 @@ static int sun8i_dw_hdmi_remove(struct platform_device *pdev)
static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_a83t,
.set_rate = true,
};
static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {

@@ -179,7 +179,6 @@ struct sun8i_dw_hdmi_quirks {
enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
unsigned int set_rate : 1;
unsigned int use_drm_infoframe : 1;
};

@@ -104,29 +104,21 @@ static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {
static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
/* pixelclk bpp8 bpp10 bpp12 */
{ 25175000, { 0x0000, 0x0000, 0x0000 }, },
{ 27000000, { 0x0012, 0x0000, 0x0000 }, },
{ 59400000, { 0x0008, 0x0008, 0x0008 }, },
{ 72000000, { 0x0008, 0x0008, 0x001b }, },
{ 74250000, { 0x0013, 0x0013, 0x0013 }, },
{ 90000000, { 0x0008, 0x001a, 0x001b }, },
{ 118800000, { 0x001b, 0x001a, 0x001b }, },
{ 144000000, { 0x001b, 0x001a, 0x0034 }, },
{ 180000000, { 0x001b, 0x0033, 0x0034 }, },
{ 216000000, { 0x0036, 0x0033, 0x0034 }, },
{ 237600000, { 0x0036, 0x0033, 0x001b }, },
{ 288000000, { 0x0036, 0x001b, 0x001b }, },
{ 297000000, { 0x0019, 0x001b, 0x0019 }, },
{ 330000000, { 0x0036, 0x001b, 0x001b }, },
{ 594000000, { 0x003f, 0x001b, 0x001b }, },
{ 74250000, { 0x0013, 0x001a, 0x001b }, },
{ 148500000, { 0x0019, 0x0033, 0x0034 }, },
{ 297000000, { 0x0019, 0x001b, 0x001b }, },
{ 594000000, { 0x0010, 0x001b, 0x001b }, },
{ ~0UL, { 0x0000, 0x0000, 0x0000 }, }
};
static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
/*pixelclk symbol term vlev*/
-	{ 74250000, 0x8009, 0x0004, 0x0232},
-	{ 148500000, 0x8029, 0x0004, 0x0273},
-	{ 594000000, 0x8039, 0x0004, 0x014a},
+	{ 27000000, 0x8009, 0x0007, 0x02b0 },
+	{ 74250000, 0x8009, 0x0006, 0x022d },
+	{ 148500000, 0x8029, 0x0006, 0x0270 },
+	{ 297000000, 0x8039, 0x0005, 0x01ab },
+	{ 594000000, 0x8029, 0x0000, 0x008a },
{ ~0UL, 0x0000, 0x0000, 0x0000}
};

@@ -220,7 +220,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}
-static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
{
if (vc4_state->dlist_count == vc4_state->dlist_size) {
u32 new_size = max(4u, vc4_state->dlist_count * 2);
@@ -235,7 +235,15 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
vc4_state->dlist_size = new_size;
}
-	vc4_state->dlist[vc4_state->dlist_count++] = val;
+	vc4_state->dlist_count++;
}
+static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+{
+	unsigned int idx = vc4_state->dlist_count;
+	vc4_dlist_counter_increment(vc4_state);
+	vc4_state->dlist[idx] = val;
+}
/* Returns the scl0/scl1 field based on whether the dimensions need to
@@ -978,8 +986,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* be set when calling vc4_plane_allocate_lbm().
*/
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
-	    vc4_state->y_scaling[1] != VC4_SCALING_NONE)
-		vc4_state->lbm_offset = vc4_state->dlist_count++;
+	    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+		vc4_state->lbm_offset = vc4_state->dlist_count;
+		vc4_dlist_counter_increment(vc4_state);
+	}
if (num_planes > 1) {
/* Emit Cb/Cr as channel 0 and Y as channel
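The rewrite above exists because the old vc4_state->dlist[vc4_state->dlist_count++] = val could evaluate the array pointer before the grow path reallocated it; splitting the counter bump (which may resize the buffer) from the indexed store removes the stale-pointer window. A minimal userspace sketch of the same grow-then-write pattern, using plain realloc() instead of the kernel helpers (all names here are illustrative):

#include <stdlib.h>

struct dlist {
	unsigned int count, size;
	unsigned int *entries;
};

/* Grow the backing store if needed, then account for one new slot. */
static void dlist_counter_increment(struct dlist *d)
{
	if (d->count == d->size) {
		unsigned int new_size = d->size > 2 ? d->size * 2 : 4;

		/* may move d->entries; error handling elided for brevity */
		d->entries = realloc(d->entries, new_size * sizeof(*d->entries));
		d->size = new_size;
	}
	d->count++;
}

static void dlist_write(struct dlist *d, unsigned int val)
{
	unsigned int idx = d->count;	/* capture the slot index first */

	dlist_counter_increment(d);	/* then grow: pointer may change */
	d->entries[idx] = val;		/* store through the fresh pointer */
}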

@@ -57,6 +57,8 @@
#define STM32F7_I2C_CR1_RXDMAEN BIT(15)
#define STM32F7_I2C_CR1_TXDMAEN BIT(14)
#define STM32F7_I2C_CR1_ANFOFF BIT(12)
#define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8)
#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8)
#define STM32F7_I2C_CR1_ERRIE BIT(7)
#define STM32F7_I2C_CR1_TCIE BIT(6)
#define STM32F7_I2C_CR1_STOPIE BIT(5)
@@ -160,7 +162,7 @@ enum {
};
#define STM32F7_I2C_DNF_DEFAULT 0
-#define STM32F7_I2C_DNF_MAX 15
+#define STM32F7_I2C_DNF_MAX 15
#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
@@ -725,6 +727,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
else
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_ANFOFF);
/* Program the Digital Filter */
stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_DNF_MASK);
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_PE);
}
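The added CR1 programming is a standard clear-then-set update of a multi-bit field: the old digital-filter value is masked out before the new one is OR-ed in, so stale bits cannot survive, and the companion change caps STM32F7_I2C_DNF_MAX at 15 because the field spans only bits 11:8. A small standalone illustration of the same read-modify-write, with a plain variable standing in for the register:

#include <stdint.h>
#include <stdio.h>

#define DNF_SHIFT	8
#define DNF_MASK	(0xfu << DNF_SHIFT)	/* GENMASK(11, 8) */
#define DNF(n)		(((n) & 0xfu) << DNF_SHIFT)

int main(void)
{
	uint32_t cr1 = DNF(0xa) | 0x1;	/* stale filter value plus other bits */

	cr1 &= ~DNF_MASK;	/* clear the whole field first */
	cr1 |= DNF(0x3);	/* then program the new value */

	printf("CR1 = 0x%08x\n", cr1);	/* prints 0x00000301 */
	return 0;
}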

@@ -18,7 +18,7 @@ CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO)
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
-	--rename-section .text=.rodata,alloc,readonly,load
+	--rename-section .noinstr.text=.rodata,alloc,readonly,load
targets += rodata.o rodata_objcopy.o
$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
$(call if_changed,objcopy)

@@ -5,7 +5,7 @@
*/
#include "lkdtm.h"
-void notrace lkdtm_rodata_do_nothing(void)
+void noinstr lkdtm_rodata_do_nothing(void)
{
/* Does nothing. We just want an architecture agnostic "return". */
}

@@ -214,9 +214,24 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
{
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port = ocelot->ports[port];
+	int err;
+	ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
+			 DEV_MAC_ENA_CFG);
-	ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
	ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
+	err = ocelot_port_flush(ocelot, port);
+	if (err)
+		dev_err(ocelot->dev, "failed to flush port %d: %d\n",
+			port, err);
+	/* Put the port in reset. */
+	ocelot_port_writel(ocelot_port,
+			   DEV_CLOCK_CFG_MAC_TX_RST |
+			   DEV_CLOCK_CFG_MAC_RX_RST |
+			   DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
+			   DEV_CLOCK_CFG);
}
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,

@@ -196,6 +196,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_CBS_BW_MASK GENMASK(6, 0)
#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
#define ENETC_RSSHASH_KEY_SIZE 40
#define ENETC_PRSSCAPR 0x1404
#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
#define ENETC_PSIVLANFMR 0x1700
#define ENETC_PSIVLANFMR_VS BIT(0)
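ENETC_PRSSCAPR_GET_NUM_RSS decodes the capability register: the low nibble is a power-of-two exponent scaled by 32 table entries, so a field value of 2 means BIT(2) * 32 = 128 entries. A quick sketch of the decode with an invented register value:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1u << (n))
#define PRSSCAPR_GET_NUM_RSS(val)	(BIT((val) & 0xf) * 32)

int main(void)
{
	uint32_t capr = 0x2;	/* hypothetical PRSSCAPR readout */

	printf("RSS table entries: %u\n", PRSSCAPR_GET_NUM_RSS(capr));
	return 0;	/* prints 128 */
}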

@@ -1004,6 +1004,51 @@ static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
phylink_destroy(priv->phylink);
}
/* Initialize the entire shared memory for the flow steering entries
* of this port (PF + VFs)
*/
static int enetc_init_port_rfs_memory(struct enetc_si *si)
{
struct enetc_cmd_rfse rfse = {0};
struct enetc_hw *hw = &si->hw;
int num_rfs, i, err = 0;
u32 val;
val = enetc_port_rd(hw, ENETC_PRFSCAPR);
num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);
for (i = 0; i < num_rfs; i++) {
err = enetc_set_fs_entry(si, &rfse, i);
if (err)
break;
}
return err;
}
static int enetc_init_port_rss_memory(struct enetc_si *si)
{
struct enetc_hw *hw = &si->hw;
int num_rss, err;
int *rss_table;
u32 val;
val = enetc_port_rd(hw, ENETC_PRSSCAPR);
num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
if (!num_rss)
return 0;
rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
if (!rss_table)
return -ENOMEM;
err = enetc_set_rss_table(si, rss_table, num_rss);
kfree(rss_table);
return err;
}
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1058,6 +1103,18 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_si_res;
}
err = enetc_init_port_rfs_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
goto err_init_port_rfs;
}
err = enetc_init_port_rss_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
goto err_init_port_rss;
}
err = enetc_alloc_msix(priv);
if (err) {
dev_err(&pdev->dev, "MSIX alloc failed\n");
@@ -1086,6 +1143,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
enetc_mdiobus_destroy(pf);
err_mdiobus_create:
enetc_free_msix(priv);
err_init_port_rss:
err_init_port_rfs:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
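The probe hunk slots two new labels into the existing goto-based unwind chain; because the RFS/RSS initialization needs no dedicated teardown, err_init_port_rss and err_init_port_rfs simply fall through to the MSI-X cleanup below them. A compact sketch of that layered-cleanup idiom (the step names are made up):

#include <stdio.h>

static int setup_irqs(void) { return 0; }
static int init_tables(void) { return -1; }	/* pretend this step fails */
static void teardown_irqs(void) { puts("teardown irqs"); }

static int probe(void)
{
	int err;

	err = setup_irqs();
	if (err)
		goto err_irqs;

	err = init_tables();
	if (err)
		goto err_tables;	/* nothing of its own to undo */

	return 0;

err_tables:	/* falls through: unwind everything done so far */
	teardown_irqs();
err_irqs:
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}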

@@ -9404,12 +9404,19 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret;
if (queue_id >= handle->kinfo.num_tqps) {
dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
queue_id);
return;
}
queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);

@@ -158,21 +158,31 @@ static int hclge_get_ring_chain_from_mbx(
struct hclge_vport *vport)
{
struct hnae3_ring_chain_node *cur_chain, *new_chain;
+	struct hclge_dev *hdev = vport->back;
	int ring_num;
-	int i = 0;
+	int i;
ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
return -ENOMEM;
+	for (i = 0; i < ring_num; i++) {
+		if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
+			dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
+				req->msg.param[i].tqp_index,
+				vport->nic.kinfo.rss_size - 1);
+			return -EINVAL;
+		}
+	}
hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
-		      req->msg.param[i].ring_type);
+		      req->msg.param[0].ring_type);
	ring_chain->tqp_index =
		hclge_get_queue_id(vport->nic.kinfo.tqp
-			[req->msg.param[i].tqp_index]);
+			[req->msg.param[0].tqp_index]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
-			HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);
+			HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);
cur_chain = ring_chain;
@@ -581,6 +591,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
index = mbx_req->msg.data[0];
/* Check the query index of rss_hash_key from VF, make sure no
* more than the size of rss_hash_key.
*/
if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
sizeof(vport[0].rss_hash_key)) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
return;
}
memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);

@@ -4813,7 +4813,22 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
}
-			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+			rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+			if (rc && rc != -EBUSY) {
+				/* We were unable to schedule the failover
+				 * reset either because the adapter was still
+				 * probing (eg: during kexec) or we could not
+				 * allocate memory. Clear the failover_pending
+				 * flag since no one else will. We ignore
+				 * EBUSY because it means either FAILOVER reset
+				 * is already scheduled or the adapter is
+				 * being removed.
+				 */
+				netdev_err(netdev,
+					   "Error %ld scheduling failover reset\n",
+					   rc);
+				adapter->failover_pending = false;
+			}
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");

@@ -348,6 +348,60 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
}
}
static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
{
return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
}
int ocelot_port_flush(struct ocelot *ocelot, int port)
{
int err, val;
/* Disable dequeuing from the egress queues */
ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
QSYS_PORT_MODE_DEQUEUE_DIS,
QSYS_PORT_MODE, port);
/* Disable flow control */
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
/* Disable priority flow control */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
/* Wait at least the time it takes to receive a frame of maximum length
* at the port.
* Worst-case delays for 10 kilobyte jumbo frames are:
* 8 ms on a 10M port
* 800 μs on a 100M port
* 80 μs on a 1G port
* 32 μs on a 2.5G port
*/
usleep_range(8000, 10000);
/* Disable half duplex backpressure. */
ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
SYS_FRONT_PORT_MODE, port);
/* Flush the queues associated with the port. */
ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
REW_PORT_CFG, port);
/* Enable dequeuing from the egress queues. */
ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
port);
/* Wait until flushing is complete. */
err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
100, 2000000, false, ocelot, port);
/* Clear flushing again. */
ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
return err;
}
EXPORT_SYMBOL(ocelot_port_flush);
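The usleep_range(8000, 10000) above comes straight from line-rate arithmetic: a worst-case 10 KB frame is 80,000 bits, and 80,000 bits / 10 Mbit/s = 8 ms, shrinking to 80 µs at 1 Gbit/s and 32 µs at 2.5 Gbit/s. The same calculation as a runnable check:

#include <stdio.h>

int main(void)
{
	const double frame_bits = 10000 * 8;	/* 10 KB jumbo frame */
	const double rates_mbps[] = { 10, 100, 1000, 2500 };

	/* bits divided by Mbit/s comes out directly in microseconds */
	for (int i = 0; i < 4; i++)
		printf("%6.0f Mbit/s -> %7.1f us\n",
		       rates_mbps[i], frame_bits / rates_mbps[i]);
	return 0;
}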
void ocelot_adjust_link(struct ocelot *ocelot, int port,
struct phy_device *phydev)
{

@@ -71,6 +71,14 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
}
EXPORT_SYMBOL(ocelot_port_writel);
void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
{
u32 cur = ocelot_port_readl(port, reg);
ocelot_port_writel(port, (cur & (~mask)) | val, reg);
}
EXPORT_SYMBOL(ocelot_port_rmwl);
u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 reg, u32 offset)
{

@@ -330,7 +330,12 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
} else if (!qopt->enable) {
-		return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
+				       MTL_QUEUE_DCB);
+		if (ret)
+			return ret;
+		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
}
/* Port Transmit Rate and Speed Divider */

@@ -1253,8 +1253,11 @@ static int netvsc_receive(struct net_device *ndev,
ret = rndis_filter_receive(ndev, net_device,
nvchan, data, buflen);
-		if (unlikely(ret != NVSP_STAT_SUCCESS))
+		if (unlikely(ret != NVSP_STAT_SUCCESS)) {
+			/* Drop incomplete packet */
+			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
+		}
}
enq_receive_complete(ndev, net_device, q_idx,

@@ -508,8 +508,6 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return ret;
drop:
-	/* Drop incomplete packet */
-	nvchan->rsc.cnt = 0;
return NVSP_STAT_FAIL;
}

@@ -1573,6 +1573,7 @@ static int gsi_channel_setup(struct gsi *gsi, bool legacy)
if (!channel->gsi)
continue; /* Ignore uninitialized channels */
ret = -EINVAL;
dev_err(gsi->dev, "channel %u not supported by hardware\n",
channel_id - 1);
channel_id = gsi->channel_count;

@@ -171,11 +171,11 @@ static int x25_open(struct net_device *dev)
result = lapb_register(dev, &cb);
if (result != LAPB_OK)
-		return result;
+		return -ENOMEM;
result = lapb_getparms(dev, &params);
if (result != LAPB_OK)
-		return result;
+		return -EINVAL;
if (state(hdlc)->settings.dce)
params.mode = params.mode | LAPB_DCE;
@@ -190,7 +190,7 @@ static int x25_open(struct net_device *dev)
result = lapb_setparms(dev, &params);
if (result != LAPB_OK)
-		return result;
+		return -EINVAL;
return 0;
}

@@ -21,11 +21,9 @@ config ATH9K_BTCOEX_SUPPORT
config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on MAC80211 && HAS_DMA
+	select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
	select ATH9K_HW
	select ATH9K_COMMON
-	imply NEW_LEDS
-	imply LEDS_CLASS
-	imply MAC80211_LEDS
help
This module adds support for wireless adapters based on
Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
@@ -176,11 +174,9 @@ config ATH9K_PCI_NO_EEPROM
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
+	select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
	select ATH9K_HW
	select ATH9K_COMMON
-	imply NEW_LEDS
-	imply LEDS_CLASS
-	imply MAC80211_LEDS
help
Support for Atheros HTC based cards.
Chipsets supported: AR9271

@@ -519,15 +519,17 @@ static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
int len, bool more)
{
-	struct page *page = virt_to_head_page(data);
-	int offset = data - page_address(page);
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
-		offset += q->buf_offset;
+		struct page *page = virt_to_head_page(data);
+		int offset = data - page_address(page) + q->buf_offset;
		skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
				q->buf_size);
+	} else {
+		skb_free_frag(data);
}
if (more)

@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
RING_IDX prod, cons;
struct sk_buff *skb;
int needed;
+	unsigned long flags;
+	spin_lock_irqsave(&queue->rx_queue.lock, flags);
	skb = skb_peek(&queue->rx_queue);
-	if (!skb)
+	if (!skb) {
+		spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
		return false;
+	}
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
if (skb_is_gso(skb))
@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
if (skb->sw_hash)
needed++;
+	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
do {
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;

@@ -3247,6 +3247,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */

@@ -32,6 +32,10 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
static int enable_tablet_mode_sw = -1;
module_param(enable_tablet_mode_sw, int, 0444);
MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
@@ -654,10 +658,12 @@ static int __init hp_wmi_input_setup(void)
}
/* Tablet mode */
-	val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
-	if (!(val < 0)) {
-		__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
-		input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+	if (enable_tablet_mode_sw > 0) {
+		val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
+		if (val >= 0) {
+			__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+		}
}
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);

@@ -714,6 +714,9 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return -ENODEV;
}
if (!vport->phba->sli4_hba.nvmels_wq)
return -ENOMEM;
/*
* there are two dma buf in the request, actually there is one and
* the second one is just the start address + cmd size.

@@ -6881,6 +6881,7 @@ static void __exit scsi_debug_exit(void)
sdebug_erase_all_stores(false);
xa_destroy(per_store_ap);
kfree(sdebug_q_arr);
}
device_initcall(scsi_debug_init);

@@ -552,6 +552,7 @@ static int omap_prm_reset_init(struct platform_device *pdev,
const struct omap_rst_map *map;
struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
char buf[32];
u32 v;
/*
* Check if we have controllable resets. If either rstctrl is non-zero
@@ -599,6 +600,16 @@ static int omap_prm_reset_init(struct platform_device *pdev,
map++;
}
/* Quirk handling to assert rst_map_012 bits on reset and avoid errors */
if (prm->data->rstmap == rst_map_012) {
v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
if ((v & reset->mask) != reset->mask) {
dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v);
writel_relaxed(reset->mask, reset->prm->base +
reset->prm->data->rstctrl);
}
}
return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
}

@@ -1646,9 +1646,16 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
-	kcov_remote_start_usb((u64)urb->dev->bus->busnum);
+	/*
+	 * This function can be called in task context inside another remote
+	 * coverage collection section, but KCOV doesn't support that kind of
+	 * recursion yet. Only collect coverage in softirq context for now.
+	 */
+	if (in_serving_softirq())
+		kcov_remote_start_usb((u64)urb->dev->bus->busnum);
	urb->complete(urb);
-	kcov_remote_stop();
+	if (in_serving_softirq())
+		kcov_remote_stop();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);

@@ -115,7 +115,6 @@ int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
int xenbus_probe_devices(struct xen_bus_type *bus);
-void xenbus_probe(void);
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);

@@ -683,7 +683,7 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
-void xenbus_probe(void)
+static void xenbus_probe(void)
{
xenstored_ready = 1;

@@ -204,7 +204,7 @@ config TMPFS_XATTR
config TMPFS_INODE64
bool "Use 64-bit ino_t by default in tmpfs"
-	depends on TMPFS && 64BIT
+	depends on TMPFS && 64BIT && !(S390 || ALPHA)
default n
help
tmpfs has historically used only inode numbers as wide as an unsigned

@@ -84,6 +84,14 @@ int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
if (ovl_is_private_xattr(sb, name))
continue;
+		error = security_inode_copy_up_xattr(name);
+		if (error < 0 && error != -EOPNOTSUPP)
+			break;
+		if (error == 1) {
+			error = 0;
+			continue; /* Discard */
+		}
retry:
size = vfs_getxattr(old, name, value, value_size);
if (size == -ERANGE)
@@ -107,13 +115,6 @@ int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
goto retry;
}
-		error = security_inode_copy_up_xattr(name);
-		if (error < 0 && error != -EOPNOTSUPP)
-			break;
-		if (error == 1) {
-			error = 0;
-			continue; /* Discard */
-		}
error = vfs_setxattr(new, name, value, size, 0);
if (error) {
if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))

@@ -346,7 +346,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
goto out;
if (!value && !upperdentry) {
+		old_cred = ovl_override_creds(dentry->d_sb);
		err = vfs_getxattr(realdentry, name, NULL, 0);
+		revert_creds(old_cred);
if (err < 0)
goto out_drop_write;
}

@@ -84,7 +84,7 @@ static void ovl_dentry_release(struct dentry *dentry)
static struct dentry *ovl_d_real(struct dentry *dentry,
const struct inode *inode)
{
-	struct dentry *real;
+	struct dentry *real = NULL, *lower;
/* It's an overlay file */
if (inode && d_inode(dentry) == inode)
@@ -103,9 +103,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
if (real && !inode && ovl_has_upperdata(d_inode(dentry)))
return real;
-	real = ovl_dentry_lowerdata(dentry);
-	if (!real)
+	lower = ovl_dentry_lowerdata(dentry);
+	if (!lower)
		goto bug;
+	real = lower;
/* Handle recursion */
real = d_real(real, inode);
@@ -113,8 +114,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
if (!inode || inode == d_inode(real))
return real;
bug:
-	WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
-	     inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
+	WARN(1, "%s(%pd4, %s:%lu): real dentry (%p/%lu) not found\n",
+	     __func__, dentry, inode ? inode->i_sb->s_id : "NULL",
+	     inode ? inode->i_ino : 0, real,
+	     real && d_inode(real) ? d_inode(real)->i_ino : 0);
return dentry;
}

@@ -462,7 +462,7 @@
} \
\
/* Built-in firmware blobs */ \
-	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
+	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
__start_builtin_fw = .; \
KEEP(*(.builtin_fw)) \
__end_builtin_fw = .; \

@@ -4313,6 +4313,7 @@ static inline void netif_tx_disable(struct net_device *dev)
local_bh_disable();
cpu = smp_processor_id();
spin_lock(&dev->tx_global_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -4320,6 +4321,7 @@ static inline void netif_tx_disable(struct net_device *dev)
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
spin_unlock(&dev->tx_global_lock);
local_bh_enable();
}

@@ -260,7 +260,13 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
+struct csum_state {
+	__wsum csum;
+	size_t off;
+};
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,

@@ -41,7 +41,6 @@ enum switchdev_attr_id {
SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
-	SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,
#endif
};
@@ -60,7 +59,6 @@ struct switchdev_attr {
bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
bool mc_disabled; /* MC_DISABLED */
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
-	u8 mrp_port_state; /* MRP_PORT_STATE */
u8 mrp_port_role; /* MRP_PORT_ROLE */
#endif
} u;

@@ -703,6 +703,7 @@ struct ocelot_policer {
/* I/O */
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg);
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
@@ -731,6 +732,7 @@ int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct ethtool_ts_info *info);
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
int ocelot_port_flush(struct ocelot *ocelot, int port);
void ocelot_adjust_link(struct ocelot *ocelot, int port,
struct phy_device *phydev);
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,

@@ -192,8 +192,6 @@ void xs_suspend_cancel(void);
struct work_struct;
-void xenbus_probe(void);
#define XENBUS_IS_ERR_READ(str) ({ \
if (!IS_ERR(str) && strlen(str) == 0) { \
kfree(str); \

@@ -116,6 +116,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
/* hash table size must be power of 2 */
n_buckets = roundup_pow_of_two(attr->max_entries);
if (!n_buckets)
return ERR_PTR(-E2BIG);
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
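The new !n_buckets test guards a 32-bit overflow: for max_entries above 2^31, rounding up to the next power of two yields 2^32, which truncates to zero in the u32 n_buckets and would corrupt the size math that follows. A userspace sketch of the truncation (simplified round-up helper, not the kernel's fls-based one):

#include <stdint.h>
#include <stdio.h>

static uint64_t roundup_pow_of_two64(uint64_t n)
{
	uint64_t r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	uint32_t max_entries = 0x80000001u;		/* > 2^31 */
	uint32_t n_buckets = (uint32_t)roundup_pow_of_two64(max_entries);

	printf("n_buckets = %u\n", n_buckets);	/* 0: 2^32 wrapped */
	if (!n_buckets)
		puts("reject with -E2BIG, as the fix does");
	return 0;
}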

@@ -918,6 +918,9 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
for_each_subsys(ss, i) {
if (strcmp(param->key, ss->legacy_name))
continue;
if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
return invalfc(fc, "Disabled controller '%s'",
param->key);
ctx->subsys_mask |= (1 << i);
return 0;
}

@@ -3569,6 +3569,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
{
struct psi_trigger *new;
struct cgroup *cgrp;
struct psi_group *psi;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
@@ -3577,7 +3578,8 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
cgroup_get(cgrp);
cgroup_kn_unlock(of->kn);
-	new = psi_trigger_create(&cgrp->psi, buf, nbytes, res);
+	psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
+	new = psi_trigger_create(psi, buf, nbytes, res);
if (IS_ERR(new)) {
cgroup_put(cgrp);
return PTR_ERR(new);

@@ -93,9 +93,6 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
unsigned int ret;
-	if (in_nmi()) /* not supported yet */
-		return 1;
cant_sleep();
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {

@@ -2746,7 +2746,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
(entry = this_cpu_read(trace_buffered_event))) {
/* Try to use the per cpu buffer first */
val = this_cpu_inc_return(trace_buffered_event_cnt);
-		if (val == 1) {
+		if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
trace_event_setup(entry, type, flags, pc);
entry->array[0] = len;
return entry;
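The added length test means the preallocated per-CPU event page is only handed out when the record plus header actually fits; oversized events fall through to the regular ring-buffer reservation instead of overrunning the page. The guard in miniature, with invented sizes:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE	4096
#define HDR_SIZE	16	/* stand-in for sizeof(*entry) */

static char scratch[PAGE_SIZE];

/* Hand out the fast-path buffer only if len fits after the header. */
static void *reserve(size_t len)
{
	if (len < PAGE_SIZE - HDR_SIZE)
		return scratch + HDR_SIZE;
	return NULL;	/* caller falls back to the slow path */
}

int main(void)
{
	printf("small: %s\n", reserve(128) ? "fast path" : "fallback");
	printf("huge : %s\n", reserve(8192) ? "fast path" : "fallback");
	return 0;
}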

@@ -1212,7 +1212,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
mutex_lock(&event_mutex);
list_for_each_entry(file, &tr->events, list) {
call = file->event_call;
-		if (!trace_event_name(call) || !call->class || !call->class->reg)
+		if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
+		    !trace_event_name(call) || !call->class || !call->class->reg)
continue;
if (system && strcmp(call->class->system, system->name) != 0)

@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>
-#include <linux/sched/isolation.h>
/**
* cpumask_next - get the next cpu in a cpumask
@@ -206,27 +205,22 @@
*/
unsigned int cpumask_local_spread(unsigned int i, int node)
{
-	int cpu, hk_flags;
-	const struct cpumask *mask;
+	int cpu;
-	hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
-	mask = housekeeping_cpumask(hk_flags);
	/* Wrap: we always want a cpu. */
-	i %= cpumask_weight(mask);
+	i %= num_online_cpus();
	if (node == NUMA_NO_NODE) {
-		for_each_cpu(cpu, mask) {
+		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
-		}
	} else {
		/* NUMA first. */
-		for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
+		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;
-		}
-		for_each_cpu(cpu, mask) {
+		for_each_cpu(cpu, cpu_online_mask) {
/* Skip NUMA nodes, done above. */
if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
continue;
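With the revert applied, the selection logic is back to its original shape: wrap the index over the online CPUs, hand out NUMA-local CPUs first, then the remaining online CPUs while skipping the local ones already offered. A standalone sketch with arrays standing in for cpumasks (the CPU sets are invented):

#include <stdbool.h>
#include <stdio.h>

static bool in_set(const int *set, int n, int cpu)
{
	for (int k = 0; k < n; k++)
		if (set[k] == cpu)
			return true;
	return false;
}

/* Pick the i-th CPU, preferring the node-local set, wrapping as needed. */
static int local_spread(unsigned int i, const int *online, int n_online,
			const int *local, int n_local)
{
	i %= n_online;	/* wrap: we always want a cpu */
	for (int k = 0; k < n_local; k++)
		if (i-- == 0)
			return local[k];
	for (int k = 0; k < n_online; k++) {
		if (in_set(local, n_local, online[k]))
			continue;	/* local CPUs were offered above */
		if (i-- == 0)
			return online[k];
	}
	return -1;	/* unreachable after the wrap */
}

int main(void)
{
	int online[] = { 0, 1, 2, 3 }, local[] = { 2, 3 };

	for (unsigned int i = 0; i < 4; i++)
		printf("i=%u -> cpu %d\n",
		       i, local_spread(i, online, 4, local, 2));
	return 0;
}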

@@ -592,14 +592,15 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
-					 __wsum *csum, struct iov_iter *i)
+					 struct csum_state *csstate,
+					 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
+	__wsum sum = csstate->csum;
+	size_t off = csstate->off;
	unsigned int i_head;
	size_t n, r;
-	size_t off = 0;
-	__wsum sum = *csum;
if (!sanity(i))
return 0;
@@ -621,7 +622,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
i_head++;
} while (n);
i->count -= bytes;
-	*csum = sum;
+	csstate->csum = sum;
+	csstate->off = off;
return bytes;
}
@@ -1522,18 +1524,19 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
+	struct csum_state *csstate = _csstate;
	const char *from = addr;
-	__wsum *csum = csump;
	__wsum sum, next;
-	size_t off = 0;
+	size_t off;
	if (unlikely(iov_iter_is_pipe(i)))
-		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+		return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
-	sum = *csum;
+	sum = csstate->csum;
+	off = csstate->off;
if (unlikely(iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
@@ -1561,7 +1564,8 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
off += v.iov_len;
})
)
-	*csum = sum;
+	csstate->csum = sum;
+	csstate->off = off;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
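Bundling the running sum and the byte offset into struct csum_state is the point of this change: the Internet checksum is position-sensitive, since bytes at even and odd offsets land in different halves of the 16-bit words, so resuming a copy needs both values. A simplified userspace sketch of offset-aware accumulation (byte-at-a-time; the kernel folds 32-bit partial sums instead):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct csum_state {
	uint32_t csum;	/* running partial sum */
	size_t off;	/* bytes consumed so far */
};

/* Accumulate a chunk, honouring whether it starts at an odd offset. */
static void csum_chunk(struct csum_state *cs, const uint8_t *p, size_t len)
{
	for (size_t i = 0; i < len; i++, cs->off++)
		cs->csum += (cs->off & 1) ? p[i] : (uint32_t)p[i] << 8;
}

int main(void)
{
	const uint8_t data[] = { 1, 2, 3, 4, 5 };
	struct csum_state cs = { 0, 0 };

	csum_chunk(&cs, data, 3);	/* first fragment */
	csum_chunk(&cs, data + 3, 2);	/* resumes with the right parity */
	printf("sum = 0x%x\n", cs.csum);	/* 0x906 either way */
	return 0;
}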

@@ -427,3 +427,34 @@ void __ubsan_handle_load_invalid_value(void *_data, void *val)
ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
unsigned long align,
unsigned long offset);
void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
unsigned long align,
unsigned long offset)
{
struct alignment_assumption_data *data = _data;
unsigned long real_ptr;
if (suppress_report(&data->location))
return;
ubsan_prologue(&data->location, "alignment-assumption");
if (offset)
pr_err("assumption of %lu byte alignment (with offset of %lu byte) for pointer of type %s failed",
align, offset, data->type->type_name);
else
pr_err("assumption of %lu byte alignment for pointer of type %s failed",
align, data->type->type_name);
real_ptr = ptr - offset;
pr_err("%saddress is %lu aligned, misalignment offset is %lu bytes",
offset ? "offset " : "", BIT(real_ptr ? __ffs(real_ptr) : 0),
real_ptr & (align - 1));
ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_alignment_assumption);
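The report derives the pointer's provable alignment from its lowest set bit: BIT(__ffs(real_ptr)) is the largest power of two dividing the address, and real_ptr & (align - 1) is the leftover misalignment. The same arithmetic on a made-up address:

#include <stdio.h>

int main(void)
{
	unsigned long ptr = 0x1008;	/* hypothetical failing pointer */
	unsigned long align = 16;	/* the assumed alignment */

	unsigned long actual = ptr & -ptr;	/* lowest set bit: 8 */
	unsigned long misalign = ptr & (align - 1);

	printf("address is %lu aligned, misalignment is %lu bytes\n",
	       actual, misalign);	/* "8 aligned", off by 8 */
	return 0;
}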

@@ -78,6 +78,12 @@ struct invalid_value_data {
struct type_descriptor *type;
};
struct alignment_assumption_data {
struct source_location location;
struct source_location assumption_location;
struct type_descriptor *type;
};
#if defined(CONFIG_ARCH_SUPPORTS_INT128)
typedef __int128 s_max;
typedef unsigned __int128 u_max;

@@ -544,19 +544,22 @@ int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
int br_mrp_set_port_state(struct net_bridge_port *p,
enum br_mrp_port_state_type state)
{
+	u32 port_state;
	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;
	spin_lock_bh(&p->br->lock);
	if (state == BR_MRP_PORT_STATE_FORWARDING)
-		p->state = BR_STATE_FORWARDING;
+		port_state = BR_STATE_FORWARDING;
	else
-		p->state = BR_STATE_BLOCKING;
+		port_state = BR_STATE_BLOCKING;
+	p->state = port_state;
	spin_unlock_bh(&p->br->lock);
-	br_mrp_port_switchdev_set_state(p, state);
+	br_mrp_port_switchdev_set_state(p, port_state);
return 0;
}

@@ -169,13 +169,12 @@ int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp,
return err;
}
-int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
-				    enum br_mrp_port_state_type state)
+int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, u32 state)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
-		.id = SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
-		.u.mrp_port_state = state,
+		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+		.u.stp_state = state,
	};
int err;
