Merge "Merge android12-5.10.17 (b129c98
) into msm-5.10"
This commit is contained in:
commit
04d38e4acb
@@ -5178,6 +5178,12 @@
 			growing up) the main stack are reserved for no other
 			mapping. Default value is 256 pages.
 
+	stack_depot_disable= [KNL]
+			Setting this to true through kernel command line will
+			disable the stack depot thereby saving the static memory
+			consumed by the stack hash table. By default this is set
+			to false.
+
 	stacktrace	[FTRACE]
 			Enabled the stack tracer on boot up.
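
On the kernel command line the new parameter takes the same form as any other boolean option; the GKI defconfig changes later in this same merge pass it as:

    stack_depot_disable=on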

@@ -155,18 +155,18 @@ Boot parameters
 ~~~~~~~~~~~~~~~
 
 Hardware tag-based KASAN mode (see the section about various modes below) is
-intended for use in production as a security mitigation. Therefore it supports
+intended for use in production as a security mitigation. Therefore, it supports
 boot parameters that allow to disable KASAN competely or otherwise control
 particular KASAN features.
 
 - ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
 
 - ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
-  traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
-  ``off``).
+  traces collection (default: ``on``).
 
 - ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
-  report or also panic the kernel (default: ``report``).
+  report or also panic the kernel (default: ``report``). Note, that tag
+  checking gets disabled after the first reported bug.
 
 For developers
 ~~~~~~~~~~~~~~

@@ -296,6 +296,9 @@ Note, that enabling CONFIG_KASAN_HW_TAGS always results in in-kernel TBI being
 enabled. Even when kasan.mode=off is provided, or when the hardware doesn't
 support MTE (but supports TBI).
 
+Hardware tag-based KASAN only reports the first found bug. After that MTE tag
+checking gets disabled.
+
 What memory accesses are sanitised by KASAN?
 --------------------------------------------
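
As a combined, illustrative example, the three parameters documented above could be passed together on a production kernel's command line:

    kasan=on kasan.stacktrace=off kasan.fault=report

keeping the hardware tag-based mitigation active while skipping alloc/free stack trace collection.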

Makefile (2 lines changed)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 16
+SUBLEVEL = 17
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus

(File diff suppressed because it is too large)

@@ -1175,6 +1175,7 @@
 	jiffies
 	jiffies_to_msecs
 	jiffies_to_usecs
+	kasan_flag_enabled
 	kasprintf
 	kernel_bind
 	kernel_connect

@@ -329,9 +329,6 @@ clk: clock-controller@0 {
 
 			clocks = <&xtal_32k>, <&xtal>;
 			clock-names = "xtal_32k", "xtal";
-
-			assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
-			assigned-clock-rates = <208000000>;
 		};
 	};

arch/arm/include/asm/kexec-internal.h (new file, 12 lines)
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+	unsigned long kexec_start_address;
+	unsigned long kexec_indirection_page;
+	unsigned long kexec_mach_type;
+	unsigned long kexec_r2;
+};
+
+#endif

@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
 #include <asm/mach/arch.h>

@@ -170,5 +171,9 @@ int main(void)
   DEFINE(MPU_RGN_PRBAR,	offsetof(struct mpu_rgn, prbar));
   DEFINE(MPU_RGN_PRLAR,	offsetof(struct mpu_rgn, prlar));
 #endif
+  DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
+  DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
+  DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type));
+  DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2));
   return 0;
 }

@@ -13,6 +13,7 @@
 #include <linux/of_fdt.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
 #include <asm/fncpy.h>
 #include <asm/mach-types.h>
 #include <asm/smp_plat.h>

@@ -22,11 +23,6 @@
 extern void relocate_new_kernel(void);
 extern const unsigned int relocate_new_kernel_size;
 
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
-extern unsigned long kexec_mach_type;
-extern unsigned long kexec_boot_atags;
-
 static atomic_t waiting_for_crash_ipi;
 
 /*

@@ -159,6 +155,7 @@ void (*kexec_reinit)(void);
 void machine_kexec(struct kimage *image)
 {
 	unsigned long page_list, reboot_entry_phys;
+	struct kexec_relocate_data *data;
 	void (*reboot_entry)(void);
 	void *reboot_code_buffer;
 

@@ -174,18 +171,17 @@ void machine_kexec(struct kimage *image)
 
 	reboot_code_buffer = page_address(image->control_code_page);
 
-	/* Prepare parameters for reboot_code_buffer*/
-	set_kernel_text_rw();
-	kexec_start_address = image->start;
-	kexec_indirection_page = page_list;
-	kexec_mach_type = machine_arch_type;
-	kexec_boot_atags = image->arch.kernel_r2;
-
 	/* copy our kernel relocation code to the control code page */
 	reboot_entry = fncpy(reboot_code_buffer,
 			     &relocate_new_kernel,
 			     relocate_new_kernel_size);
+	data = reboot_code_buffer + relocate_new_kernel_size;
+	data->kexec_start_address = image->start;
+	data->kexec_indirection_page = page_list;
+	data->kexec_mach_type = machine_arch_type;
+	data->kexec_r2 = image->arch.kernel_r2;
 
 	/* get the identity mapping physical address for the reboot code */
 	reboot_entry_phys = virt_to_idmap(reboot_entry);

@@ -5,14 +5,16 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/asm-offsets.h>
 #include <asm/kexec.h>
 
 	.align	3	/* not needed for this code, but keeps fncpy() happy */
 
 ENTRY(relocate_new_kernel)
 
-	ldr	r0,kexec_indirection_page
-	ldr	r1,kexec_start_address
+	adr	r7, relocate_new_kernel_end
+	ldr	r0, [r7, #KEXEC_INDIR_PAGE]
+	ldr	r1, [r7, #KEXEC_START_ADDR]
 
 	/*
 	 * If there is no indirection page (we are doing crashdumps)

@@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel)
 
 2:
 	/* Jump to relocated kernel */
-	mov lr,r1
-	mov r0,#0
-	ldr r1,kexec_mach_type
-	ldr r2,kexec_boot_atags
-	ARM(	ret lr	)
-	THUMB(	bx lr	)
-
-	.align
-
-	.globl kexec_start_address
-kexec_start_address:
-	.long	0x0
-
-	.globl kexec_indirection_page
-kexec_indirection_page:
-	.long	0x0
-
-	.globl kexec_mach_type
-kexec_mach_type:
-	.long	0x0
-
-	/* phy addr of the atags for the new kernel */
-	.globl kexec_boot_atags
-kexec_boot_atags:
-	.long	0x0
+	mov	lr, r1
+	mov	r0, #0
+	ldr	r1, [r7, #KEXEC_MACH_TYPE]
+	ldr	r2, [r7, #KEXEC_R2]
+	ARM(	ret lr	)
+	THUMB(	bx lr	)
 
 ENDPROC(relocate_new_kernel)
 
+	.align	3
 relocate_new_kernel_end:
 
 .globl relocate_new_kernel_size

@@ -693,18 +693,20 @@ struct page *get_signal_page(void)
 
 	addr = page_address(page);
 
+	/* Poison the entire page */
+	memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+		 PAGE_SIZE / sizeof(u32));
+
 	/* Give the signal return code some randomness */
 	offset = 0x200 + (get_random_int() & 0x7fc);
 	signal_return_offset = offset;
 
-	/*
-	 * Copy signal return handlers into the vector page, and
-	 * set sigreturn to be a pointer to these.
-	 */
+	/* Copy signal return handlers into the page */
 	memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
 
-	ptr = (unsigned long)addr + offset;
-	flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+	/* Flush out all instructions in this page */
+	ptr = (unsigned long)addr;
+	flush_icache_range(ptr, ptr + PAGE_SIZE);
 
 	return page;
 }

@@ -151,10 +151,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 		 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
 	/* Enter broadcast mode for periodic timers */
-	tick_broadcast_enable();
+	RCU_NONIDLE(tick_broadcast_enable());
 
 	/* Enter broadcast mode for one-shot timers */
-	tick_broadcast_enter();
+	RCU_NONIDLE(tick_broadcast_enter());
 
 	/*
 	 * Call idle CPU PM enter notifier chain so that

@@ -166,7 +166,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 
 	if (dev->cpu == 0) {
 		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+		RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
 
 		/*
 		 * Call idle CPU cluster PM enter notifier chain

@@ -178,7 +178,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 			index = 0;
 			cx = state_ptr + index;
 			pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-			omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+			RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
 			mpuss_can_lose_context = 0;
 		}
 	}

@@ -194,9 +194,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 	    mpuss_can_lose_context)
 		gic_dist_disable();
 
-	clkdm_deny_idle(cpu_clkdm[1]);
-	omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
-	clkdm_allow_idle(cpu_clkdm[1]);
+	RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
+	RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
+	RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));
 
 	if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
 	    mpuss_can_lose_context) {

@@ -222,7 +222,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 		cpu_pm_exit();
 
 cpu_pm_out:
-	tick_broadcast_exit();
+	RCU_NONIDLE(tick_broadcast_exit());
 
 fail:
 	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);

@@ -378,8 +378,6 @@ static int __init xen_guest_init(void)
 		return -ENOMEM;
 	}
 	gnttab_init();
-	if (!xen_initial_domain())
-		xenbus_probe();
 
 	/*
 	 * Making sure board specific code will not set up ops for

@@ -415,7 +415,9 @@ &dsi0_phy {
 &gcc {
 	protected-clocks = <GCC_QSPI_CORE_CLK>,
 			   <GCC_QSPI_CORE_CLK_SRC>,
-			   <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+			   <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+			   <GCC_LPASS_Q6_AXI_CLK>,
+			   <GCC_LPASS_SWAY_CLK>;
 };
 
 &gpu {

@@ -245,7 +245,9 @@ &cdsp_pas {
 &gcc {
 	protected-clocks = <GCC_QSPI_CORE_CLK>,
 			   <GCC_QSPI_CORE_CLK_SRC>,
-			   <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+			   <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+			   <GCC_LPASS_Q6_AXI_CLK>,
+			   <GCC_LPASS_SWAY_CLK>;
 };
 
 &gpu {

@@ -114,6 +114,10 @@ &cpu3 {
 	cpu-supply = <&vdd_arm>;
 };
 
+&display_subsystem {
+	status = "disabled";
+};
+
 &gmac2io {
 	assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
 	assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;

@@ -234,6 +234,7 @@ pcie0: pcie@f8000000 {
 			reg = <0x0 0xf8000000 0x0 0x2000000>,
 			      <0x0 0xfd000000 0x0 0x1000000>;
 			reg-names = "axi-base", "apb-base";
+			device_type = "pci";
 			#address-cells = <3>;
 			#size-cells = <2>;
 			#interrupt-cells = <1>;

@@ -252,7 +253,6 @@ pcie0: pcie@f8000000 {
 					<0 0 0 2 &pcie0_intc 1>,
 					<0 0 0 3 &pcie0_intc 2>,
 					<0 0 0 4 &pcie0_intc 3>;
-			linux,pci-domain = <0>;
 			max-link-speed = <1>;
 			msi-map = <0x0 &its 0x0 0x1000>;
 			phys = <&pcie_phy 0>, <&pcie_phy 1>,

@@ -1278,7 +1278,6 @@ vdec: video-codec@ff660000 {
 			compatible = "rockchip,rk3399-vdec";
 			reg = <0x0 0xff660000 0x0 0x400>;
 			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
-			interrupt-names = "vdpu";
 			clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
 				 <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
 			clock-names = "axi", "ahb", "cabac", "core";

@@ -56,6 +56,8 @@ CONFIG_CP15_BARRIER_EMULATION=y
 CONFIG_SETEND_EMULATION=y
 CONFIG_RANDOMIZE_BASE=y
 # CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
+CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off"
+CONFIG_CMDLINE_EXTEND=y
 # CONFIG_DMI is not set
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PM_WAKELOCKS_LIMIT=0

@@ -599,9 +601,11 @@ CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_HEADERS_INSTALL=y
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
 CONFIG_PAGE_OWNER=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_KASAN=y
 CONFIG_KASAN_HW_TAGS=y
 CONFIG_KFENCE=y
 CONFIG_KFENCE_SAMPLE_INTERVAL=0
 CONFIG_PANIC_ON_OOPS=y

@@ -6,7 +6,6 @@
 #define __ASM_CACHE_H
 
 #include <asm/cputype.h>
-#include <asm/mte-kasan.h>
 
 #define CTR_L1IP_SHIFT		14
 #define CTR_L1IP_MASK		3

@@ -6,6 +6,7 @@
 
 #include <linux/linkage.h>
 #include <asm/memory.h>
+#include <asm/mte-kasan.h>
 #include <asm/pgtable-types.h>
 
 #define arch_kasan_set_tag(addr, tag)	__tag_set(addr, tag)

@@ -11,4 +11,6 @@
 #define MTE_TAG_SIZE		4
 #define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
 
+#define __MTE_PREAMBLE		ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
+
 #endif /* __ASM_MTE_DEF_H */
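
For reference, MTE_TAG_SHIFT is 56 on arm64, so with MTE_TAG_SIZE of 4 the mask above covers bits 59..56 of a pointer:

    MTE_TAG_MASK = GENMASK(56 + (4 - 1), 56) = 0x0f00000000000000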

@@ -11,12 +11,15 @@
 
 #include <linux/types.h>
 
-/*
- * The functions below are meant to be used only for the
- * KASAN_HW_TAGS interface defined in asm/memory.h.
- */
 #ifdef CONFIG_ARM64_MTE
 
+/*
+ * These functions are meant to be only used from KASAN runtime through
+ * the arch_*() interface defined in asm/memory.h.
+ * These functions don't include system_supports_mte() checks,
+ * as KASAN only calls them when MTE is supported and enabled.
+ */
+
 static inline u8 mte_get_ptr_tag(void *ptr)
 {
 	/* Note: The format of KASAN tags is 0xF<x> */

@@ -25,9 +28,54 @@ static inline u8 mte_get_ptr_tag(void *ptr)
 	return tag;
 }
 
-u8 mte_get_mem_tag(void *addr);
-u8 mte_get_random_tag(void);
-void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+/* Get allocation tag for the address. */
+static inline u8 mte_get_mem_tag(void *addr)
+{
+	asm(__MTE_PREAMBLE "ldg %0, [%0]"
+		: "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+/* Generate a random tag. */
+static inline u8 mte_get_random_tag(void)
+{
+	void *addr;
+
+	asm(__MTE_PREAMBLE "irg %0, %0"
+		: "=r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag.
+ * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
+ * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ */
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	u64 curr, end;
+
+	if (!size)
+		return;
+
+	curr = (u64)__tag_set(addr, tag);
+	end = curr + size;
+
+	do {
+		/*
+		 * 'asm volatile' is required to prevent the compiler to move
+		 * the statement outside of the loop.
+		 */
+		asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
+			     :
+			     : "r" (curr)
+			     : "memory");
+
+		curr += MTE_GRANULE_SIZE;
+	} while (curr != end);
+}
 
 void mte_enable_kernel(void);
 void mte_init_tags(u64 max_tag);

@@ -46,13 +94,14 @@ static inline u8 mte_get_mem_tag(void *addr)
 {
 	return 0xFF;
 }
 
 static inline u8 mte_get_random_tag(void)
 {
 	return 0xFF;
 }
-static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
 {
-	return addr;
 }
 
 static inline void mte_enable_kernel(void)
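
To see how the now-inline helpers compose, here is a minimal sketch; it is not part of the patch, example_tag_buffer is a hypothetical caller, and it assumes MTE is supported and enabled with buf and size granule-aligned, as the comments above require:

    /* Hypothetical sketch: colour a granule-aligned kernel buffer. */
    static void *example_tag_buffer(void *buf, size_t size)
    {
    	u8 tag = mte_get_random_tag();	/* random tag via IRG */
    
    	/* Tag every MTE_GRANULE_SIZE granule of [buf, buf + size) */
    	mte_set_mem_tag_range(buf, size, tag);
    
    	/*
    	 * Return a pointer whose top-byte tag matches the memory;
    	 * mte_get_mem_tag() would now read the same tag back via LDG.
    	 */
    	return (void *)__tag_set(buf, tag);
    }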

@@ -8,8 +8,6 @@
 #include <asm/compiler.h>
 #include <asm/mte-def.h>
 
-#define __MTE_PREAMBLE		ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
-
 #ifndef __ASSEMBLY__
 
 #include <linux/bitfield.h>

@@ -1883,16 +1883,12 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
 #ifdef CONFIG_ARM64_MTE
 static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 {
-	static bool cleared_zero_page = false;
-
 	/*
 	 * Clear the tags in the zero page. This needs to be done via the
 	 * linear map which has the Tagged attribute.
 	 */
-	if (!cleared_zero_page) {
-		cleared_zero_page = true;
+	if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
 		mte_clear_page_tags(lm_alias(empty_zero_page));
-	}
 
 	kasan_init_hw_tags_cpu();
 }

@@ -19,7 +19,6 @@
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
-#include <asm/mte-kasan.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 

@@ -88,51 +87,6 @@ int memcmp_pages(struct page *page1, struct page *page2)
 	return ret;
 }
 
-u8 mte_get_mem_tag(void *addr)
-{
-	if (!system_supports_mte())
-		return 0xFF;
-
-	asm(__MTE_PREAMBLE "ldg %0, [%0]"
-	    : "+r" (addr));
-
-	return mte_get_ptr_tag(addr);
-}
-
-u8 mte_get_random_tag(void)
-{
-	void *addr;
-
-	if (!system_supports_mte())
-		return 0xFF;
-
-	asm(__MTE_PREAMBLE "irg %0, %0"
-	    : "+r" (addr));
-
-	return mte_get_ptr_tag(addr);
-}
-
-void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
-{
-	void *ptr = addr;
-
-	if ((!system_supports_mte()) || (size == 0))
-		return addr;
-
-	/* Make sure that size is MTE granule aligned. */
-	WARN_ON(size & (MTE_GRANULE_SIZE - 1));
-
-	/* Make sure that the address is MTE granule aligned. */
-	WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
-
-	tag = 0xF0 | tag;
-	ptr = (void *)__tag_set(ptr, tag);
-
-	mte_assign_mem_tag_range(ptr, size);
-
-	return ptr;
-}
-
 void mte_init_tags(u64 max_tag)
 {
 	static bool gcr_kernel_excl_initialized;

@@ -341,11 +295,12 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
 		 * would cause the existing tags to be cleared if the page
 		 * was never mapped with PROT_MTE.
 		 */
-		if (!test_bit(PG_mte_tagged, &page->flags)) {
+		if (!(vma->vm_flags & VM_MTE)) {
 			ret = -EOPNOTSUPP;
 			put_page(page);
 			break;
 		}
+		WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
 
 		/* limit access to the end of the page */
 		offset = offset_in_page(addr);

@@ -149,19 +149,3 @@ SYM_FUNC_START(mte_restore_page_tags)
 
 	ret
 SYM_FUNC_END(mte_restore_page_tags)
-
-/*
- * Assign allocation tags for a region of memory based on the pointer tag
- *   x0 - source pointer
- *   x1 - size
- *
- * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
- * size must be non-zero and MTE_GRANULE_SIZE aligned.
- */
-SYM_FUNC_START(mte_assign_mem_tag_range)
-1:	stg	x0, [x0]
-	add	x0, x0, #MTE_GRANULE_SIZE
-	subs	x1, x1, #MTE_GRANULE_SIZE
-	b.gt	1b
-	ret
-SYM_FUNC_END(mte_assign_mem_tag_range)

@@ -63,6 +63,9 @@ int main(void)
 	OFFSET(TI_FLAGS, thread_info, flags);
 	OFFSET(TI_CPU, thread_info, cpu);
 	OFFSET(TI_PRE, thread_info, preempt_count);
+#ifdef CONFIG_PREEMPTION
+	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+#endif
 
 	return 0;
 }

@@ -90,7 +90,6 @@ &eth0 {
 	phy0: ethernet-phy@0 {
 		compatible = "ethernet-phy-id0007.0771";
 		reg = <0>;
-		reset-gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
 	};
 };

@@ -135,7 +135,10 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
 
 #endif /* __ASSEMBLY__ */
 
-#define virt_addr_valid(vaddr)	(pfn_valid(virt_to_pfn(vaddr)))
+#define virt_addr_valid(vaddr)	({						\
+	unsigned long _addr = (unsigned long)vaddr;				\
+	(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr));	\
+})
 
 #define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

@@ -57,6 +57,9 @@ export BITS
 KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
 KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
 
+# Intel CET isn't enabled in the kernel
+KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+
 ifeq ($(CONFIG_X86_32),y)
         BITS := 32
         UTS_MACHINE := i386

@@ -127,9 +130,6 @@ else
 
         KBUILD_CFLAGS += -mno-red-zone
         KBUILD_CFLAGS += -mcmodel=kernel
-
-        # Intel CET isn't enabled in the kernel
-        KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
 endif
 
 ifdef CONFIG_X86_X32

@@ -50,6 +50,8 @@ CONFIG_HYPERVISOR_GUEST=y
 CONFIG_PARAVIRT=y
 CONFIG_NR_CPUS=32
 CONFIG_EFI=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="stack_depot_disable=on"
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PM_WAKELOCKS_LIMIT=0
 # CONFIG_PM_WAKELOCKS_GC is not set

@@ -543,7 +545,7 @@ CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_HEADERS_INSTALL=y
 # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
 CONFIG_PAGE_OWNER=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_KFENCE=y

@@ -1159,6 +1159,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		1),
 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,	1),
 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,		1),
+	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,		1),
 	{}
 };
 

@@ -1829,6 +1829,7 @@ void arch_set_max_freq_ratio(bool turbo_disabled)
 	arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
 					arch_turbo_freq_ratio;
 }
+EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);
 
 static bool turbo_disabled(void)
 {

@@ -231,6 +231,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 
 static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
+	struct kvm_vcpu *vcpu = &svm->vcpu;
 	bool vmcb12_lma;
 
 	if ((vmcb12->save.efer & EFER_SVME) == 0)

@@ -244,18 +245,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 
 	vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
 
-	if (!vmcb12_lma) {
-		if (vmcb12->save.cr4 & X86_CR4_PAE) {
-			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
-				return false;
-		} else {
-			if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
-				return false;
-		}
-	} else {
+	if (vmcb12_lma) {
 		if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
 		    !(vmcb12->save.cr0 & X86_CR0_PE) ||
-		    (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
+		    (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
 			return false;
 	}
 	if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))

@@ -346,9 +346,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
 }
 
 /* svm.c */
-#define MSR_CR3_LEGACY_RESERVED_MASK		0xfe7U
-#define MSR_CR3_LEGACY_PAE_RESERVED_MASK	0x7U
-#define MSR_CR3_LONG_MBZ_MASK			0xfff0000000000000U
 #define MSR_INVALID				0xffffffffU
 
 u32 svm_msrpm_offset(u32 msr);

@@ -9558,6 +9558,8 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 		if (!(sregs->cr4 & X86_CR4_PAE)
 		    || !(sregs->efer & EFER_LMA))
 			return -EINVAL;
+		if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
+			return -EINVAL;
 	} else {
 		/*
 		 * Not in 64-bit mode: EFER.LMA is clear and the code

@@ -9,16 +9,23 @@
    in the right sequence from here. */
 static __init int pci_arch_init(void)
 {
-	int type;
-
-	x86_create_pci_msi_domain();
+	int type, pcbios = 1;
 
 	type = pci_direct_probe();
 
 	if (!(pci_probe & PCI_PROBE_NOEARLY))
 		pci_mmcfg_early_init();
 
-	if (x86_init.pci.arch_init && !x86_init.pci.arch_init())
-		return 0;
+	if (x86_init.pci.arch_init)
+		pcbios = x86_init.pci.arch_init();
+
+	/*
+	 * Must happen after x86_init.pci.arch_init(). Xen sets up the
+	 * x86_init.irqs.create_pci_msi_domain there.
+	 */
+	x86_create_pci_msi_domain();
+
+	if (!pcbios)
+		return 0;
 
 	pci_pcbios_init();

@@ -115,31 +115,12 @@ void efi_sync_low_kernel_mappings(void)
 	pud_t *pud_k, *pud_efi;
 	pgd_t *efi_pgd = efi_mm.pgd;
 
-	/*
-	 * We can share all PGD entries apart from the one entry that
-	 * covers the EFI runtime mapping space.
-	 *
-	 * Make sure the EFI runtime region mappings are guaranteed to
-	 * only span a single PGD entry and that the entry also maps
-	 * other important kernel regions.
-	 */
-	MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
-	MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
-			(EFI_VA_END & PGDIR_MASK));
-
 	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
 	pgd_k = pgd_offset_k(PAGE_OFFSET);
 
 	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
 	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
 
-	/*
-	 * As with PGDs, we share all P4D entries apart from the one entry
-	 * that covers the EFI runtime mapping space.
-	 */
-	BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
-	BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));
-
 	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
 	pgd_k = pgd_offset_k(EFI_VA_END);
 	p4d_efi = p4d_offset(pgd_efi, 0);

@@ -6332,13 +6332,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
 	 * limit 'something'.
 	 */
 	/* no more than 50% of tags for async I/O */
-	bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
+	bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
 	/*
 	 * no more than 75% of tags for sync writes (25% extra tags
 	 * w.r.t. async I/O, to prevent async I/O from starving sync
 	 * writes)
 	 */
-	bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
+	bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
 
 	/*
 	 * In-word depths in case some bfq_queue is being weight-

@@ -6348,9 +6348,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
 	 * shortage.
 	 */
 	/* no more than ~18% of tags for async I/O */
-	bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
+	bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
 	/* no more than ~37% of tags for sync writes (~20% extra tags) */
-	bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
+	bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
 
 	for (i = 0; i < 2; i++)
 		for (j = 0; j < 2; j++)
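
Worked numbers (illustrative, not from the patch): for a sbitmap word of 1 << bt->sb.shift = 64 bits, the four per-word depths above evaluate to:

    word_depths[0][0] = 64 >> 1       = 32   /* 50% */
    word_depths[0][1] = (64 * 3) >> 2 = 48   /* 75% */
    word_depths[1][0] = (64 * 3) >> 4 = 12   /* ~18% */
    word_depths[1][1] = (64 * 6) >> 4 = 24   /* ~37% */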

@@ -5,7 +5,6 @@ function update_kasan_config() {
   ${KERNEL_DIR}/scripts/config --file ${OUT_DIR}/.config \
     -e CONFIG_KASAN \
     -e CONFIG_KASAN_INLINE \
-    -e CONFIG_KASAN_PANIC_ON_WARN \
     -e CONFIG_KCOV \
     -e CONFIG_PANIC_ON_WARN_DEFAULT_ENABLE \
     -d CONFIG_RANDOMIZE_BASE \

@@ -108,7 +108,7 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
 	max_m = cmp->m.max ?: 1 << cmp->m.width;
 	max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
 
-	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+	if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) {
 		ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
 		rate = *parent_rate / p / m;
 	} else {

@@ -26,6 +26,7 @@
 #include <linux/uaccess.h>
 
 #include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
 
 #include <asm/msr.h>
 #include <asm/processor.h>

@@ -53,6 +54,7 @@ struct acpi_cpufreq_data {
 	unsigned int resume;
 	unsigned int cpu_feature;
 	unsigned int acpi_perf_cpu;
+	unsigned int first_perf_state;
 	cpumask_var_t freqdomain_cpus;
 	void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
 	u32 (*cpu_freq_read)(struct acpi_pct_register *reg);

@@ -221,10 +223,10 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
 
 	perf = to_perf_data(data);
 
-	cpufreq_for_each_entry(pos, policy->freq_table)
+	cpufreq_for_each_entry(pos, policy->freq_table + data->first_perf_state)
 		if (msr == perf->states[pos->driver_data].status)
 			return pos->frequency;
-	return policy->freq_table[0].frequency;
+	return policy->freq_table[data->first_perf_state].frequency;
 }
 
 static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)

@@ -363,6 +365,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	struct cpufreq_policy *policy;
 	unsigned int freq;
 	unsigned int cached_freq;
+	unsigned int state;
 
 	pr_debug("%s (%d)\n", __func__, cpu);
 

@@ -374,7 +377,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	if (unlikely(!data || !policy->freq_table))
 		return 0;
 
-	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
+	state = to_perf_data(data)->state;
+	if (state < data->first_perf_state)
+		state = data->first_perf_state;
+
+	cached_freq = policy->freq_table[state].frequency;
 	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
 	if (freq != cached_freq) {
 		/*

@@ -628,16 +635,54 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 }
 #endif
 
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u64 get_max_boost_ratio(unsigned int cpu)
+{
+	struct cppc_perf_caps perf_caps;
+	u64 highest_perf, nominal_perf;
+	int ret;
+
+	if (acpi_pstate_strict)
+		return 0;
+
+	ret = cppc_get_perf_caps(cpu, &perf_caps);
+	if (ret) {
+		pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
+			 cpu, ret);
+		return 0;
+	}
+
+	highest_perf = perf_caps.highest_perf;
+	nominal_perf = perf_caps.nominal_perf;
+
+	if (!highest_perf || !nominal_perf) {
+		pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
+		return 0;
+	}
+
+	if (highest_perf < nominal_perf) {
+		pr_debug("CPU%d: nominal performance above highest\n", cpu);
+		return 0;
+	}
+
+	return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+}
+#else
+static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+#endif
+
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-	unsigned int i;
-	unsigned int valid_states = 0;
-	unsigned int cpu = policy->cpu;
-	struct acpi_cpufreq_data *data;
-	unsigned int result = 0;
-	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
-	struct acpi_processor_performance *perf;
 	struct cpufreq_frequency_table *freq_table;
+	struct acpi_processor_performance *perf;
+	struct acpi_cpufreq_data *data;
+	unsigned int cpu = policy->cpu;
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	unsigned int valid_states = 0;
+	unsigned int result = 0;
+	unsigned int state_count;
+	u64 max_boost_ratio;
+	unsigned int i;
 #ifdef CONFIG_SMP
 	static int blacklisted;
 #endif

@@ -750,8 +795,28 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		goto err_unreg;
 	}
 
-	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
-			     GFP_KERNEL);
+	state_count = perf->state_count + 1;
+
+	max_boost_ratio = get_max_boost_ratio(cpu);
+	if (max_boost_ratio) {
+		/*
+		 * Make a room for one more entry to represent the highest
+		 * available "boost" frequency.
+		 */
+		state_count++;
+		valid_states++;
+		data->first_perf_state = valid_states;
+	} else {
+		/*
+		 * If the maximum "boost" frequency is unknown, ask the arch
+		 * scale-invariance code to use the "nominal" performance for
+		 * CPU utilization scaling so as to prevent the schedutil
+		 * governor from selecting inadequate CPU frequencies.
+		 */
+		arch_set_max_freq_ratio(true);
+	}
+
+	freq_table = kcalloc(state_count, sizeof(*freq_table), GFP_KERNEL);
 	if (!freq_table) {
 		result = -ENOMEM;
 		goto err_unreg;

@@ -785,6 +850,30 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		valid_states++;
 	}
 	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+
+	if (max_boost_ratio) {
+		unsigned int state = data->first_perf_state;
+		unsigned int freq = freq_table[state].frequency;
+
+		/*
+		 * Because the loop above sorts the freq_table entries in the
+		 * descending order, freq is the maximum frequency in the table.
+		 * Assume that it corresponds to the CPPC nominal frequency and
+		 * use it to populate the frequency field of the extra "boost"
+		 * frequency entry.
+		 */
+		freq_table[0].frequency = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
+		/*
+		 * The purpose of the extra "boost" frequency entry is to make
+		 * the rest of cpufreq aware of the real maximum frequency, but
+		 * the way to request it is the same as for the first_perf_state
+		 * entry that is expected to cover the entire range of "boost"
+		 * frequencies of the CPU, so copy the driver_data value from
+		 * that entry.
+		 */
+		freq_table[0].driver_data = freq_table[state].driver_data;
+	}
+
 	policy->freq_table = freq_table;
 	perf->state = 0;
 

@@ -858,8 +947,10 @@ static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
 {
 	struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
 							      policy->cpu);
+	struct acpi_cpufreq_data *data = policy->driver_data;
+	unsigned int freq = policy->freq_table[data->first_perf_state].frequency;
 
-	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+	if (perf->states[0].core_frequency * 1000 != freq)
 		pr_warn(FW_WARN "P-state 0 is not max freq\n");
 }
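
A worked example of the boost arithmetic (illustrative numbers, not from the patch): if CPPC reports nominal_perf = 100 and highest_perf = 125, get_max_boost_ratio() returns div_u64(125 << 10, 100) = 1280, since SCHED_CAPACITY_SHIFT is 10. With a nominal table maximum of 2000000 kHz, the extra boost entry becomes 2000000 * 1280 >> 10 = 2500000 kHz.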

@@ -239,7 +239,7 @@ struct shrinker pool_shrinker = {
 	.batch = 0,
 };
 
-int dmabuf_page_pool_init_shrinker(void)
+static int dmabuf_page_pool_init_shrinker(void)
 {
 	return register_shrinker(&pool_shrinker);
 }

@@ -1110,7 +1110,6 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
 		  "%s called while %d clients hold a reference\n",
 		  __func__, chan->client_count);
 	mutex_lock(&dma_list_mutex);
-	list_del(&chan->device_node);
 	device->chancnt--;
 	chan->dev->chan = NULL;
 	mutex_unlock(&dma_list_mutex);

@@ -325,17 +325,31 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd)
 	return false;
 }
 
+static inline bool idxd_device_is_halted(struct idxd_device *idxd)
+{
+	union gensts_reg gensts;
+
+	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+	return (gensts.state == IDXD_DEVICE_STATE_HALT);
+}
+
 /*
  * This is function is only used for reset during probe and will
  * poll for completion. Once the device is setup with interrupts,
  * all commands will be done via interrupt completion.
  */
-void idxd_device_init_reset(struct idxd_device *idxd)
+int idxd_device_init_reset(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
 	union idxd_command_reg cmd;
 	unsigned long flags;
 
+	if (idxd_device_is_halted(idxd)) {
+		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+		return -ENXIO;
+	}
+
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.cmd = IDXD_CMD_RESET_DEVICE;
 	dev_dbg(dev, "%s: sending reset for init.\n", __func__);

@@ -346,6 +360,7 @@ void idxd_device_init_reset(struct idxd_device *idxd)
 	       IDXD_CMDSTS_ACTIVE)
 		cpu_relax();
 	spin_unlock_irqrestore(&idxd->dev_lock, flags);
+	return 0;
 }
 
 static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,

@@ -355,6 +370,12 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
 	DECLARE_COMPLETION_ONSTACK(done);
 	unsigned long flags;
 
+	if (idxd_device_is_halted(idxd)) {
+		dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+		*status = IDXD_CMDSTS_HW_ERR;
+		return;
+	}
+
 	memset(&cmd, 0, sizeof(cmd));
 	cmd.cmd = cmd_code;
 	cmd.operand = operand;

@@ -214,5 +214,8 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
 
 void idxd_unregister_dma_channel(struct idxd_wq *wq)
 {
-	dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+	struct dma_chan *chan = &wq->dma_chan;
+
+	dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
+	list_del(&chan->device_node);
 }

@@ -281,7 +281,7 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
 void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
 
 /* device control */
-void idxd_device_init_reset(struct idxd_device *idxd);
+int idxd_device_init_reset(struct idxd_device *idxd);
 int idxd_device_enable(struct idxd_device *idxd);
 int idxd_device_disable(struct idxd_device *idxd);
 void idxd_device_reset(struct idxd_device *idxd);

@@ -289,7 +289,10 @@ static int idxd_probe(struct idxd_device *idxd)
 	int rc;
 
 	dev_dbg(dev, "%s entered and resetting device\n", __func__);
-	idxd_device_init_reset(idxd);
+	rc = idxd_device_init_reset(idxd);
+	if (rc < 0)
+		return rc;
+
 	dev_dbg(dev, "IDXD reset complete\n");
 
 	idxd_read_caps(idxd);

@@ -53,19 +53,14 @@ irqreturn_t idxd_irq_handler(int vec, void *data)
 	return IRQ_WAKE_THREAD;
 }
 
-irqreturn_t idxd_misc_thread(int vec, void *data)
+static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
 {
-	struct idxd_irq_entry *irq_entry = data;
-	struct idxd_device *idxd = irq_entry->idxd;
 	struct device *dev = &idxd->pdev->dev;
 	union gensts_reg gensts;
-	u32 cause, val = 0;
+	u32 val = 0;
 	int i;
 	bool err = false;
 
-	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
-	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
-
 	if (cause & IDXD_INTC_ERR) {
 		spin_lock_bh(&idxd->dev_lock);
 		for (i = 0; i < 4; i++)

@@ -123,7 +118,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 			val);
 
 	if (!err)
-		goto out;
+		return 0;
 
 	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
 	if (gensts.state == IDXD_DEVICE_STATE_HALT) {

@@ -144,10 +139,33 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
 				 gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
 				 "FLR" : "system reset");
 			spin_unlock_bh(&idxd->dev_lock);
+			return -ENXIO;
 		}
 	}
 
-out:
+	return 0;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+	struct idxd_irq_entry *irq_entry = data;
+	struct idxd_device *idxd = irq_entry->idxd;
+	int rc;
+	u32 cause;
+
+	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+	if (cause)
+		iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+	while (cause) {
+		rc = process_misc_interrupts(idxd, cause);
+		if (rc < 0)
+			break;
+		cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+		if (cause)
+			iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+	}
+
 	idxd_unmask_msix_vector(idxd, irq_entry->id);
 	return IRQ_HANDLED;
 }

@@ -428,8 +428,9 @@ config GPIO_MXC
 	select GENERIC_IRQ_CHIP
 
 config GPIO_MXS
-	def_bool y
+	bool "Freescale MXS GPIO support" if COMPILE_TEST
 	depends on ARCH_MXS || COMPILE_TEST
+	default y if ARCH_MXS
 	select GPIO_GENERIC
 	select GENERIC_IRQ_CHIP

@@ -25,6 +25,9 @@
 /* Maximum value for gpio line identifiers */
 #define EP93XX_GPIO_LINE_MAX 63
 
+/* Number of GPIO chips in EP93XX */
+#define EP93XX_GPIO_CHIP_NUM 8
+
 /* Maximum value for irq capable line identifiers */
 #define EP93XX_GPIO_LINE_MAX_IRQ 23
 

@@ -34,74 +37,75 @@
  */
 #define EP93XX_GPIO_F_IRQ_BASE 80
 
+struct ep93xx_gpio_irq_chip {
+	struct irq_chip ic;
+	u8 irq_offset;
+	u8 int_unmasked;
+	u8 int_enabled;
+	u8 int_type1;
+	u8 int_type2;
+	u8 int_debounce;
+};
+
+struct ep93xx_gpio_chip {
+	struct gpio_chip gc;
+	struct ep93xx_gpio_irq_chip *eic;
+};
+
 struct ep93xx_gpio {
 	void __iomem *base;
-	struct gpio_chip gc[8];
+	struct ep93xx_gpio_chip gc[EP93XX_GPIO_CHIP_NUM];
 };
 
+#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
+
+static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
+{
+	struct ep93xx_gpio_chip *egc = to_ep93xx_gpio_chip(gc);
+
+	return egc->eic;
+}
+
 /*************************************************************************
  * Interrupt handling for EP93xx on-chip GPIOs
  *************************************************************************/
-static unsigned char gpio_int_unmasked[3];
-static unsigned char gpio_int_enabled[3];
-static unsigned char gpio_int_type1[3];
-static unsigned char gpio_int_type2[3];
-static unsigned char gpio_int_debounce[3];
+#define EP93XX_INT_TYPE1_OFFSET		0x00
+#define EP93XX_INT_TYPE2_OFFSET		0x04
+#define EP93XX_INT_EOI_OFFSET		0x08
+#define EP93XX_INT_EN_OFFSET		0x0c
+#define EP93XX_INT_STATUS_OFFSET	0x10
+#define EP93XX_INT_RAW_STATUS_OFFSET	0x14
+#define EP93XX_INT_DEBOUNCE_OFFSET	0x18
 
-/* Port ordering is: A B F */
-static const u8 int_type1_register_offset[3]	= { 0x90, 0xac, 0x4c };
-static const u8 int_type2_register_offset[3]	= { 0x94, 0xb0, 0x50 };
-static const u8 eoi_register_offset[3]		= { 0x98, 0xb4, 0x54 };
-static const u8 int_en_register_offset[3]	= { 0x9c, 0xb8, 0x58 };
-static const u8 int_debounce_register_offset[3]	= { 0xa8, 0xc4, 0x64 };
-
-static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, unsigned port)
+static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg,
+					  struct ep93xx_gpio_irq_chip *eic)
 {
-	BUG_ON(port > 2);
+	writeb_relaxed(0, epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
 
-	writeb_relaxed(0, epg->base + int_en_register_offset[port]);
+	writeb_relaxed(eic->int_type2,
+		       epg->base + eic->irq_offset + EP93XX_INT_TYPE2_OFFSET);
 
-	writeb_relaxed(gpio_int_type2[port],
-		       epg->base + int_type2_register_offset[port]);
+	writeb_relaxed(eic->int_type1,
+		       epg->base + eic->irq_offset + EP93XX_INT_TYPE1_OFFSET);
 
-	writeb_relaxed(gpio_int_type1[port],
-		       epg->base + int_type1_register_offset[port]);
-
-	writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
-	       epg->base + int_en_register_offset[port]);
-}
-
-static int ep93xx_gpio_port(struct gpio_chip *gc)
-{
-	struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-	int port = 0;
-
-	while (port < ARRAY_SIZE(epg->gc) && gc != &epg->gc[port])
-		port++;
-
-	/* This should not happen but is there as a last safeguard */
-	if (port == ARRAY_SIZE(epg->gc)) {
-		pr_crit("can't find the GPIO port\n");
-		return 0;
-	}
-
-	return port;
+	writeb_relaxed(eic->int_unmasked & eic->int_enabled,
+		       epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
 }
 
 static void ep93xx_gpio_int_debounce(struct gpio_chip *gc,
 				     unsigned int offset, bool enable)
 {
 	struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-	int port = ep93xx_gpio_port(gc);
+	struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 	int port_mask = BIT(offset);
 
 	if (enable)
-		gpio_int_debounce[port] |= port_mask;
+		eic->int_debounce |= port_mask;
 	else
-		gpio_int_debounce[port] &= ~port_mask;
+		eic->int_debounce &= ~port_mask;
 
-	writeb(gpio_int_debounce[port],
-	       epg->base + int_debounce_register_offset[port]);
+	writeb(eic->int_debounce,
+	       epg->base + eic->irq_offset + EP93XX_INT_DEBOUNCE_OFFSET);
 }
 
 static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)

@@ -122,12 +126,12 @@ static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
 	 */
 	stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS);
 	for_each_set_bit(offset, &stat, 8)
-		generic_handle_irq(irq_find_mapping(epg->gc[0].irq.domain,
+		generic_handle_irq(irq_find_mapping(epg->gc[0].gc.irq.domain,
 						    offset));
 
 	stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS);
 	for_each_set_bit(offset, &stat, 8)
-		generic_handle_irq(irq_find_mapping(epg->gc[1].irq.domain,
+		generic_handle_irq(irq_find_mapping(epg->gc[1].gc.irq.domain,
 						    offset));
 
 	chained_irq_exit(irqchip, desc);

@@ -153,52 +157,52 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
 static void ep93xx_gpio_irq_ack(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 	struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-	int port = ep93xx_gpio_port(gc);
 	int port_mask = BIT(d->irq & 7);
 
 	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
-		gpio_int_type2[port] ^= port_mask; /* switch edge direction */
-		ep93xx_gpio_update_int_params(epg, port);
+		eic->int_type2 ^= port_mask; /* switch edge direction */
+		ep93xx_gpio_update_int_params(epg, eic);
 	}
 
-	writeb(port_mask, epg->base + eoi_register_offset[port]);
+	writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
 }
 
 static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 	struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-	int port = ep93xx_gpio_port(gc);
 	int port_mask = BIT(d->irq & 7);
 
 	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
-		gpio_int_type2[port] ^= port_mask; /* switch edge direction */
+		eic->int_type2 ^= port_mask; /* switch edge direction */
 
-	gpio_int_unmasked[port] &= ~port_mask;
-	ep93xx_gpio_update_int_params(epg, port);
+	eic->int_unmasked &= ~port_mask;
+	ep93xx_gpio_update_int_params(epg, eic);
 
-	writeb(port_mask, epg->base + eoi_register_offset[port]);
+	writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
 }
 
 static void ep93xx_gpio_irq_mask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 	struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-	int port = ep93xx_gpio_port(gc);
 
-	gpio_int_unmasked[port] &= ~BIT(d->irq & 7);
-	ep93xx_gpio_update_int_params(epg, port);
+	eic->int_unmasked &= ~BIT(d->irq & 7);
+	ep93xx_gpio_update_int_params(epg, eic);
 }
 
 static void ep93xx_gpio_irq_unmask(struct irq_data *d)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 	struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-	int port = ep93xx_gpio_port(gc);
 
-	gpio_int_unmasked[port] |= BIT(d->irq & 7);
-	ep93xx_gpio_update_int_params(epg, port);
+	eic->int_unmasked |= BIT(d->irq & 7);
+	ep93xx_gpio_update_int_params(epg, eic);
 }
 
 /*

@@ -209,8 +213,8 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d)
 static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
 	struct ep93xx_gpio *epg = gpiochip_get_data(gc);
-	int port = ep93xx_gpio_port(gc);
 	int offset = d->irq & 7;
 	int port_mask = BIT(offset);
 	irq_flow_handler_t handler;

@@ -219,32 +223,32 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
 
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
-		gpio_int_type1[port] |= port_mask;
-		gpio_int_type2[port] |= port_mask;
+		eic->int_type1 |= port_mask;
+		eic->int_type2 |= port_mask;
 		handler = handle_edge_irq;
 		break;
 	case IRQ_TYPE_EDGE_FALLING:
-		gpio_int_type1[port] |= port_mask;
-		gpio_int_type2[port] &= ~port_mask;
+		eic->int_type1 |= port_mask;
+		eic->int_type2 &= ~port_mask;
 		handler = handle_edge_irq;
 		break;
 	case IRQ_TYPE_LEVEL_HIGH:
-		gpio_int_type1[port] &= ~port_mask;
-		gpio_int_type2[port] |= port_mask;
+		eic->int_type1 &= ~port_mask;
+		eic->int_type2 |= port_mask;
 		handler = handle_level_irq;
 		break;
 	case IRQ_TYPE_LEVEL_LOW:
-		gpio_int_type1[port] &= ~port_mask;
-		gpio_int_type2[port] &= ~port_mask;
+		eic->int_type1 &= ~port_mask;
+		eic->int_type2 &= ~port_mask;
 		handler = handle_level_irq;
 		break;
 	case IRQ_TYPE_EDGE_BOTH:
-		gpio_int_type1[port] |= port_mask;
+		eic->int_type1 |= port_mask;
 		/* set initial polarity based on current input level */
 		if (gc->get(gc, offset))
-			gpio_int_type2[port] &= ~port_mask; /* falling */
+			eic->int_type2 &= ~port_mask; /* falling */
 		else
-			gpio_int_type2[port] |= port_mask; /* rising */
+			eic->int_type2 |= port_mask; /* rising */
 		handler = handle_edge_irq;
 		break;
 	default:

@@ -253,22 +257,13 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
 
 	irq_set_handler_locked(d, handler);
 
-	gpio_int_enabled[port] |= port_mask;
+	eic->int_enabled |= port_mask;
 
-	ep93xx_gpio_update_int_params(epg, port);
+	ep93xx_gpio_update_int_params(epg, eic);
 
 	return 0;
 }
 
-static struct irq_chip ep93xx_gpio_irq_chip = {
-	.name		= "GPIO",
-	.irq_ack	= ep93xx_gpio_irq_ack,
-	.irq_mask_ack	= ep93xx_gpio_irq_mask_ack,
-	.irq_mask	= ep93xx_gpio_irq_mask,
-	.irq_unmask	= ep93xx_gpio_irq_unmask,
-	.irq_set_type	= ep93xx_gpio_irq_type,
-};
-
 /*************************************************************************
  * gpiolib interface for EP93xx on-chip GPIOs
  *************************************************************************/

@@ -276,17 +271,19 @@ struct ep93xx_gpio_bank {
 	const char	*label;
 	int		data;
 	int		dir;
+	int		irq;
 	int		base;
 	bool		has_irq;
 	bool		has_hierarchical_irq;
 	unsigned int	irq_base;
 };
 
-#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq, _has_hier, _irq_base) \
+#define EP93XX_GPIO_BANK(_label, _data, _dir, _irq, _base, _has_irq, _has_hier, _irq_base) \
 	{ \
 		.label		= _label, \
 		.data		= _data, \
 		.dir		= _dir, \
+		.irq		= _irq, \
 		.base		= _base, \
 		.has_irq	= _has_irq, \
 		.has_hierarchical_irq = _has_hier, \

@@ -295,16 +292,16 @@ struct ep93xx_gpio_bank {
 
 static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
 	/* Bank A has 8 IRQs */
-	EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true, false, 64),
+	EP93XX_GPIO_BANK("A", 0x00, 0x10, 0x90, 0, true, false, 64),
 	/* Bank B has 8 IRQs */
-	EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true, false, 72),
-	EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false, false, 0),
-	EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false, false, 0),
-	EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false, false, 0),
+	EP93XX_GPIO_BANK("B", 0x04, 0x14, 0xac, 8, true, false, 72),
+	EP93XX_GPIO_BANK("C", 0x08, 0x18, 0x00, 40, false, false, 0),
+	EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 0x00, 24, false, false, 0),
+	EP93XX_GPIO_BANK("E", 0x20, 0x24, 0x00, 32, false, false, 0),
 	/* Bank F has 8 IRQs */
-	EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, false, true, 0),
-	EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false, false, 0),
-	EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false, false, 0),
+	EP93XX_GPIO_BANK("F", 0x30, 0x34, 0x4c, 16, false, true, 0),
+	EP93XX_GPIO_BANK("G", 0x38, 0x3c, 0x00, 48, false, false, 0),
+	EP93XX_GPIO_BANK("H", 0x40, 0x44, 0x00, 56, false, false, 0),
 };
 
 static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,

@@ -326,13 +323,23 @@ static int ep93xx_gpio_f_to_irq(struct gpio_chip *gc, unsigned offset)
 	return EP93XX_GPIO_F_IRQ_BASE + offset;
 }
 
-static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
+static void ep93xx_init_irq_chip(struct device *dev, struct irq_chip *ic)
+{
+	ic->irq_ack = ep93xx_gpio_irq_ack;
+	ic->irq_mask_ack = ep93xx_gpio_irq_mask_ack;
+	ic->irq_mask = ep93xx_gpio_irq_mask;
+	ic->irq_unmask = ep93xx_gpio_irq_unmask;
+	ic->irq_set_type = ep93xx_gpio_irq_type;
+}
+
+static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
 				struct platform_device *pdev,
 				struct ep93xx_gpio *epg,
 				struct ep93xx_gpio_bank *bank)
 {
 	void __iomem *data = epg->base + bank->data;
 	void __iomem *dir = epg->base + bank->dir;
+	struct gpio_chip *gc = &egc->gc;
 	struct device *dev = &pdev->dev;
 	struct gpio_irq_chip *girq;
 	int err;

@@ -346,8 +353,21 @@ static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
 
 	girq = &gc->irq;
 	if (bank->has_irq || bank->has_hierarchical_irq) {
+		struct irq_chip *ic;
+
 		gc->set_config = ep93xx_gpio_set_config;
-		girq->chip = &ep93xx_gpio_irq_chip;
+		egc->eic = devm_kcalloc(dev, 1,
+					sizeof(*egc->eic),
+					GFP_KERNEL);
+		if (!egc->eic)
+			return -ENOMEM;
+		egc->eic->irq_offset = bank->irq;
+		ic = &egc->eic->ic;
+		ic->name = devm_kasprintf(dev, GFP_KERNEL, "gpio-irq-%s", bank->label);
+		if (!ic->name)
+			return -ENOMEM;
+		ep93xx_init_irq_chip(dev, ic);
+		girq->chip = ic;
 	}
 
 	if (bank->has_irq) {

@@ -389,7 +409,7 @@ static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
 			gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i;
 			irq_set_chip_data(gpio_irq, &epg->gc[5]);
 			irq_set_chip_and_handler(gpio_irq,
-						 &ep93xx_gpio_irq_chip,
+						 girq->chip,
 						 handle_level_irq);
 			irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
 		}

@@ -415,7 +435,7 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
 		return PTR_ERR(epg->base);
 
 	for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
-		struct gpio_chip *gc = &epg->gc[i];
+		struct ep93xx_gpio_chip *gc = &epg->gc[i];
 		struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
 
 		if (ep93xx_gpio_add_bank(gc, pdev, epg, bank))
@ -1792,8 +1792,8 @@ static void emulated_link_detect(struct dc_link *link)
|
||||
link->type = dc_connection_none;
|
||||
prev_sink = link->local_sink;
|
||||
|
||||
if (prev_sink != NULL)
|
||||
dc_sink_retain(prev_sink);
|
||||
if (prev_sink)
|
||||
dc_sink_release(prev_sink);
|
||||
|
||||
switch (link->connector_signal) {
|
||||
case SIGNAL_TYPE_HDMI_TYPE_A: {
|
||||
@ -2261,8 +2261,10 @@ void amdgpu_dm_update_connector_after_detect(
|
||||
* TODO: check if we still need the S3 mode update workaround.
|
||||
* If yes, put it here.
|
||||
*/
|
||||
if (aconnector->dc_sink)
|
||||
if (aconnector->dc_sink) {
|
||||
amdgpu_dm_update_freesync_caps(connector, NULL);
|
||||
dc_sink_release(aconnector->dc_sink);
|
||||
}
|
||||
|
||||
aconnector->dc_sink = sink;
|
||||
dc_sink_retain(aconnector->dc_sink);
|
||||
@ -7870,14 +7872,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
|
||||
|
||||
ret = PTR_ERR_OR_ZERO(conn_state);
|
||||
if (ret)
|
||||
goto err;
|
||||
goto out;
|
||||
|
||||
/* Attach crtc to drm_atomic_state*/
|
||||
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
|
||||
|
||||
ret = PTR_ERR_OR_ZERO(crtc_state);
|
||||
if (ret)
|
||||
goto err;
|
||||
goto out;
|
||||
|
||||
/* force a restore */
|
||||
crtc_state->mode_changed = true;
|
||||
@ -7887,17 +7889,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
|
||||
|
||||
ret = PTR_ERR_OR_ZERO(plane_state);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
goto out;
|
||||
|
||||
/* Call commit internally with the state we just constructed */
|
||||
ret = drm_atomic_commit(state);
|
||||
if (!ret)
|
||||
return 0;
|
||||
|
||||
err:
|
||||
DRM_ERROR("Restoring old state failed with %i\n", ret);
|
||||
out:
|
||||
drm_atomic_state_put(state);
|
||||
if (ret)
|
||||
DRM_ERROR("Restoring old state failed with %i\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -828,6 +828,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
|
||||
if (computed_streams[i])
|
||||
continue;
|
||||
|
||||
if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
|
||||
return false;
|
||||
|
||||
mutex_lock(&aconnector->mst_mgr.lock);
|
||||
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
|
||||
mutex_unlock(&aconnector->mst_mgr.lock);
|
||||
@ -845,7 +848,8 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
|
||||
stream = dc_state->streams[i];
|
||||
|
||||
if (stream->timing.flags.DSC == 1)
|
||||
dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
|
||||
if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
@ -877,14 +877,14 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_inte
|
||||
|
||||
switch (dpcd_aux_read_interval) {
|
||||
case 0x01:
|
||||
aux_rd_interval_us = 400;
|
||||
break;
|
||||
case 0x02:
|
||||
aux_rd_interval_us = 4000;
|
||||
break;
|
||||
case 0x03:
|
||||
case 0x02:
|
||||
aux_rd_interval_us = 8000;
|
||||
break;
|
||||
case 0x03:
|
||||
aux_rd_interval_us = 12000;
|
||||
break;
|
||||
case 0x04:
|
||||
aux_rd_interval_us = 16000;
|
||||
break;
|
||||
|
@ -297,8 +297,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
|
||||
},
|
||||
},
|
||||
.num_states = 5,
|
||||
.sr_exit_time_us = 11.6,
|
||||
.sr_enter_plus_exit_time_us = 13.9,
|
||||
.sr_exit_time_us = 8.6,
|
||||
.sr_enter_plus_exit_time_us = 10.9,
|
||||
.urgent_latency_us = 4.0,
|
||||
.urgent_latency_pixel_data_only_us = 4.0,
|
||||
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
|
||||
|
@ -902,6 +902,8 @@ enum dcn20_clk_src_array_id {
|
||||
DCN20_CLK_SRC_PLL0,
|
||||
DCN20_CLK_SRC_PLL1,
|
||||
DCN20_CLK_SRC_PLL2,
|
||||
DCN20_CLK_SRC_PLL3,
|
||||
DCN20_CLK_SRC_PLL4,
|
||||
DCN20_CLK_SRC_TOTAL_DCN21
|
||||
};
|
||||
|
||||
@ -1880,6 +1882,14 @@ static bool dcn21_resource_construct(
|
||||
dcn21_clock_source_create(ctx, ctx->dc_bios,
|
||||
CLOCK_SOURCE_COMBO_PHY_PLL2,
|
||||
&clk_src_regs[2], false);
|
||||
pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
|
||||
dcn21_clock_source_create(ctx, ctx->dc_bios,
|
||||
CLOCK_SOURCE_COMBO_PHY_PLL3,
|
||||
&clk_src_regs[3], false);
|
||||
pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
|
||||
dcn21_clock_source_create(ctx, ctx->dc_bios,
|
||||
CLOCK_SOURCE_COMBO_PHY_PLL4,
|
||||
&clk_src_regs[4], false);
|
||||
|
||||
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
|
||||
|
||||
|
@ -4224,6 +4224,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
|
||||
|
||||
switch (port->pdt) {
|
||||
case DP_PEER_DEVICE_NONE:
|
||||
break;
|
||||
case DP_PEER_DEVICE_MST_BRANCHING:
|
||||
if (!port->mcs)
|
||||
ret = connector_status_connected;
|
||||
|
@ -182,6 +182,7 @@ struct intel_overlay {
|
||||
struct intel_crtc *crtc;
|
||||
struct i915_vma *vma;
|
||||
struct i915_vma *old_vma;
|
||||
struct intel_frontbuffer *frontbuffer;
|
||||
bool active;
|
||||
bool pfit_active;
|
||||
u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
|
||||
@ -282,21 +283,19 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
|
||||
struct i915_vma *vma)
|
||||
{
|
||||
enum pipe pipe = overlay->crtc->pipe;
|
||||
struct intel_frontbuffer *from = NULL, *to = NULL;
|
||||
struct intel_frontbuffer *frontbuffer = NULL;
|
||||
|
||||
drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
|
||||
|
||||
if (overlay->vma)
|
||||
from = intel_frontbuffer_get(overlay->vma->obj);
|
||||
if (vma)
|
||||
to = intel_frontbuffer_get(vma->obj);
|
||||
frontbuffer = intel_frontbuffer_get(vma->obj);
|
||||
|
||||
intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
|
||||
intel_frontbuffer_track(overlay->frontbuffer, frontbuffer,
|
||||
INTEL_FRONTBUFFER_OVERLAY(pipe));
|
||||
|
||||
if (to)
|
||||
intel_frontbuffer_put(to);
|
||||
if (from)
|
||||
intel_frontbuffer_put(from);
|
||||
if (overlay->frontbuffer)
|
||||
intel_frontbuffer_put(overlay->frontbuffer);
|
||||
overlay->frontbuffer = frontbuffer;
|
||||
|
||||
intel_frontbuffer_flip_prepare(overlay->i915,
|
||||
INTEL_FRONTBUFFER_OVERLAY(pipe));
|
||||
|
@ -23,36 +23,6 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
|
||||
return names[mode];
|
||||
}
|
||||
|
||||
static void
|
||||
tc_port_load_fia_params(struct drm_i915_private *i915,
|
||||
struct intel_digital_port *dig_port)
|
||||
{
|
||||
enum port port = dig_port->base.port;
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, port);
|
||||
u32 modular_fia;
|
||||
|
||||
if (INTEL_INFO(i915)->display.has_modular_fia) {
|
||||
modular_fia = intel_uncore_read(&i915->uncore,
|
||||
PORT_TX_DFLEXDPSP(FIA1));
|
||||
drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
|
||||
modular_fia &= MODULAR_FIA_MASK;
|
||||
} else {
|
||||
modular_fia = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Each Modular FIA instance houses 2 TC ports. In SOC that has more
|
||||
* than two TC ports, there are multiple instances of Modular FIA.
|
||||
*/
|
||||
if (modular_fia) {
|
||||
dig_port->tc_phy_fia = tc_port / 2;
|
||||
dig_port->tc_phy_fia_idx = tc_port % 2;
|
||||
} else {
|
||||
dig_port->tc_phy_fia = FIA1;
|
||||
dig_port->tc_phy_fia_idx = tc_port;
|
||||
}
|
||||
}
|
||||
|
||||
static enum intel_display_power_domain
|
||||
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
|
||||
{
|
||||
@ -646,6 +616,43 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
|
||||
mutex_unlock(&dig_port->tc_lock);
|
||||
}
|
||||
|
||||
static bool
|
||||
tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
|
||||
{
|
||||
intel_wakeref_t wakeref;
|
||||
u32 val;
|
||||
|
||||
if (!INTEL_INFO(i915)->display.has_modular_fia)
|
||||
return false;
|
||||
|
||||
wakeref = tc_cold_block(dig_port);
|
||||
val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
|
||||
tc_cold_unblock(dig_port, wakeref);
|
||||
|
||||
drm_WARN_ON(&i915->drm, val == 0xffffffff);
|
||||
|
||||
return val & MODULAR_FIA_MASK;
|
||||
}
|
||||
|
||||
static void
|
||||
tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
|
||||
{
|
||||
enum port port = dig_port->base.port;
|
||||
enum tc_port tc_port = intel_port_to_tc(i915, port);
|
||||
|
||||
/*
|
||||
* Each Modular FIA instance houses 2 TC ports. In SOC that has more
|
||||
* than two TC ports, there are multiple instances of Modular FIA.
|
||||
*/
|
||||
if (tc_has_modular_fia(i915, dig_port)) {
|
||||
dig_port->tc_phy_fia = tc_port / 2;
|
||||
dig_port->tc_phy_fia_idx = tc_port % 2;
|
||||
} else {
|
||||
dig_port->tc_phy_fia = FIA1;
|
||||
dig_port->tc_phy_fia_idx = tc_port;
|
||||
}
|
||||
}
|
||||
|
||||
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
|
||||
|
@ -689,6 +689,30 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
|
||||
SUN4I_TCON1_BASIC5_V_SYNC(vsync) |
|
||||
SUN4I_TCON1_BASIC5_H_SYNC(hsync));
|
||||
|
||||
/* Setup the polarity of multiple signals */
|
||||
if (tcon->quirks->polarity_in_ch0) {
|
||||
val = 0;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
|
||||
val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
|
||||
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
|
||||
|
||||
regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
|
||||
} else {
|
||||
/* according to vendor driver, this bit must be always set */
|
||||
val = SUN4I_TCON1_IO_POL_UNKNOWN;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
|
||||
val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
|
||||
val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE;
|
||||
|
||||
regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val);
|
||||
}
|
||||
|
||||
/* Map output pins to channel 1 */
|
||||
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
|
||||
SUN4I_TCON_GCTL_IOMAP_MASK,
|
||||
@ -1517,6 +1541,7 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
|
||||
|
||||
static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
|
||||
.has_channel_1 = true,
|
||||
.polarity_in_ch0 = true,
|
||||
.set_mux = sun8i_r40_tcon_tv_set_mux,
|
||||
};
|
||||
|
||||
|
@ -153,6 +153,11 @@
|
||||
#define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff)
|
||||
|
||||
#define SUN4I_TCON1_IO_POL_REG 0xf0
|
||||
/* there is no documentation about this bit */
|
||||
#define SUN4I_TCON1_IO_POL_UNKNOWN BIT(26)
|
||||
#define SUN4I_TCON1_IO_POL_HSYNC_POSITIVE BIT(25)
|
||||
#define SUN4I_TCON1_IO_POL_VSYNC_POSITIVE BIT(24)
|
||||
|
||||
#define SUN4I_TCON1_IO_TRI_REG 0xf4
|
||||
|
||||
#define SUN4I_TCON_ECC_FIFO_REG 0xf8
|
||||
@ -235,6 +240,7 @@ struct sun4i_tcon_quirks {
|
||||
bool needs_de_be_mux; /* sun6i needs mux to select backend */
|
||||
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
|
||||
bool supports_lvds; /* Does the TCON support an LVDS output? */
|
||||
bool polarity_in_ch0; /* some tcon1 channels have polarity bits in tcon0 pol register */
|
||||
u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
|
||||
|
||||
/* callback to handle tcon muxing options */
|
||||
|
@ -21,8 +21,7 @@ static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
|
||||
{
|
||||
struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder);
|
||||
|
||||
if (hdmi->quirks->set_rate)
|
||||
clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
|
||||
clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
|
||||
}
|
||||
|
||||
static const struct drm_encoder_helper_funcs
|
||||
@ -48,11 +47,9 @@ sun8i_dw_hdmi_mode_valid_h6(struct dw_hdmi *hdmi, void *data,
|
||||
{
|
||||
/*
|
||||
* Controller support maximum of 594 MHz, which correlates to
|
||||
* 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
|
||||
* 340 MHz scrambling has to be enabled. Because scrambling is
|
||||
* not yet implemented, just limit to 340 MHz for now.
|
||||
* 4K@60Hz 4:4:4 or RGB.
|
||||
*/
|
||||
if (mode->clock > 340000)
|
||||
if (mode->clock > 594000)
|
||||
return MODE_CLOCK_HIGH;
|
||||
|
||||
return MODE_OK;
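
For reference, the 594 MHz figure follows from standard 4K@60 timing (assuming the common CTA-861 2160p60 mode with 4400 x 2250 total pixels per frame): 4400 * 2250 * 60 = 594,000,000 pixels per second, i.e. a 594 MHz pixel clock. Since mode->clock is expressed in kHz, the check compares against 594000; anything above it exceeds what 4K@60Hz 4:4:4/RGB requires and what the controller supports.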

@@ -295,7 +292,6 @@ static int sun8i_dw_hdmi_remove(struct platform_device *pdev)

static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_a83t,
.set_rate = true,
};

static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {

@@ -179,7 +179,6 @@ struct sun8i_dw_hdmi_quirks {
enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
unsigned int set_rate : 1;
unsigned int use_drm_infoframe : 1;
};

@@ -104,29 +104,21 @@ static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {

static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
/* pixelclk bpp8 bpp10 bpp12 */
{ 25175000, { 0x0000, 0x0000, 0x0000 }, },
{ 27000000, { 0x0012, 0x0000, 0x0000 }, },
{ 59400000, { 0x0008, 0x0008, 0x0008 }, },
{ 72000000, { 0x0008, 0x0008, 0x001b }, },
{ 74250000, { 0x0013, 0x0013, 0x0013 }, },
{ 90000000, { 0x0008, 0x001a, 0x001b }, },
{ 118800000, { 0x001b, 0x001a, 0x001b }, },
{ 144000000, { 0x001b, 0x001a, 0x0034 }, },
{ 180000000, { 0x001b, 0x0033, 0x0034 }, },
{ 216000000, { 0x0036, 0x0033, 0x0034 }, },
{ 237600000, { 0x0036, 0x0033, 0x001b }, },
{ 288000000, { 0x0036, 0x001b, 0x001b }, },
{ 297000000, { 0x0019, 0x001b, 0x0019 }, },
{ 330000000, { 0x0036, 0x001b, 0x001b }, },
{ 594000000, { 0x003f, 0x001b, 0x001b }, },
{ 74250000, { 0x0013, 0x001a, 0x001b }, },
{ 148500000, { 0x0019, 0x0033, 0x0034 }, },
{ 297000000, { 0x0019, 0x001b, 0x001b }, },
{ 594000000, { 0x0010, 0x001b, 0x001b }, },
{ ~0UL, { 0x0000, 0x0000, 0x0000 }, }
};

static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
/*pixelclk symbol term vlev*/
{ 74250000, 0x8009, 0x0004, 0x0232},
{ 148500000, 0x8029, 0x0004, 0x0273},
{ 594000000, 0x8039, 0x0004, 0x014a},
{ 27000000, 0x8009, 0x0007, 0x02b0 },
{ 74250000, 0x8009, 0x0006, 0x022d },
{ 148500000, 0x8029, 0x0006, 0x0270 },
{ 297000000, 0x8039, 0x0005, 0x01ab },
{ 594000000, 0x8029, 0x0000, 0x008a },
{ ~0UL, 0x0000, 0x0000, 0x0000}
};

@@ -220,7 +220,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}

static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
{
if (vc4_state->dlist_count == vc4_state->dlist_size) {
u32 new_size = max(4u, vc4_state->dlist_count * 2);
@@ -235,7 +235,15 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
vc4_state->dlist_size = new_size;
}

vc4_state->dlist[vc4_state->dlist_count++] = val;
vc4_state->dlist_count++;
}

static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
{
unsigned int idx = vc4_state->dlist_count;

vc4_dlist_counter_increment(vc4_state);
vc4_state->dlist[idx] = val;
}
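
Splitting the counter bump out of vc4_dlist_write() matters because vc4_plane_mode_set() (below) reserves a dlist slot for lbm_offset before the value to store there is known; routing that reservation through vc4_dlist_counter_increment() keeps the grow-on-overflow check in one place instead of bumping dlist_count past dlist_size unchecked. A minimal sketch of the reserve-then-fill pattern (the helper name is illustrative, not from this commit):

static unsigned int vc4_dlist_reserve_slot(struct vc4_plane_state *vc4_state)
{
	unsigned int idx = vc4_state->dlist_count;

	/* Grows the dlist array if needed, then advances the counter. */
	vc4_dlist_counter_increment(vc4_state);
	return idx; /* the caller writes vc4_state->dlist[idx] later */
}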

/* Returns the scl0/scl1 field based on whether the dimensions need to
@@ -978,8 +986,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* be set when calling vc4_plane_allocate_lbm().
*/
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
vc4_state->y_scaling[1] != VC4_SCALING_NONE)
vc4_state->lbm_offset = vc4_state->dlist_count++;
vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
vc4_state->lbm_offset = vc4_state->dlist_count;
vc4_dlist_counter_increment(vc4_state);
}

if (num_planes > 1) {
/* Emit Cb/Cr as channel 0 and Y as channel

@@ -57,6 +57,8 @@
#define STM32F7_I2C_CR1_RXDMAEN BIT(15)
#define STM32F7_I2C_CR1_TXDMAEN BIT(14)
#define STM32F7_I2C_CR1_ANFOFF BIT(12)
#define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8)
#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8)
#define STM32F7_I2C_CR1_ERRIE BIT(7)
#define STM32F7_I2C_CR1_TCIE BIT(6)
#define STM32F7_I2C_CR1_STOPIE BIT(5)
@@ -160,7 +162,7 @@ enum {
};

#define STM32F7_I2C_DNF_DEFAULT 0
#define STM32F7_I2C_DNF_MAX 16
#define STM32F7_I2C_DNF_MAX 15

#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
@@ -725,6 +727,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
else
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_ANFOFF);

/* Program the Digital Filter */
stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_DNF_MASK);
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));

stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_PE);
}
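
The DNF_MAX change from 16 to 15 follows from the register layout added above: the digital-filter field is GENMASK(11, 8), i.e. four bits wide, and STM32F7_I2C_CR1_DNF() masks its argument with 0xf. A short standalone illustration using the same macro shape:

/* The DNF field is 4 bits, so 15 is the largest programmable value. */
#define DNF(n) (((n) & 0xf) << 8)
/* DNF(15) == 0x0f00 -> maximum filtering            */
/* DNF(16) == 0x0000 -> silently disables the filter */

With the old limit of 16, a requested filter length of 16 would have been truncated to 0 and turned the digital noise filter off instead of clamping it.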

@@ -18,7 +18,7 @@ CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO)

OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
--rename-section .text=.rodata,alloc,readonly,load
--rename-section .noinstr.text=.rodata,alloc,readonly,load
targets += rodata.o rodata_objcopy.o
$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
$(call if_changed,objcopy)

@@ -5,7 +5,7 @@
*/
#include "lkdtm.h"

void notrace lkdtm_rodata_do_nothing(void)
void noinstr lkdtm_rodata_do_nothing(void)
{
/* Does nothing. We just want an architecture agnostic "return". */
}

@@ -214,9 +214,24 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
{
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port = ocelot->ports[port];
int err;

ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
DEV_MAC_ENA_CFG);

ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);

err = ocelot_port_flush(ocelot, port);
if (err)
dev_err(ocelot->dev, "failed to flush port %d: %d\n",
port, err);

/* Put the port in reset. */
ocelot_port_writel(ocelot_port,
DEV_CLOCK_CFG_MAC_TX_RST |
DEV_CLOCK_CFG_MAC_RX_RST |
DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
DEV_CLOCK_CFG);
}

static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,

@@ -196,6 +196,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_CBS_BW_MASK GENMASK(6, 0)
#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
#define ENETC_RSSHASH_KEY_SIZE 40
#define ENETC_PRSSCAPR 0x1404
#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
#define ENETC_PSIVLANFMR 0x1700
#define ENETC_PSIVLANFMR_VS BIT(0)

@@ -1004,6 +1004,51 @@ static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
phylink_destroy(priv->phylink);
}

/* Initialize the entire shared memory for the flow steering entries
* of this port (PF + VFs)
*/
static int enetc_init_port_rfs_memory(struct enetc_si *si)
{
struct enetc_cmd_rfse rfse = {0};
struct enetc_hw *hw = &si->hw;
int num_rfs, i, err = 0;
u32 val;

val = enetc_port_rd(hw, ENETC_PRFSCAPR);
num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);

for (i = 0; i < num_rfs; i++) {
err = enetc_set_fs_entry(si, &rfse, i);
if (err)
break;
}

return err;
}

static int enetc_init_port_rss_memory(struct enetc_si *si)
{
struct enetc_hw *hw = &si->hw;
int num_rss, err;
int *rss_table;
u32 val;

val = enetc_port_rd(hw, ENETC_PRSSCAPR);
num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
if (!num_rss)
return 0;

rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
if (!rss_table)
return -ENOMEM;

err = enetc_set_rss_table(si, rss_table, num_rss);

kfree(rss_table);

return err;
}

static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1058,6 +1103,18 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_si_res;
}

err = enetc_init_port_rfs_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
goto err_init_port_rfs;
}

err = enetc_init_port_rss_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
goto err_init_port_rss;
}

err = enetc_alloc_msix(priv);
if (err) {
dev_err(&pdev->dev, "MSIX alloc failed\n");
@@ -1086,6 +1143,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
enetc_mdiobus_destroy(pf);
err_mdiobus_create:
enetc_free_msix(priv);
err_init_port_rss:
err_init_port_rfs:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:

@@ -9404,12 +9404,19 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)

void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret;

if (queue_id >= handle->kinfo.num_tqps) {
dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
queue_id);
return;
}

queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);

ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);

@@ -158,21 +158,31 @@ static int hclge_get_ring_chain_from_mbx(
struct hclge_vport *vport)
{
struct hnae3_ring_chain_node *cur_chain, *new_chain;
struct hclge_dev *hdev = vport->back;
int ring_num;
int i = 0;
int i;

ring_num = req->msg.ring_num;

if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
return -ENOMEM;

for (i = 0; i < ring_num; i++) {
if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
req->msg.param[i].tqp_index,
vport->nic.kinfo.rss_size - 1);
return -EINVAL;
}
}

hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
req->msg.param[i].ring_type);
req->msg.param[0].ring_type);
ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
[req->msg.param[i].tqp_index]);
[req->msg.param[0].tqp_index]);
hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);
HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);

cur_chain = ring_chain;

@@ -581,6 +591,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,

index = mbx_req->msg.data[0];

/* Check the query index of rss_hash_key from VF, make sure no
* more than the size of rss_hash_key.
*/
if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
sizeof(vport[0].rss_hash_key)) {
dev_warn(&hdev->pdev->dev,
"failed to get the rss hash key, the index(%u) invalid !\n",
index);
return;
}

memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);

@@ -4813,7 +4813,22 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
}
ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
if (rc && rc != -EBUSY) {
/* We were unable to schedule the failover
* reset either because the adapter was still
* probing (eg: during kexec) or we could not
* allocate memory. Clear the failover_pending
* flag since no one else will. We ignore
* EBUSY because it means either FAILOVER reset
* is already scheduled or the adapter is
* being removed.
*/
netdev_err(netdev,
"Error %ld scheduling failover reset\n",
rc);
adapter->failover_pending = false;
}
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");

@@ -348,6 +348,60 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
}
}

static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
{
return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
}

int ocelot_port_flush(struct ocelot *ocelot, int port)
{
int err, val;

/* Disable dequeuing from the egress queues */
ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
QSYS_PORT_MODE_DEQUEUE_DIS,
QSYS_PORT_MODE, port);

/* Disable flow control */
ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);

/* Disable priority flow control */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);

/* Wait at least the time it takes to receive a frame of maximum length
* at the port.
* Worst-case delays for 10 kilobyte jumbo frames are:
* 8 ms on a 10M port
* 800 μs on a 100M port
* 80 μs on a 1G port
* 32 μs on a 2.5G port
*/
usleep_range(8000, 10000);

/* Disable half duplex backpressure. */
ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
SYS_FRONT_PORT_MODE, port);

/* Flush the queues associated with the port. */
ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
REW_PORT_CFG, port);

/* Enable dequeuing from the egress queues. */
ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
port);

/* Wait until flushing is complete. */
err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
100, 2000000, false, ocelot, port);

/* Clear flushing again. */
ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);

return err;
}
EXPORT_SYMBOL(ocelot_port_flush);
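
The worst-case delays quoted in the comment are just the maximum frame size divided by the line rate; for a 10 kilobyte (80,000 bit) jumbo frame:

80,000 bits / 10 Mbit/s = 8 ms
80,000 bits / 100 Mbit/s = 800 us
80,000 bits / 1 Gbit/s = 80 us
80,000 bits / 2.5 Gbit/s = 32 us

which is why usleep_range(8000, 10000) covers the slowest (10M) case with some margin.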

void ocelot_adjust_link(struct ocelot *ocelot, int port,
struct phy_device *phydev)
{

@@ -71,6 +71,14 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
}
EXPORT_SYMBOL(ocelot_port_writel);

void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
{
u32 cur = ocelot_port_readl(port, reg);

ocelot_port_writel(port, (cur & (~mask)) | val, reg);
}
EXPORT_SYMBOL(ocelot_port_rmwl);

u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 reg, u32 offset)
{

@@ -330,7 +330,12 @@ static int tc_setup_cbs(struct stmmac_priv *priv,

priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
} else if (!qopt->enable) {
return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
MTL_QUEUE_DCB);
if (ret)
return ret;

priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
}

/* Port Transmit Rate and Speed Divider */

@@ -1253,8 +1253,11 @@ static int netvsc_receive(struct net_device *ndev,
ret = rndis_filter_receive(ndev, net_device,
nvchan, data, buflen);

if (unlikely(ret != NVSP_STAT_SUCCESS))
if (unlikely(ret != NVSP_STAT_SUCCESS)) {
/* Drop incomplete packet */
nvchan->rsc.cnt = 0;
status = NVSP_STAT_FAIL;
}
}

enq_receive_complete(ndev, net_device, q_idx,

@@ -508,8 +508,6 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return ret;

drop:
/* Drop incomplete packet */
nvchan->rsc.cnt = 0;
return NVSP_STAT_FAIL;
}

@@ -1573,6 +1573,7 @@ static int gsi_channel_setup(struct gsi *gsi, bool legacy)
if (!channel->gsi)
continue; /* Ignore uninitialized channels */

ret = -EINVAL;
dev_err(gsi->dev, "channel %u not supported by hardware\n",
channel_id - 1);
channel_id = gsi->channel_count;

@@ -171,11 +171,11 @@ static int x25_open(struct net_device *dev)

result = lapb_register(dev, &cb);
if (result != LAPB_OK)
return result;
return -ENOMEM;

result = lapb_getparms(dev, &params);
if (result != LAPB_OK)
return result;
return -EINVAL;

if (state(hdlc)->settings.dce)
params.mode = params.mode | LAPB_DCE;
@@ -190,7 +190,7 @@ static int x25_open(struct net_device *dev)

result = lapb_setparms(dev, &params);
if (result != LAPB_OK)
return result;
return -EINVAL;

return 0;
}
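
The error-code changes above reflect that the lapb_* calls return LAPB_* status codes, which are not negative errnos; returning them unchanged from an open handler would be misread by the networking core. A sketch of the mapping idea (the helper is illustrative, not from this commit, and assumes the LAPB_* codes from include/linux/lapb.h):

static int lapb_to_errno(int lapb_rc)
{
	switch (lapb_rc) {
	case LAPB_OK:
		return 0;
	case LAPB_NOMEM:
		return -ENOMEM;
	default:
		/* bad token, bad value, refused, timed out, ... */
		return -EINVAL;
	}
}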

@@ -21,11 +21,9 @@ config ATH9K_BTCOEX_SUPPORT
config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on MAC80211 && HAS_DMA
select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
select ATH9K_HW
select ATH9K_COMMON
imply NEW_LEDS
imply LEDS_CLASS
imply MAC80211_LEDS
help
This module adds support for wireless adapters based on
Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
@@ -176,11 +174,9 @@ config ATH9K_PCI_NO_EEPROM
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
select ATH9K_HW
select ATH9K_COMMON
imply NEW_LEDS
imply LEDS_CLASS
imply MAC80211_LEDS
help
Support for Atheros HTC based cards.
Chipsets supported: AR9271

@@ -519,15 +519,17 @@ static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
int len, bool more)
{
struct page *page = virt_to_head_page(data);
int offset = data - page_address(page);
struct sk_buff *skb = q->rx_head;
struct skb_shared_info *shinfo = skb_shinfo(skb);

if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
offset += q->buf_offset;
struct page *page = virt_to_head_page(data);
int offset = data - page_address(page) + q->buf_offset;

skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
q->buf_size);
} else {
skb_free_frag(data);
}

if (more)

@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
RING_IDX prod, cons;
struct sk_buff *skb;
int needed;
unsigned long flags;

spin_lock_irqsave(&queue->rx_queue.lock, flags);

skb = skb_peek(&queue->rx_queue);
if (!skb)
if (!skb) {
spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
return false;
}

needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
if (skb_is_gso(skb))
@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
if (skb->sw_hash)
needed++;

spin_unlock_irqrestore(&queue->rx_queue.lock, flags);

do {
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;

@@ -3247,6 +3247,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */

@@ -32,6 +32,10 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");

static int enable_tablet_mode_sw = -1;
module_param(enable_tablet_mode_sw, int, 0444);
MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
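
With the parameter in place, tablet-mode reporting can be forced at load time instead of relying on detection, e.g. (usage sketch; modprobe treats '-' and '_' in module names interchangeably):

modprobe hp_wmi enable_tablet_mode_sw=1

or on the kernel command line as hp_wmi.enable_tablet_mode_sw=1, with 0 disabling SW_TABLET_MODE reporting and -1 (the default) selecting the automatic behaviour described above.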

#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"

@@ -654,10 +658,12 @@ static int __init hp_wmi_input_setup(void)
}

/* Tablet mode */
val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
if (!(val < 0)) {
__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
if (enable_tablet_mode_sw > 0) {
val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
if (val >= 0) {
__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
}
}

err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);

@@ -714,6 +714,9 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return -ENODEV;
}

if (!vport->phba->sli4_hba.nvmels_wq)
return -ENOMEM;

/*
* there are two dma buf in the request, actually there is one and
* the second one is just the start address + cmd size.

@@ -6881,6 +6881,7 @@ static void __exit scsi_debug_exit(void)

sdebug_erase_all_stores(false);
xa_destroy(per_store_ap);
kfree(sdebug_q_arr);
}

device_initcall(scsi_debug_init);

@@ -552,6 +552,7 @@ static int omap_prm_reset_init(struct platform_device *pdev,
const struct omap_rst_map *map;
struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
char buf[32];
u32 v;

/*
* Check if we have controllable resets. If either rstctrl is non-zero
@@ -599,6 +600,16 @@ static int omap_prm_reset_init(struct platform_device *pdev,
map++;
}

/* Quirk handling to assert rst_map_012 bits on reset and avoid errors */
if (prm->data->rstmap == rst_map_012) {
v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
if ((v & reset->mask) != reset->mask) {
dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v);
writel_relaxed(reset->mask, reset->prm->base +
reset->prm->data->rstctrl);
}
}

return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
}

@@ -1646,9 +1646,16 @@ static void __usb_hcd_giveback_urb(struct urb *urb)

/* pass ownership to the completion handler */
urb->status = status;
kcov_remote_start_usb((u64)urb->dev->bus->busnum);
/*
* This function can be called in task context inside another remote
* coverage collection section, but KCOV doesn't support that kind of
* recursion yet. Only collect coverage in softirq context for now.
*/
if (in_serving_softirq())
kcov_remote_start_usb((u64)urb->dev->bus->busnum);
urb->complete(urb);
kcov_remote_stop();
if (in_serving_softirq())
kcov_remote_stop();

usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);

@@ -115,7 +115,6 @@ int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
int xenbus_probe_devices(struct xen_bus_type *bus);
void xenbus_probe(void);

void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);

@@ -683,7 +683,7 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);

void xenbus_probe(void)
static void xenbus_probe(void)
{
xenstored_ready = 1;

@@ -368,6 +368,7 @@ arch/arm64/mm/fault.c
arch/arm64/mm/flush.c
arch/arm64/mm/init.c
arch/arm64/mm/ioremap.c
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmap.c
arch/arm64/mm/mmu.c
arch/arm64/mm/mteswap.c
@@ -4758,6 +4759,7 @@ lib/sha1.c
lib/show_mem.c
lib/siphash.c
lib/sort.c
lib/stackdepot.c
lib/string.c
lib/string_helpers.c
lib/strncpy_from_user.c
@@ -4830,6 +4832,11 @@ mm/init-mm.c
mm/internal.h
mm/interval_tree.c
mm/ioremap.c
mm/kasan/common.c
mm/kasan/hw_tags.c
mm/kasan/kasan.h
mm/kasan/report.c
mm/kasan/report_hw_tags.c
mm/kfence/core.c
mm/kfence/kfence.h
mm/kfence/report.c
@@ -4854,8 +4861,10 @@ mm/mremap.c
mm/msync.c
mm/oom_kill.c
mm/page_alloc.c
mm/page_ext.c
mm/page_io.c
mm/page_isolation.c
mm/page_owner.c
mm/page_reporting.h
mm/page_vma_mapped.c
mm/pagewalk.c

@@ -204,7 +204,7 @@ config TMPFS_XATTR

config TMPFS_INODE64
bool "Use 64-bit ino_t by default in tmpfs"
depends on TMPFS && 64BIT
depends on TMPFS && 64BIT && !(S390 || ALPHA)
default n
help
tmpfs has historically used only inode numbers as wide as an unsigned

@@ -84,6 +84,14 @@ int ovl_copy_xattr(struct super_block *sb, struct dentry *old,

if (ovl_is_private_xattr(sb, name))
continue;

error = security_inode_copy_up_xattr(name);
if (error < 0 && error != -EOPNOTSUPP)
break;
if (error == 1) {
error = 0;
continue; /* Discard */
}
retry:
size = vfs_getxattr(old, name, value, value_size);
if (size == -ERANGE)
@@ -107,13 +115,6 @@ int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
goto retry;
}

error = security_inode_copy_up_xattr(name);
if (error < 0 && error != -EOPNOTSUPP)
break;
if (error == 1) {
error = 0;
continue; /* Discard */
}
error = vfs_setxattr(new, name, value, size, 0);
if (error) {
if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))

@@ -346,7 +346,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
goto out;

if (!value && !upperdentry) {
old_cred = ovl_override_creds(dentry->d_sb);
err = vfs_getxattr(realdentry, name, NULL, 0);
revert_creds(old_cred);
if (err < 0)
goto out_drop_write;
}

@@ -84,7 +84,7 @@ static void ovl_dentry_release(struct dentry *dentry)
static struct dentry *ovl_d_real(struct dentry *dentry,
const struct inode *inode)
{
struct dentry *real;
struct dentry *real = NULL, *lower;

/* It's an overlay file */
if (inode && d_inode(dentry) == inode)
@@ -103,9 +103,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
if (real && !inode && ovl_has_upperdata(d_inode(dentry)))
return real;

real = ovl_dentry_lowerdata(dentry);
if (!real)
lower = ovl_dentry_lowerdata(dentry);
if (!lower)
goto bug;
real = lower;

/* Handle recursion */
real = d_real(real, inode);
@@ -113,8 +114,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
if (!inode || inode == d_inode(real))
return real;
bug:
WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
WARN(1, "%s(%pd4, %s:%lu): real dentry (%p/%lu) not found\n",
__func__, dentry, inode ? inode->i_sb->s_id : "NULL",
inode ? inode->i_ino : 0, real,
real && d_inode(real) ? d_inode(real)->i_ino : 0);
return dentry;
}

@@ -462,7 +462,7 @@
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
__start_builtin_fw = .; \
KEEP(*(.builtin_fw)) \
__end_builtin_fw = .; \
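
ALIGN(8) matters because the firmware loader walks this section as a contiguous array of records between the two linker symbols; if the section start is not aligned to the record's natural alignment, that pointer arithmetic lands mid-record. A sketch of the consumer-side pattern (the three-field struct builtin_fw layout is assumed here, not shown in this diff):

struct builtin_fw {
	char *name;
	void *data;
	unsigned long size;
};

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

static const struct builtin_fw *find_builtin_fw(const char *name)
{
	struct builtin_fw *b_fw;

	/* Valid only if the section base matches the struct alignment. */
	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
		if (strcmp(name, b_fw->name) == 0)
			return b_fw;
	return NULL;
}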

@@ -79,6 +79,7 @@ static inline void kasan_disable_current(void) {}
struct kasan_cache {
int alloc_meta_offset;
int free_meta_offset;
bool is_kmalloc;
};

#ifdef CONFIG_KASAN_HW_TAGS
@@ -139,6 +140,13 @@ static __always_inline void kasan_cache_create(struct kmem_cache *cache,
__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
if (kasan_enabled())
__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
@@ -188,6 +196,13 @@ static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
if (kasan_enabled())
__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
@@ -235,13 +250,6 @@ static __always_inline void * __must_check kasan_krealloc(const void *object,
return (void *)object;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
if (kasan_enabled())
__kasan_kfree_large(ptr, _RET_IP_);
}

/*
* Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
* the hardware tag-based mode that doesn't rely on compiler instrumentation.
@@ -274,6 +282,7 @@ static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
@@ -289,6 +298,7 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
{
return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
gfp_t flags)
@@ -309,7 +319,6 @@ static inline void *kasan_krealloc(const void *object, size_t new_size,
{
return (void *)object;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline bool kasan_check_byte(const void *address)
{
return true;
@@ -344,6 +353,13 @@ static inline void *kasan_reset_tag(const void *addr)
return (void *)arch_kasan_reset_tag(addr);
}

/**
* kasan_report - print a report about a bad memory access detected by KASAN
* @addr: address of the bad access
* @size: size of the bad access
* @is_write: whether the bad access is a write or a read
* @ip: instruction pointer for the accessibility check or the bad access itself
*/
bool kasan_report(unsigned long addr, size_t size,
bool is_write, unsigned long ip);

@@ -4313,6 +4313,7 @@ static inline void netif_tx_disable(struct net_device *dev)

local_bh_disable();
cpu = smp_processor_id();
spin_lock(&dev->tx_global_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

@@ -4320,6 +4321,7 @@ static inline void netif_tx_disable(struct net_device *dev)
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
spin_unlock(&dev->tx_global_lock);
local_bh_enable();
}