Snap for 7386310 from fb9e7c2977 to android12-5.10-keystone-qcom-release

Change-Id: I05fe29795fec61a2a0fdb872997b9110a1445625

commit 4332ba273e
.gitignore
@@ -56,6 +56,7 @@ modules.order
 /tags
 /TAGS
 /linux
+/modules-only.symvers
 /vmlinux
 /vmlinux.32
 /vmlinux.symvers
@@ -45,9 +45,14 @@ fffe8000	fffeffff	DTCM mapping area for platforms with
 fffe0000	fffe7fff	ITCM mapping area for platforms with
 				ITCM mounted inside the CPU.
 
-ffc00000	ffefffff	Fixmap mapping region.  Addresses provided
+ffc80000	ffefffff	Fixmap mapping region.  Addresses provided
 				by fix_to_virt() will be located here.
 
+ffc00000	ffc7ffff	Guard region
+
+ff800000	ffbfffff	Permanent, fixed read-only mapping of the
+				firmware provided DT blob
+
 fee00000	feffffff	Mapping of PCI I/O space. This is a static
 				mapping within the vmalloc space.
 
@@ -278,23 +278,35 @@ required:
   - interrupts
   - clocks
   - power-domains
-  - resets
 
-if:
-  properties:
-    compatible:
-      contains:
-        enum:
-          - renesas,vin-r8a7778
-          - renesas,vin-r8a7779
-          - renesas,rcar-gen2-vin
-then:
-  required:
-    - port
-else:
-  required:
-    - renesas,id
-    - ports
+allOf:
+  - if:
+      not:
+        properties:
+          compatible:
+            contains:
+              enum:
+                - renesas,vin-r8a7778
+                - renesas,vin-r8a7779
+    then:
+      required:
+        - resets
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - renesas,vin-r8a7778
+              - renesas,vin-r8a7779
+              - renesas,rcar-gen2-vin
+    then:
+      required:
+        - port
+    else:
+      required:
+        - renesas,id
+        - ports
 
 additionalProperties: false
 
@@ -93,11 +93,6 @@ properties:
           - mediatek,mt7622-btif
           - mediatek,mt7623-btif
       - const: mediatek,mtk-btif
-  - items:
-      - enum:
-          - mediatek,mt7622-btif
-          - mediatek,mt7623-btif
-      - const: mediatek,mtk-btif
   - items:
       - const: mrvl,mmp-uart
       - const: intel,xscale-uart
@@ -178,6 +178,7 @@ mktables
 mktree
 mkutf8data
 modpost
+modules-only.symvers
 modules.builtin
 modules.builtin.modinfo
 modules.nsdeps
@@ -6694,6 +6694,7 @@ F:	Documentation/filesystems/f2fs.rst
 F:	fs/f2fs/
 F:	include/linux/f2fs_fs.h
 F:	include/trace/events/f2fs.h
+F:	include/uapi/linux/f2fs.h
 
 F71805F HARDWARE MONITORING DRIVER
 M:	Jean Delvare <jdelvare@suse.com>
Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 37
+SUBLEVEL = 38
 EXTRAVERSION =
 NAME = Dare mighty things
 
@@ -1567,7 +1567,7 @@ endif # CONFIG_MODULES
 # make distclean	Remove editor backup files, patch leftover files and the like
 
 # Directories & files removed with 'make clean'
-CLEAN_FILES += include/ksym vmlinux.symvers \
+CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
 	       modules.builtin modules.builtin.modinfo modules.nsdeps \
 	       compile_commands.json
 
@@ -1203,6 +1203,7 @@
 <elf-symbol name='dma_fence_remove_callback' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x3b20fb95'/>
 <elf-symbol name='dma_fence_signal' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x731c4a9c'/>
 <elf-symbol name='dma_fence_signal_locked' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x3d02cd70'/>
+<elf-symbol name='dma_fence_signal_timestamp_locked' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x8455e3a7'/>
 <elf-symbol name='dma_fence_wait_any_timeout' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x51d58e8'/>
 <elf-symbol name='dma_fence_wait_timeout' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x98c039dc'/>
 <elf-symbol name='dma_free_attrs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes' crc='0x52eb36a6'/>
@@ -57633,6 +57634,11 @@
 <parameter type-id='bd54fe1a' name='timeout' filepath='drivers/dma-buf/dma-fence.c' line='730' column='1'/>
 <return type-id='bd54fe1a'/>
 </function-decl>
+<function-decl name='dma_fence_signal_timestamp_locked' mangled-name='dma_fence_signal_timestamp_locked' filepath='drivers/dma-buf/dma-fence.c' line='332' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dma_fence_signal_timestamp_locked'>
+<parameter type-id='28271da3' name='fence' filepath='drivers/dma-buf/dma-fence.c' line='332' column='1'/>
+<parameter type-id='fbc017ef' name='timestamp' filepath='drivers/dma-buf/dma-fence.c' line='333' column='1'/>
+<return type-id='95e97e5e'/>
+</function-decl>
 <function-decl name='dma_fence_wait_any_timeout' mangled-name='dma_fence_wait_any_timeout' filepath='drivers/dma-buf/dma-fence.c' line='820' column='1' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dma_fence_wait_any_timeout'>
 <parameter type-id='be937eab' name='fences' filepath='drivers/dma-buf/dma-fence.c' line='820' column='1'/>
 <parameter type-id='8f92235e' name='count' filepath='drivers/dma-buf/dma-fence.c' line='820' column='1'/>
@@ -60432,7 +60438,7 @@
 <parameter type-id='94411a61'/>
 <return type-id='48b5725f'/>
 </function-type>
-<union-decl name='__anonymous_union__7' size-in-bits='192' is-anonymous='yes' visibility='default' filepath='include/linux/kfifo.h' line='83' column='1' id='df24571d'>
+<union-decl name='__anonymous_union__15' size-in-bits='192' is-anonymous='yes' visibility='default' filepath='include/linux/kfifo.h' line='83' column='1' id='df24571d'>
 <data-member access='public'>
 <var-decl name='kfifo' type-id='bbbc6c1e' visibility='default' filepath='include/linux/kfifo.h' line='83' column='1'/>
 </data-member>
@@ -118039,7 +118045,7 @@
 <var-decl name='buf' type-id='5e6516ee' visibility='default' filepath='include/linux/kfifo.h' line='83' column='1'/>
 </data-member>
 </class-decl>
-<union-decl name='__anonymous_union__7' size-in-bits='192' is-anonymous='yes' visibility='default' filepath='include/linux/kfifo.h' line='83' column='1' id='df24571d'>
+<union-decl name='__anonymous_union__15' size-in-bits='192' is-anonymous='yes' visibility='default' filepath='include/linux/kfifo.h' line='83' column='1' id='df24571d'>
 <data-member access='public'>
 <var-decl name='kfifo' type-id='bbbc6c1e' visibility='default' filepath='include/linux/kfifo.h' line='83' column='1'/>
 </data-member>
@@ -181809,6 +181815,17 @@
 <var-decl name='__pad4' type-id='7e3959e3' visibility='default' filepath='include/uapi/sound/asound.h' line='585' column='1'/>
 </data-member>
 </class-decl>
+<union-decl name='snd_pcm_sync_id' size-in-bits='128' visibility='default' filepath='include/uapi/sound/asound.h' line='337' column='1' id='85b4e5de'>
+<data-member access='public'>
+<var-decl name='id' type-id='92a46553' visibility='default' filepath='include/uapi/sound/asound.h' line='338' column='1'/>
+</data-member>
+<data-member access='public'>
+<var-decl name='id16' type-id='ff13edc1' visibility='default' filepath='include/uapi/sound/asound.h' line='339' column='1'/>
+</data-member>
+<data-member access='public'>
+<var-decl name='id32' type-id='49580a63' visibility='default' filepath='include/uapi/sound/asound.h' line='340' column='1'/>
+</data-member>
+</union-decl>
 <class-decl name='snd_pcm_hardware' size-in-bits='640' is-struct='yes' visibility='default' filepath='include/sound/pcm.h' line='31' column='1' id='eacd353c'>
 <data-member access='public' layout-offset-in-bits='0'>
 <var-decl name='info' type-id='f0981eeb' visibility='default' filepath='include/sound/pcm.h' line='32' column='1'/>
@@ -181885,17 +181902,6 @@
 </data-member>
 </class-decl>
 <typedef-decl name='snd_pcm_hw_rule_func_t' type-id='657a5cf1' filepath='include/sound/pcm.h' line='226' column='1' id='58d3b2a5'/>
-<union-decl name='snd_pcm_sync_id' size-in-bits='128' visibility='default' filepath='include/uapi/sound/asound.h' line='337' column='1' id='85b4e5de'>
-<data-member access='public'>
-<var-decl name='id' type-id='92a46553' visibility='default' filepath='include/uapi/sound/asound.h' line='338' column='1'/>
-</data-member>
-<data-member access='public'>
-<var-decl name='id16' type-id='ff13edc1' visibility='default' filepath='include/uapi/sound/asound.h' line='339' column='1'/>
-</data-member>
-<data-member access='public'>
-<var-decl name='id32' type-id='49580a63' visibility='default' filepath='include/uapi/sound/asound.h' line='340' column='1'/>
-</data-member>
-</union-decl>
 <class-decl name='snd_timer' size-in-bits='3456' is-struct='yes' visibility='default' filepath='include/sound/timer.h' line='57' column='1' id='73595e52'>
 <data-member access='public' layout-offset-in-bits='0'>
 <var-decl name='tmr_class' type-id='95e97e5e' visibility='default' filepath='include/sound/timer.h' line='58' column='1'/>
@@ -587,6 +587,7 @@
 dma_fence_remove_callback
 dma_fence_signal
 dma_fence_signal_locked
+dma_fence_signal_timestamp_locked
 dma_fence_wait_timeout
 dma_free_attrs
 dma_get_sgtable_attrs
@@ -7,6 +7,18 @@
 
 #include <uapi/asm/page.h>
 
+#ifdef CONFIG_ARC_HAS_PAE40
+
+#define MAX_POSSIBLE_PHYSMEM_BITS	40
+#define PAGE_MASK_PHYS			(0xff00000000ull | PAGE_MASK)
+
+#else /* CONFIG_ARC_HAS_PAE40 */
+
+#define MAX_POSSIBLE_PHYSMEM_BITS	32
+#define PAGE_MASK_PHYS			PAGE_MASK
+
+#endif /* CONFIG_ARC_HAS_PAE40 */
+
 #ifndef __ASSEMBLY__
 
 #define clear_page(paddr)		memset((paddr), 0, PAGE_SIZE)
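Why a separate PAGE_MASK_PHYS exists: on 32-bit ARC, PAGE_MASK is a 32-bit long, so masking a 40-bit PAE40 physical address with it silently clears bits 32-39. A minimal user-space sketch of the difference — the mask constants mirror the hunk above, but the demo program itself is illustrative, not part of this commit:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

int main(void)
{
	/* On 32-bit ARC, PAGE_MASK is a 32-bit value: 0xfffff000. */
	uint32_t page_mask = ~(PAGE_SIZE - 1);
	/* PAGE_MASK_PHYS widens it and keeps PAE40 bits 32..39. */
	uint64_t page_mask_phys = 0xff00000000ull | page_mask;

	uint64_t paddr = 0x1234567890ull;	/* a 40-bit physical address */

	/* Zero-extending the 32-bit mask drops bits 32..39 of the address. */
	printf("paddr & PAGE_MASK      = %#llx\n",
	       (unsigned long long)(paddr & page_mask));
	printf("paddr & PAGE_MASK_PHYS = %#llx\n",
	       (unsigned long long)(paddr & page_mask_phys));
	return 0;
}
```

Run on any host, this prints 0x34567000 for the first mask and 0x1234567000 for the second — exactly the truncation the ioremap/tlb hunks below are fixing.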
@@ -107,8 +107,8 @@
 #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
 
 /* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
-
+#define _PAGE_CHG_MASK	(PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
+							   _PAGE_SPECIAL)
 /* More Abbrevaited helpers */
 #define PAGE_U_NONE     __pgprot(___DEF)
 #define PAGE_U_R        __pgprot(___DEF | _PAGE_READ)
@@ -132,13 +132,7 @@
 #define PTE_BITS_IN_PD0	(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
 #define PTE_BITS_RWX	(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
 
-#ifdef CONFIG_ARC_HAS_PAE40
-#define PTE_BITS_NON_RWX_IN_PD1	(0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 40
-#else
-#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 32
-#endif
+#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK_PHYS | _PAGE_CACHEABLE)
 
 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
@@ -33,5 +33,4 @@
 
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
-
 #endif /* _UAPI__ASM_ARC_PAGE_H */
@@ -177,7 +177,7 @@ tracesys:
 
 	; Do the Sys Call as we normally would.
 	; Validate the Sys Call number
-	cmp     r8,  NR_syscalls
+	cmp     r8,  NR_syscalls - 1
 	mov.hi  r0, -ENOSYS
 	bhi     tracesys_exit
 
@@ -255,7 +255,7 @@ ENTRY(EV_Trap)
 	;============ Normal syscall case
 
 	; syscall num shd not exceed the total system calls avail
-	cmp     r8,  NR_syscalls
+	cmp     r8,  NR_syscalls - 1
 	mov.hi  r0, -ENOSYS
 	bhi     .Lret_from_system_call
 
@@ -158,7 +158,16 @@ void __init setup_arch_memory(void)
 	min_high_pfn = PFN_DOWN(high_mem_start);
 	max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
 
-	max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
+	/*
+	 * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
+	 * For HIGHMEM without PAE max_high_pfn should be less than
+	 * min_low_pfn to guarantee that these two regions don't overlap.
+	 * For PAE case highmem is greater than lowmem, so it is natural
+	 * to use max_high_pfn.
+	 *
+	 * In both cases, holes should be handled by pfn_valid().
+	 */
+	max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
 
 	high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
 	kmap_init();
@@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
 void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 			   unsigned long flags)
 {
+	unsigned int off;
 	unsigned long vaddr;
 	struct vm_struct *area;
-	phys_addr_t off, end;
+	phys_addr_t end;
 	pgprot_t prot = __pgprot(flags);
 
 	/* Don't allow wraparound, zero size */
@@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
 
 	/* Mappings have to be page-aligned */
 	off = paddr & ~PAGE_MASK;
-	paddr &= PAGE_MASK;
+	paddr &= PAGE_MASK_PHYS;
 	size = PAGE_ALIGN(end + 1) - paddr;
 
 	/*
@@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 		      pte_t *ptep)
 {
 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
-	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
+	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
 	struct page *page = pfn_to_page(pte_pfn(*ptep));
 
 	create_tlb(vma, vaddr, ptep);
@@ -2,7 +2,7 @@
 #ifndef _ASM_FIXMAP_H
 #define _ASM_FIXMAP_H
 
-#define FIXADDR_START		0xffc00000UL
+#define FIXADDR_START		0xffc80000UL
 #define FIXADDR_END		0xfff00000UL
 #define FIXADDR_TOP		(FIXADDR_END - PAGE_SIZE)
 
@@ -67,6 +67,10 @@
  */
 #define XIP_VIRT_ADDR(physaddr)  (MODULES_VADDR + ((physaddr) & 0x000fffff))
 
+#define FDT_FIXED_BASE		UL(0xff800000)
+#define FDT_FIXED_SIZE		(2 * SECTION_SIZE)
+#define FDT_VIRT_BASE(physbase)	((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
+
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 /*
  * Allow 16MB-aligned ioremap pages
@@ -107,6 +111,7 @@ extern unsigned long vectors_base;
 #define MODULES_VADDR		PAGE_OFFSET
 
 #define XIP_VIRT_ADDR(physaddr)  (physaddr)
+#define FDT_VIRT_BASE(physbase)	((void *)(physbase))
 
 #endif /* !CONFIG_MMU */
 
@@ -9,12 +9,12 @@
 
 #ifdef CONFIG_OF
 
-extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
+extern const struct machine_desc *setup_machine_fdt(void *dt_virt);
 extern void __init arm_dt_init_cpu_maps(void);
 
 #else /* CONFIG_OF */
 
-static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+static inline const struct machine_desc *setup_machine_fdt(void *dt_virt)
 {
 	return NULL;
 }
@@ -2,11 +2,11 @@
 void convert_to_tag_list(struct tag *tags);
 
 #ifdef CONFIG_ATAGS
-const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
+const struct machine_desc *setup_machine_tags(void *__atags_vaddr,
 					      unsigned int machine_nr);
 #else
 static inline const struct machine_desc * __init __noreturn
-setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
+setup_machine_tags(void *__atags_vaddr, unsigned int machine_nr)
 {
 	early_print("no ATAGS support: can't continue\n");
 	while (true);
@@ -174,7 +174,7 @@ static void __init squash_mem_tags(struct tag *tag)
 }
 
 const struct machine_desc * __init
-setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
+setup_machine_tags(void *atags_vaddr, unsigned int machine_nr)
 {
 	struct tag *tags = (struct tag *)&default_tags;
 	const struct machine_desc *mdesc = NULL, *p;
@@ -195,8 +195,8 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
 	if (!mdesc)
 		return NULL;
 
-	if (__atags_pointer)
-		tags = phys_to_virt(__atags_pointer);
+	if (atags_vaddr)
+		tags = atags_vaddr;
 	else if (mdesc->atag_offset)
 		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
 
@@ -203,12 +203,12 @@ static const void * __init arch_get_next_mach(const char *const **match)
 
 /**
  * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
- * @dt_phys: physical address of dt blob
+ * @dt_virt: virtual address of dt blob
  *
  * If a dtb was passed to the kernel in r2, then use it to choose the
  * correct machine_desc and to setup the system.
  */
-const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
+const struct machine_desc * __init setup_machine_fdt(void *dt_virt)
 {
 	const struct machine_desc *mdesc, *mdesc_best = NULL;
 
@@ -221,7 +221,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
 	mdesc_best = &__mach_desc_GENERIC_DT;
 #endif
 
-	if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
+	if (!dt_virt || !early_init_dt_verify(dt_virt))
 		return NULL;
 
 	mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
@@ -274,11 +274,10 @@ __create_page_tables:
 	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
-	movs	r0, r0, lsl #SECTION_SHIFT
-	subne	r3, r0, r8
-	addne	r3, r3, #PAGE_OFFSET
-	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
-	orrne	r6, r7, r0
+	cmp	r2, #0
+	ldrne	r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
+	addne	r3, r3, r4
+	orrne	r6, r7, r0, lsl #SECTION_SHIFT
 	strne	r6, [r3], #1 << PMD_ORDER
 	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
@@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
 			info->trigger = addr;
 			pr_debug("breakpoint fired: address = 0x%x\n", addr);
 			perf_bp_event(bp, regs);
-			if (!bp->overflow_handler)
+			if (is_default_overflow_handler(bp))
 				enable_single_step(bp, addr);
 			goto unlock;
 		}
@@ -18,6 +18,7 @@
 #include <linux/of_platform.h>
 #include <linux/init.h>
 #include <linux/kexec.h>
+#include <linux/libfdt.h>
 #include <linux/of_fdt.h>
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
@@ -1095,19 +1096,27 @@ static struct notifier_block arm_restart_nb = {
 
 void __init setup_arch(char **cmdline_p)
 {
-	const struct machine_desc *mdesc;
+	const struct machine_desc *mdesc = NULL;
+	void *atags_vaddr = NULL;
+
+	if (__atags_pointer)
+		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);
 
 	setup_processor();
-	mdesc = setup_machine_fdt(__atags_pointer);
+	if (atags_vaddr) {
+		mdesc = setup_machine_fdt(atags_vaddr);
+		if (mdesc)
+			memblock_reserve(__atags_pointer,
+					 fdt_totalsize(atags_vaddr));
+	}
 	if (!mdesc)
-		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
+		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
 	if (!mdesc) {
 		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
 		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
 			    __atags_pointer);
 		if (__atags_pointer)
-			early_print("  r2[]=%*ph\n", 16,
-				    phys_to_virt(__atags_pointer));
+			early_print("  r2[]=%*ph\n", 16, atags_vaddr);
 		dump_machine_table();
 	}
 
@@ -223,7 +223,6 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
 	if (mdesc->reserve)
 		mdesc->reserve();
 
-	early_init_fdt_reserve_self();
 	early_init_fdt_scan_reserved_mem();
 
 	/* reserve memory for DMA contiguous allocations */
@@ -39,6 +39,8 @@
 #include "mm.h"
 #include "tcm.h"
 
+extern unsigned long __atags_pointer;
+
 /*
  * empty_zero_page is a special page that is used for
  * zero-initialized data and COW.
@@ -946,7 +948,7 @@ static void __init create_mapping(struct map_desc *md)
 		return;
 	}
 
-	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
+	if (md->type == MT_DEVICE &&
 	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
@@ -1333,6 +1335,15 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
+	if (__atags_pointer) {
+		/* create a read-only mapping of the device tree */
+		map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);
+		map.virtual = FDT_FIXED_BASE;
+		map.length = FDT_FIXED_SIZE;
+		map.type = MT_ROM;
+		create_mapping(&map);
+	}
+
 	/*
 	 * Map the kernel if it is XIP.
 	 * It is always first in the modulearea.
@@ -1489,8 +1500,7 @@ static void __init map_lowmem(void)
 }
 
 #ifdef CONFIG_ARM_PV_FIXUP
-extern unsigned long __atags_pointer;
-typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
+typedef void pgtables_remap(long long offset, unsigned long pgd);
 pgtables_remap lpae_pgtables_remap_asm;
 
 /*
@@ -1503,7 +1513,6 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
 	unsigned long pa_pgd;
 	unsigned int cr, ttbcr;
 	long long offset;
-	void *boot_data;
 
 	if (!mdesc->pv_fixup)
 		return;
@@ -1520,7 +1529,6 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
 	 */
 	lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
 	pa_pgd = __pa(swapper_pg_dir);
-	boot_data = __va(__atags_pointer);
 	barrier();
 
 	pr_info("Switching physical address space to 0x%08llx\n",
@@ -1556,7 +1564,7 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
 	 * needs to be assembly.  It's fairly simple, as we're using the
 	 * temporary tables setup by the initial assembly code.
 	 */
-	lpae_pgtables_remap(offset, pa_pgd, boot_data);
+	lpae_pgtables_remap(offset, pa_pgd);
 
 	/* Re-enable the caches and cacheable TLB walks */
 	asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
@@ -39,8 +39,8 @@ ENTRY(lpae_pgtables_remap_asm)
 
 	/* Update level 2 entries for the boot data */
 	add	r7, r2, #0x1000
-	add	r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER
-	bic	r7, r7, #(1 << L2_ORDER) - 1
+	movw	r3, #FDT_FIXED_BASE >> (SECTION_SHIFT - L2_ORDER)
+	add	r7, r7, r3
 	ldrd	r4, r5, [r7]
 	adds	r4, r4, r0
 	adc	r5, r5, r1
@@ -131,6 +131,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
 	if (interrupts_enabled(regs))
 		trace_hardirqs_on();
 
+	if (system_uses_irq_prio_masking())
+		gic_write_pmr(regs->pmr_save);
+
 	/*
 	 * We can't use local_daif_restore(regs->pstate) here as
 	 * system_has_prio_mask_debugging() won't restore the I bit if it can
@@ -181,14 +181,6 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
 {
 	unsigned long far = read_sysreg(far_el1);
 
-	/*
-	 * The CPU masked interrupts, and we are leaving them masked during
-	 * do_debug_exception(). Update PMR as if we had called
-	 * local_daif_mask().
-	 */
-	if (system_uses_irq_prio_masking())
-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
 	arm64_enter_el1_dbg(regs);
 	do_debug_exception(far, esr, regs);
 	arm64_exit_el1_dbg(regs);
@@ -354,9 +346,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
 	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
 	unsigned long far = read_sysreg(far_el1);
 
-	if (system_uses_irq_prio_masking())
-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
 	enter_from_user_mode();
 	do_debug_exception(far, esr, regs);
 	local_daif_restore(DAIF_PROCCTX_NOIRQ);
@@ -364,9 +353,6 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
 
 static void noinstr el0_svc(struct pt_regs *regs)
 {
-	if (system_uses_irq_prio_masking())
-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
 	enter_from_user_mode();
 	do_el0_svc(regs);
 }
@@ -441,9 +427,6 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
 
 static void noinstr el0_svc_compat(struct pt_regs *regs)
 {
-	if (system_uses_irq_prio_masking())
-		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
 	enter_from_user_mode();
 	do_el0_svc_compat(regs);
 }
@@ -298,6 +298,8 @@ alternative_else_nop_endif
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 	mrs_s	x20, SYS_ICC_PMR_EL1
 	str	x20, [sp, #S_PMR_SAVE]
+	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
+	msr_s	SYS_ICC_PMR_EL1, x20
 alternative_else_nop_endif
 
 	/* Re-enable tag checking (TCO set on exception entry) */
@@ -505,8 +507,8 @@ tsk	.req	x28		// current thread_info
 /*
  * Interrupt handling.
  */
-	.macro	irq_handler
-	ldr_l	x1, handle_arch_irq
+	.macro	irq_handler, handler:req
+	ldr_l	x1, \handler
 	mov	x0, sp
 	irq_stack_entry
 	blr	x1
@@ -536,13 +538,41 @@ alternative_endif
 #endif
 	.endm
 
-	.macro	gic_prio_irq_setup, pmr:req, tmp:req
-#ifdef CONFIG_ARM64_PSEUDO_NMI
-	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
-	msr_s	SYS_ICC_PMR_EL1, \tmp
-	alternative_else_nop_endif
-#endif
-	.endm
+	.macro el1_interrupt_handler, handler:req
+	enable_da_f
+
+	mov	x0, sp
+	bl	enter_el1_irq_or_nmi
+
+	irq_handler	\handler
+
+#ifdef CONFIG_PREEMPTION
+	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+	/*
+	 * DA_F were cleared at start of handling. If anything is set in DAIF,
+	 * we come back from an NMI, so skip preemption
+	 */
+	mrs	x0, daif
+	orr	x24, x24, x0
+alternative_else_nop_endif
+	cbnz	x24, 1f				// preempt count != 0 || NMI return path
+	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
+1:
+#endif
+
+	mov	x0, sp
+	bl	exit_el1_irq_or_nmi
+	.endm
+
+	.macro el0_interrupt_handler, handler:req
+	user_exit_irqoff
+	enable_da_f
+
+	tbz	x22, #55, 1f
+	bl	do_el0_irq_bp_hardening
+1:
+	irq_handler	\handler
+	.endm
 
 	.text
@@ -674,32 +704,7 @@ SYM_CODE_END(el1_sync)
 	.align	6
 SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
 	kernel_entry 1
-	gic_prio_irq_setup pmr=x20, tmp=x1
-	enable_da_f
-
-	mov	x0, sp
-	bl	enter_el1_irq_or_nmi
-
-	irq_handler
-
-#ifdef CONFIG_PREEMPTION
-	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
-	/*
-	 * DA_F were cleared at start of handling. If anything is set in DAIF,
-	 * we come back from an NMI, so skip preemption
-	 */
-	mrs	x0, daif
-	orr	x24, x24, x0
-alternative_else_nop_endif
-	cbnz	x24, 1f				// preempt count != 0 || NMI return path
-	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
-1:
-#endif
-
-	mov	x0, sp
-	bl	exit_el1_irq_or_nmi
-
+	el1_interrupt_handler handle_arch_irq
 	kernel_exit 1
 SYM_CODE_END(el1_irq)
 
@@ -739,22 +744,13 @@ SYM_CODE_END(el0_error_compat)
 SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
 	kernel_entry 0
 el0_irq_naked:
-	gic_prio_irq_setup pmr=x20, tmp=x0
-	user_exit_irqoff
-	enable_da_f
-
-	tbz	x22, #55, 1f
-	bl	do_el0_irq_bp_hardening
-1:
-	irq_handler
-
+	el0_interrupt_handler handle_arch_irq
 	b	ret_to_user
 SYM_CODE_END(el0_irq)
 
 SYM_CODE_START_LOCAL(el1_error)
 	kernel_entry 1
 	mrs	x1, esr_el1
-	gic_prio_kentry_setup tmp=x2
 	enable_dbg
 	mov	x0, sp
 	bl	do_serror
@@ -765,7 +761,6 @@ SYM_CODE_START_LOCAL(el0_error)
 	kernel_entry 0
 el0_error_naked:
 	mrs	x25, esr_el1
-	gic_prio_kentry_setup tmp=x2
 	user_exit_irqoff
 	enable_dbg
 	mov	x0, sp
@@ -55,8 +55,10 @@ void __sync_icache_dcache(pte_t pte)
 {
 	struct page *page = pte_page(pte);
 
-	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+	if (!test_bit(PG_dcache_clean, &page->flags)) {
 		sync_icache_aliases(page_address(page), page_size(page));
+		set_bit(PG_dcache_clean, &page->flags);
+	}
 }
 EXPORT_SYMBOL_GPL(__sync_icache_dcache);
 
@@ -445,6 +445,18 @@ SYM_FUNC_START(__cpu_setup)
 	mov	x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
 	msr_s	SYS_GCR_EL1, x10
 
+	/*
+	 * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
+	 * RGSR_EL1.SEED must be non-zero for IRG to produce
+	 * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
+	 * must initialize it.
+	 */
+	mrs	x10, CNTVCT_EL0
+	ands	x10, x10, #SYS_RGSR_EL1_SEED_MASK
+	csinc	x10, x10, xzr, ne
+	lsl	x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
+	msr_s	SYS_RGSR_EL1, x10
+
 	/* clear any pending tag check faults in TFSR*_EL1 */
 	msr_s	SYS_TFSR_EL1, xzr
 	msr_s	SYS_TFSRE0_EL1, xzr
@@ -14,16 +14,20 @@
 struct elf64_shdr;			/* forward declration */
 
 struct mod_arch_specific {
+	/* Used only at module load time. */
 	struct elf64_shdr *core_plt;	/* core PLT section */
 	struct elf64_shdr *init_plt;	/* init PLT section */
 	struct elf64_shdr *got;		/* global offset table */
 	struct elf64_shdr *opd;		/* official procedure descriptors */
 	struct elf64_shdr *unwind;	/* unwind-table section */
 	unsigned long gp;		/* global-pointer for module */
+	unsigned int next_got_entry;	/* index of next available got entry */
 
+	/* Used at module run and cleanup time. */
 	void *core_unw_table;		/* core unwind-table cookie returned by unwinder */
 	void *init_unw_table;		/* init unwind-table cookie returned by unwinder */
-	unsigned int next_got_entry;	/* index of next available got entry */
+	void *opd_addr;			/* symbolize uses .opd to get to actual function */
+	unsigned long opd_size;
 };
 
 #define ARCH_SHF_SMALL	SHF_IA_64_SHORT
@@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
 int
 module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
 {
+	struct mod_arch_specific *mas = &mod->arch;
+
 	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
-	if (mod->arch.unwind)
+	if (mas->unwind)
 		register_unwind_table(mod);
+
+	/*
+	 * ".opd" was already relocated to the final destination. Store
+	 * it's address for use in symbolizer.
+	 */
+	mas->opd_addr = (void *)mas->opd->sh_addr;
+	mas->opd_size = mas->opd->sh_size;
+
+	/*
+	 * Module relocation was already done at this point. Section
+	 * headers are about to be deleted. Wipe out load-time context.
+	 */
+	mas->core_plt = NULL;
+	mas->init_plt = NULL;
+	mas->got = NULL;
+	mas->opd = NULL;
+	mas->unwind = NULL;
+	mas->gp = 0;
+	mas->next_got_entry = 0;
+
 	return 0;
 }
@@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
 
 void *dereference_module_function_descriptor(struct module *mod, void *ptr)
 {
-	Elf64_Shdr *opd = mod->arch.opd;
+	struct mod_arch_specific *mas = &mod->arch;
 
-	if (ptr < (void *)opd->sh_addr ||
-	    ptr >= (void *)(opd->sh_addr + opd->sh_size))
+	if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
 		return ptr;
 
 	return dereference_function_descriptor(ptr);
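Background for the ia64 hunks above: an ia64 "function pointer" addresses a descriptor (entry address plus gp), not code, which is why the module code keeps the .opd bounds around for symbolization after the section headers are discarded. A minimal sketch of the descriptor layout and the dereference step, assuming the classic two-word fdesc format (illustrative; the kernel's real helper is elsewhere):

```c
/* Classic ia64 function descriptor: code address plus global pointer. */
struct fdesc {
	unsigned long ip;	/* actual entry point of the function */
	unsigned long gp;	/* global pointer the function expects */
};

/* Given a "function pointer" (really a descriptor address), recover
 * the code address -- the step dereference_function_descriptor() does. */
static void *fdesc_to_entry(void *ptr)
{
	return (void *)((struct fdesc *)ptr)->ip;
}
```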
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2000, 2004  Maciej W. Rozycki
+ * Copyright (C) 2000, 2004, 2021  Maciej W. Rozycki
  * Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,25 +9,18 @@
 #ifndef __ASM_DIV64_H
 #define __ASM_DIV64_H
 
-#include <asm-generic/div64.h>
-
-#if BITS_PER_LONG == 64
+#include <asm/bitsperlong.h>
 
-#include <linux/types.h>
+#if BITS_PER_LONG == 32
 
 /*
  * No traps on overflows for any of these...
  */
 
-#define __div64_32(n, base)					\
-({								\
+#define do_div64_32(res, high, low, base) ({			\
 	unsigned long __cf, __tmp, __tmp2, __i;			\
 	unsigned long __quot32, __mod32;			\
-	unsigned long __high, __low;				\
-	unsigned long long __n;					\
-								\
-	__high = *__n >> 32;					\
-	__low = __n;						\
 	__asm__(						\
 	"	.set	push					\n" \
 	"	.set	noat					\n" \
@@ -51,18 +44,48 @@
 	"	subu	%0, %0, %z6				\n" \
 	"	addiu	%2, %2, 1				\n" \
 	"3:							\n" \
-	"	bnez	%4, 0b\n\t"				\
-	"	 srl	%5, %1, 0x1f\n\t"			\
+	"	bnez	%4, 0b					\n" \
+	"	 srl	%5, %1, 0x1f				\n" \
 	"	.set	pop"					\
 	: "=&r" (__mod32), "=&r" (__tmp),			\
 	  "=&r" (__quot32), "=&r" (__cf),			\
 	  "=&r" (__i), "=&r" (__tmp2)				\
-	: "Jr" (base), "0" (__high), "1" (__low));		\
+	: "Jr" (base), "0" (high), "1" (low));			\
 								\
-	(__n) = __quot32;					\
+	(res) = __quot32;					\
 	__mod32;						\
 })
 
-#endif /* BITS_PER_LONG == 64 */
+#define __div64_32(n, base) ({					\
+	unsigned long __upper, __low, __high, __radix;		\
+	unsigned long long __quot;				\
+	unsigned long long __div;				\
+	unsigned long __mod;					\
+								\
+	__div = (*n);						\
+	__radix = (base);					\
+								\
+	__high = __div >> 32;					\
+	__low = __div;						\
+								\
+	if (__high < __radix) {					\
+		__upper = __high;				\
+		__high = 0;					\
+	} else {						\
+		__upper = __high % __radix;			\
+		__high /= __radix;				\
+	}							\
+								\
+	__mod = do_div64_32(__low, __upper, __low, __radix);	\
+								\
+	__quot = __high;					\
+	__quot = __quot << 32 | __low;				\
+	(*n) = __quot;						\
+	__mod;							\
+})
+
+#endif /* BITS_PER_LONG == 32 */
+
+#include <asm-generic/div64.h>
 
 #endif /* __ASM_DIV64_H */
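The rewritten __div64_32() above is plain base-2^32 long division: divide the high word first, then divide (remainder:low), whose quotient is guaranteed to fit in 32 bits because the remainder is less than the base. A stand-alone C sketch of the same algorithm, using the compiler's 64-bit division for the inner step where the kernel macro uses hand-written MIPS assembly:

```c
#include <stdint.h>
#include <stdio.h>

/* Divide a 64-bit value by a 32-bit base, returning the remainder and
 * leaving the quotient in *n -- the same contract as __div64_32(). */
static uint32_t div64_32(uint64_t *n, uint32_t base)
{
	uint32_t high = *n >> 32;
	uint32_t low = *n;
	uint32_t upper, mod;
	uint64_t step;

	/* Step 1: divide the high word; its remainder becomes the
	 * upper half of the next division. */
	if (high < base) {
		upper = high;
		high = 0;
	} else {
		upper = high % base;
		high /= base;
	}

	/* Step 2: divide (upper:low); the quotient fits in 32 bits
	 * because upper < base. */
	step = ((uint64_t)upper << 32) | low;
	low = step / base;
	mod = step % base;

	*n = ((uint64_t)high << 32) | low;
	return mod;
}

int main(void)
{
	uint64_t n = 0x123456789abcdef0ull;
	uint32_t rem = div64_32(&n, 1000000007u);
	printf("quot=%#llx rem=%u\n", (unsigned long long)n, rem);
	return 0;
}
```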
@@ -1739,7 +1739,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 			set_isa(c, MIPS_CPU_ISA_M64R2);
 			break;
 		}
-		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
 		c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
 			MIPS_ASE_LOONGSON_EXT2);
 		break;
@@ -1769,7 +1768,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 		 * register, we correct it here.
 		 */
 		c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
-		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
 		c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
 			MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
 		c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
@@ -1780,7 +1778,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 		set_elf_platform(cpu, "loongson3a");
 		set_isa(c, MIPS_CPU_ISA_M64R2);
 		decode_cpucfg(c);
-		c->writecombine = _CACHE_UNCACHED_ACCELERATED;
 		break;
 	default:
 		panic("Unknown Loongson Processor ID!");
@@ -338,11 +338,7 @@
 	lis	r1, emergency_ctx@ha
 #endif
 	lwz	r1, emergency_ctx@l(r1)
-	cmpwi	cr1, r1, 0
-	bne	cr1, 1f
-	lis	r1, init_thread_union@ha
-	addi	r1, r1, init_thread_union@l
-1:	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+	addi	r1, r1, THREAD_SIZE - INT_FRAME_SIZE
 	EXCEPTION_PROLOG_2
 	SAVE_NVGPRS(r11)
 	addi	r3, r1, STACK_FRAME_OVERHEAD
@@ -1050,7 +1050,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
 
 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
 	for (i = 0; i < tbl->nr_pools; i++)
-		spin_lock(&tbl->pools[i].lock);
+		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
 
 	iommu_table_release_pages(tbl);
 
@@ -1078,7 +1078,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
 
 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
 	for (i = 0; i < tbl->nr_pools; i++)
-		spin_lock(&tbl->pools[i].lock);
+		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
 
 	memset(tbl->it_map, 0, sz);
 
@@ -164,7 +164,7 @@ void __init irqstack_early_init(void)
 }
 
 #ifdef CONFIG_VMAP_STACK
-void *emergency_ctx[NR_CPUS] __ro_after_init;
+void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};
 
 void __init emergency_stack_init(void)
 {
@@ -1442,6 +1442,9 @@ void start_secondary(void *unused)
 
 	vdso_getcpu_init();
 #endif
+	set_numa_node(numa_cpu_lookup_table[cpu]);
+	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
+
 	/* Update topology CPU masks */
 	add_cpu_to_masks(cpu);
 
@@ -1460,9 +1463,6 @@ void start_secondary(void *unused)
 		shared_caches = true;
 	}
 
-	set_numa_node(numa_cpu_lookup_table[cpu]);
-	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
-
 	smp_wmb();
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/sched/mm.h>
+#include <linux/stop_machine.h>
 #include <asm/cputable.h>
 #include <asm/code-patching.h>
 #include <asm/page.h>
@@ -227,11 +228,25 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
 		                           : "unknown");
 }
 
+static int __do_stf_barrier_fixups(void *data)
+{
+	enum stf_barrier_type *types = data;
+
+	do_stf_entry_barrier_fixups(*types);
+	do_stf_exit_barrier_fixups(*types);
+
+	return 0;
+}
+
 void do_stf_barrier_fixups(enum stf_barrier_type types)
 {
-	do_stf_entry_barrier_fixups(types);
-	do_stf_exit_barrier_fixups(types);
+	/*
+	 * The call to the fallback entry flush, and the fallback/sync-ori exit
+	 * flush can not be safely patched in/out while other CPUs are executing
+	 * them. So call __do_stf_barrier_fixups() on one CPU while all other CPUs
+	 * spin in the stop machine core with interrupts hard disabled.
+	 */
+	stop_machine(__do_stf_barrier_fixups, &types, NULL);
 }
 
 void do_uaccess_flush_fixups(enum l1d_flush_type types)
@@ -284,8 +299,9 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
 		                                           : "unknown");
 }
 
-void do_entry_flush_fixups(enum l1d_flush_type types)
+static int __do_entry_flush_fixups(void *data)
 {
+	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
 	unsigned int instrs[3], *dest;
 	long *start, *end;
 	int i;
@@ -354,6 +370,19 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
 							: "ori type" :
 		(types &  L1D_FLUSH_MTTRIG)	? "mttrig type"
 							: "unknown");
+
+	return 0;
+}
+
+void do_entry_flush_fixups(enum l1d_flush_type types)
+{
+	/*
+	 * The call to the fallback flush can not be safely patched in/out while
+	 * other CPUs are executing it. So call __do_entry_flush_fixups() on one
+	 * CPU while all other CPUs spin in the stop machine core with interrupts
+	 * hard disabled.
+	 */
+	stop_machine(__do_entry_flush_fixups, &types, NULL);
 }
 
 void do_rfi_flush_fixups(enum l1d_flush_type types)
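Both fixup paths above funnel the actual patching through stop_machine(), which runs the callback on exactly one CPU while every other CPU spins with interrupts hard-disabled — the only safe window for rewriting instructions another CPU might be executing. A hedged sketch of the caller shape (patch_my_feature and its argument struct are illustrative, not from this commit):

```c
#include <linux/stop_machine.h>

struct patch_args {
	bool enable;
};

/* Runs on one CPU; all others are parked by the stop-machine core,
 * so no CPU can be mid-way through the instructions being changed. */
static int __patch_my_feature(void *data)
{
	struct patch_args *args = data;

	/* ... rewrite the affected instructions here ... */
	(void)args;
	return 0;
}

void patch_my_feature(bool enable)
{
	struct patch_args args = { .enable = enable };

	/* NULL cpumask: run the callback on any one CPU. */
	stop_machine(__patch_my_feature, &args, NULL);
}
```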
@@ -336,7 +336,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 int htab_remove_mapping(unsigned long vstart, unsigned long vend,
 		      int psize, int ssize)
 {
-	unsigned long vaddr;
+	unsigned long vaddr, time_limit;
 	unsigned int step, shift;
 	int rc;
 	int ret = 0;
@@ -349,8 +349,19 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
 
 	/* Unmap the full range specificied */
 	vaddr = ALIGN_DOWN(vstart, step);
+	time_limit = jiffies + HZ;
+
 	for (;vaddr < vend; vaddr += step) {
 		rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
+
+		/*
+		 * For large number of mappings introduce a cond_resched()
+		 * to prevent softlockup warnings.
+		 */
+		if (time_after(jiffies, time_limit)) {
+			cond_resched();
+			time_limit = jiffies + HZ;
+		}
 		if (rc == -ENOENT) {
 			ret = -ENOENT;
 			continue;
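The htab_remove_mapping() hunk uses a common idiom for long kernel loops: rather than calling cond_resched() on every iteration, check a jiffies deadline and yield roughly once per second, which keeps the softlockup watchdog quiet with negligible per-iteration cost. The idiom in isolation (struct item and do_one_item() are hypothetical stand-ins):

```c
#include <linux/jiffies.h>
#include <linux/sched.h>

static void process_many_items(struct item *items, unsigned long count)
{
	unsigned long i, time_limit = jiffies + HZ;

	for (i = 0; i < count; i++) {
		do_one_item(&items[i]);	/* stand-in for the real work */

		/* Yield the CPU at most about once per second so the
		 * softlockup watchdog never sees a stalled CPU. */
		if (time_after(jiffies, time_limit)) {
			cond_resched();
			time_limit = jiffies + HZ;
		}
	}
}
```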
@@ -47,9 +47,6 @@ static void rtas_stop_self(void)
 
 	BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
 
-	printk("cpu %u (hwid %u) Ready to die...\n",
-	       smp_processor_id(), hard_smp_processor_id());
-
 	rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
 
 	panic("Alas, I survived.\n");
@@ -54,7 +54,7 @@ int riscv_hartid_to_cpuid(int hartid)
 			return i;
 
 	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
-	return i;
+	return -ENOENT;
 }
 
 void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
@@ -588,6 +588,21 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC,	exc_machine_check);
 #endif
 
 /* NMI */
+
+#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
+/*
+ * Special NOIST entry point for VMX which invokes this on the kernel
+ * stack. asm_exc_nmi() requires an IST to work correctly vs. the NMI
+ * 'executing' marker.
+ *
+ * On 32bit this just uses the regular NMI entry point because 32-bit does
+ * not have ISTs.
+ */
+DECLARE_IDTENTRY(X86_TRAP_NMI, exc_nmi_noist);
+#else
+#define asm_exc_nmi_noist		asm_exc_nmi
+#endif
+
 DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,	exc_nmi);
 #ifdef CONFIG_XEN_PV
 DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,	xenpv_exc_nmi);
@@ -358,8 +358,6 @@ struct kvm_mmu {
 	int (*sync_page)(struct kvm_vcpu *vcpu,
 			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
-	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			   u64 *spte, const void *pte);
 	hpa_t root_hpa;
 	gpa_t root_pgd;
 	union kvm_mmu_role mmu_role;
@@ -1019,7 +1017,6 @@ struct kvm_arch {
 struct kvm_vm_stat {
 	ulong mmu_shadow_zapped;
 	ulong mmu_pte_write;
-	ulong mmu_pte_updated;
 	ulong mmu_pde_zapped;
 	ulong mmu_flooded;
 	ulong mmu_recycled;
@@ -1671,6 +1668,7 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
 		    unsigned long icr, int op_64_bit);
 
 void kvm_define_user_return_msr(unsigned index, u32 msr);
+int kvm_probe_user_return_msr(u32 msr);
 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
 
 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
@@ -524,6 +524,16 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
 	mds_user_clear_cpu_buffers();
 }
 
+#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
+DEFINE_IDTENTRY_RAW(exc_nmi_noist)
+{
+	exc_nmi(regs);
+}
+#endif
+#if IS_MODULE(CONFIG_KVM_INTEL)
+EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
+#endif
+
 void stop_nmi(void)
 {
 	ignore_nmis++;
@@ -572,7 +572,8 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
 	case 7:
 		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
 		entry->eax = 0;
-		entry->ecx = F(RDPID);
+		if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
+			entry->ecx = F(RDPID);
 		++array->nent;
 	default:
 		break;
@@ -4502,7 +4502,7 @@ static const struct opcode group8[] = {
  * from the register case of group9.
  */
 static const struct gprefix pfx_0f_c7_7 = {
-	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
+	N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
 };
 
 
@@ -468,6 +468,7 @@ enum x86_intercept {
 	x86_intercept_clgi,
 	x86_intercept_skinit,
 	x86_intercept_rdtscp,
+	x86_intercept_rdpid,
 	x86_intercept_icebp,
 	x86_intercept_wbinvd,
 	x86_intercept_monitor,
@@ -1908,8 +1908,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
 	if (!apic->lapic_timer.hv_timer_in_use)
 		goto out;
 	WARN_ON(rcuwait_active(&vcpu->wait));
-	cancel_hv_timer(apic);
 	apic_timer_expired(apic, false);
+	cancel_hv_timer(apic);
 
 	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
 		advance_periodic_target_expiration(apic);
@@ -1715,13 +1715,6 @@ static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
-				 struct kvm_mmu_page *sp, u64 *spte,
-				 const void *pte)
-{
-	WARN_ON(1);
-}
-
 #define KVM_PAGE_ARRAY_NR 16
 
 struct kvm_mmu_pages {
@@ -3820,7 +3813,6 @@ static void nonpaging_init_context(struct kvm_vcpu *vcpu,
 	context->gva_to_gpa = nonpaging_gva_to_gpa;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = NULL;
-	context->update_pte = nonpaging_update_pte;
 	context->root_level = 0;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->direct_map = true;
@@ -4402,7 +4394,6 @@ static void paging64_init_context_common(struct kvm_vcpu *vcpu,
 	context->gva_to_gpa = paging64_gva_to_gpa;
 	context->sync_page = paging64_sync_page;
 	context->invlpg = paging64_invlpg;
-	context->update_pte = paging64_update_pte;
 	context->shadow_root_level = level;
 	context->direct_map = false;
 }
@@ -4431,7 +4422,6 @@ static void paging32_init_context(struct kvm_vcpu *vcpu,
 	context->gva_to_gpa = paging32_gva_to_gpa;
 	context->sync_page = paging32_sync_page;
 	context->invlpg = paging32_invlpg;
-	context->update_pte = paging32_update_pte;
 	context->shadow_root_level = PT32E_ROOT_LEVEL;
 	context->direct_map = false;
 }
@@ -4513,7 +4503,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 	context->page_fault = kvm_tdp_page_fault;
 	context->sync_page = nonpaging_sync_page;
 	context->invlpg = NULL;
-	context->update_pte = nonpaging_update_pte;
 	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
 	context->direct_map = true;
 	context->get_guest_pgd = get_cr3;
@@ -4690,7 +4679,6 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 	context->gva_to_gpa = ept_gva_to_gpa;
 	context->sync_page = ept_sync_page;
 	context->invlpg = ept_invlpg;
-	context->update_pte = ept_update_pte;
 	context->root_level = level;
 	context->direct_map = false;
 	context->mmu_role.as_u64 = new_role.as_u64;
@@ -4838,19 +4826,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
 
-static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu_page *sp, u64 *spte,
-				  const void *new)
-{
-	if (sp->role.level != PG_LEVEL_4K) {
-		++vcpu->kvm->stat.mmu_pde_zapped;
-		return;
-	}
-
-	++vcpu->kvm->stat.mmu_pte_updated;
-	vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
-}
-
 static bool need_remote_flush(u64 old, u64 new)
 {
 	if (!is_shadow_present_pte(old))
@@ -4966,22 +4941,6 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
 	return spte;
 }
 
-/*
- * Ignore various flags when determining if a SPTE can be immediately
- * overwritten for the current MMU.
- * - level: explicitly checked in mmu_pte_write_new_pte(), and will never
- *   match the current MMU role, as MMU's level tracks the root level.
- * - access: updated based on the new guest PTE
- * - quadrant: handled by get_written_sptes()
- * - invalid: always false (loop only walks valid shadow pages)
- */
-static const union kvm_mmu_page_role role_ign = {
-	.level = 0xf,
-	.access = 0x7,
-	.quadrant = 0x3,
-	.invalid = 0x1,
-};
-
 static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			      const u8 *new, int bytes,
 			      struct kvm_page_track_notifier_node *node)
@@ -5032,14 +4991,10 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	local_flush = true;
 	while (npte--) {
-		u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
-
 		entry = *spte;
 		mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
-		if (gentry &&
-		    !((sp->role.word ^ base_role) & ~role_ign.word) &&
-		    rmap_can_add(vcpu))
-			mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
+		if (gentry && sp->role.level != PG_LEVEL_4K)
+			++vcpu->kvm->stat.mmu_pde_zapped;
 		if (need_remote_flush(entry, *spte))
 			remote_flush = true;
 		++spte;
@@ -3139,15 +3139,8 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 			nested_vmx_handle_enlightened_vmptrld(vcpu, false);
 
 		if (evmptrld_status == EVMPTRLD_VMFAIL ||
-		    evmptrld_status == EVMPTRLD_ERROR) {
-			pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
-					     __func__);
-			vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-			vcpu->run->internal.suberror =
-				KVM_INTERNAL_ERROR_EMULATION;
-			vcpu->run->internal.ndata = 0;
+		    evmptrld_status == EVMPTRLD_ERROR)
 			return false;
-		}
 	}
 
 	return true;
@@ -3235,8 +3228,16 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 
 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
-	if (!nested_get_evmcs_page(vcpu))
+	if (!nested_get_evmcs_page(vcpu)) {
+		pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
+				     __func__);
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->internal.suberror =
+			KVM_INTERNAL_ERROR_EMULATION;
+		vcpu->run->internal.ndata = 0;
+
 		return false;
+	}
 
 	if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
 		return false;
@@ -4441,7 +4442,15 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
 	/* trying to cancel vmlaunch/vmresume is a bug */
 	WARN_ON_ONCE(vmx->nested.nested_run_pending);
 
-	kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+	if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+		/*
+		 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
+		 * Enlightened VMCS after migration and we still need to
+		 * do that when something is forcing L2->L1 exit prior to
+		 * the first L2 run.
+		 */
+		(void)nested_get_evmcs_page(vcpu);
+	}
 
 	/* Service the TLB flush request for L2 before switching to L1. */
 	if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
@@ -36,6 +36,7 @@
 #include <asm/debugreg.h>
 #include <asm/desc.h>
 #include <asm/fpu/internal.h>
+#include <asm/idtentry.h>
 #include <asm/io.h>
 #include <asm/irq_remapping.h>
 #include <asm/kexec.h>
@@ -6354,18 +6355,17 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
 
 void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
 
-static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
+static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
+					unsigned long entry)
 {
-	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
-	gate_desc *desc = (gate_desc *)host_idt_base + vector;
-
 	kvm_before_interrupt(vcpu);
-	vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
+	vmx_do_interrupt_nmi_irqoff(entry);
 	kvm_after_interrupt(vcpu);
 }
 
 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
 {
+	const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
 	u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
 
 	/* if exit due to PF check for async PF */
@@ -6376,18 +6376,20 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
 		kvm_machine_check();
 	/* We need to handle NMIs before interrupts are enabled */
 	else if (is_nmi(intr_info))
-		handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info);
+		handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
 }
 
 static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
 {
 	u32 intr_info = vmx_get_intr_info(vcpu);
+	unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
+	gate_desc *desc = (gate_desc *)host_idt_base + vector;
 
 	if (WARN_ONCE(!is_external_intr(intr_info),
 	    "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
 		return;
 
-	handle_interrupt_nmi_irqoff(vcpu, intr_info);
+	handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
 }
 
 static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
@@ -6862,12 +6864,9 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
 		u32 index = vmx_uret_msrs_list[i];
-		u32 data_low, data_high;
 		int j = vmx->nr_uret_msrs;
 
-		if (rdmsr_safe(index, &data_low, &data_high) < 0)
-			continue;
-		if (wrmsr_safe(index, data_low, data_high) < 0)
+		if (kvm_probe_user_return_msr(index))
 			continue;
 
 		vmx->guest_uret_msrs[j].slot = i;
@@ -7300,9 +7299,11 @@ static __init void vmx_set_cpu_caps(void)
 	if (!cpu_has_vmx_xsaves())
 		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
 
-	/* CPUID 0x80000001 */
-	if (!cpu_has_vmx_rdtscp())
+	/* CPUID 0x80000001 and 0x7 (RDPID) */
+	if (!cpu_has_vmx_rdtscp()) {
 		kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+		kvm_cpu_cap_clear(X86_FEATURE_RDPID);
+	}
 
 	if (cpu_has_vmx_waitpkg())
 		kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
@@ -7358,8 +7359,9 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
 	/*
 	 * RDPID causes #UD if disabled through secondary execution controls.
 	 * Because it is marked as EmulateOnUD, we need to intercept it here.
+	 * Note, RDPID is hidden behind ENABLE_RDTSCP.
 	 */
-	case x86_intercept_rdtscp:
+	case x86_intercept_rdpid:
 		if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
 			exception->vector = UD_VECTOR;
 			exception->error_code_valid = false;
@ -233,7 +233,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
VM_STAT("mmu_pte_write", mmu_pte_write),
VM_STAT("mmu_pte_updated", mmu_pte_updated),
VM_STAT("mmu_pde_zapped", mmu_pde_zapped),
VM_STAT("mmu_flooded", mmu_flooded),
VM_STAT("mmu_recycled", mmu_recycled),
@ -323,6 +322,22 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
}
}

int kvm_probe_user_return_msr(u32 msr)
{
u64 val;
int ret;

preempt_disable();
ret = rdmsrl_safe(msr, &val);
if (ret)
goto out;
ret = wrmsrl_safe(msr, val);
out:
preempt_enable();
return ret;
}
EXPORT_SYMBOL_GPL(kvm_probe_user_return_msr);

void kvm_define_user_return_msr(unsigned slot, u32 msr)
{
BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
@ -7849,6 +7864,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)

static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);

/*
* Indirection to move queue_work() out of the tk_core.seq write held
* region to prevent possible deadlocks against time accessors which
* are invoked with work related locks held.
*/
static void pvclock_irq_work_fn(struct irq_work *w)
{
queue_work(system_long_wq, &pvclock_gtod_work);
}

static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);

/*
* Notification about pvclock gtod data update.
*/
@ -7860,13 +7887,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,

update_pvclock_gtod(tk);

/* disable master clock if host does not trust, or does not
* use, TSC based clocksource.
/*
* Disable master clock if host does not trust, or does not use,
* TSC based clocksource. Delegate queue_work() to irq_work as
* this is invoked with tk_core.seq write held.
*/
if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
atomic_read(&kvm_guest_has_master_clock) != 0)
queue_work(system_long_wq, &pvclock_gtod_work);

irq_work_queue(&pvclock_irq_work);
return 0;
}

@ -7982,6 +8010,8 @@ void kvm_arch_exit(void)
cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
#ifdef CONFIG_X86_64
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
irq_work_sync(&pvclock_irq_work);
cancel_work_sync(&pvclock_gtod_work);
#endif
kvm_x86_ops.hardware_enable = NULL;
kvm_mmu_module_exit();

@ -1023,7 +1023,17 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,

lockdep_assert_held(&ioc->lock);

inuse = clamp_t(u32, inuse, 1, active);
/*
* For an active leaf node, its inuse shouldn't be zero or exceed
* @active. An active internal node's inuse is solely determined by the
* inuse to active ratio of its children regardless of @inuse.
*/
if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
iocg->child_active_sum);
} else {
inuse = clamp_t(u32, inuse, 1, active);
}

iocg->last_inuse = iocg->inuse;
if (save)
@ -1040,7 +1050,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
/* update the level sums */
parent->child_active_sum += (s32)(active - child->active);
parent->child_inuse_sum += (s32)(inuse - child->inuse);
/* apply the udpates */
/* apply the updates */
child->active = active;
child->inuse = inuse;

@ -2204,8 +2204,9 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
/* Bypass scheduler for flush requests */
blk_insert_flush(rq);
blk_mq_run_hw_queue(data.hctx, true);
} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
!blk_queue_nonrot(q))) {
} else if (plug && (q->nr_hw_queues == 1 ||
blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) ||
q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
/*
* Use plugging if we have a ->commit_rqs() hook as well, as
* we know the driver uses bd->last in a smart fashion.

@ -1301,6 +1301,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
{"PNP0C0B", }, /* Generic ACPI fan */
{"INT3404", }, /* Fan */
{"INTC1044", }, /* Fan for Tiger Lake generation */
{"INTC1048", }, /* Fan for Alder Lake generation */
{}
};
struct acpi_device *adev = ACPI_COMPANION(dev);

@ -705,6 +705,7 @@ int acpi_device_add(struct acpi_device *device,

result = acpi_device_set_name(device, acpi_device_bus_id);
if (result) {
kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
goto err_unlock;
}

@ -2031,7 +2031,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
* config ref and try to destroy the workqueue from inside the work
* queue.
*/
flush_workqueue(nbd->recv_workq);
if (nbd->recv_workq)
flush_workqueue(nbd->recv_workq);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);

@ -679,7 +679,11 @@ static void remap_devs(struct rnbd_clt_session *sess)
return;
}

rtrs_clt_query(sess->rtrs, &attrs);
err = rtrs_clt_query(sess->rtrs, &attrs);
if (err) {
pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
return;
}
mutex_lock(&sess->lock);
sess->max_io_size = attrs.max_io_size;

@ -1211,7 +1215,11 @@ find_and_get_or_create_sess(const char *sessname,
err = PTR_ERR(sess->rtrs);
goto wake_up_and_put;
}
rtrs_clt_query(sess->rtrs, &attrs);

err = rtrs_clt_query(sess->rtrs, &attrs);
if (err)
goto close_rtrs;

sess->max_io_size = attrs.max_io_size;
sess->queue_depth = attrs.queue_depth;

@ -79,7 +79,7 @@ struct rnbd_clt_session {
DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
int __percpu *cpu_rr; /* per-cpu var for CPU round-robin */
atomic_t busy;
int queue_depth;
size_t queue_depth;
u32 max_io_size;
struct blk_mq_tag_set tag_set;
struct mutex lock; /* protects state and devs_list */

@ -392,7 +392,9 @@ static const struct usb_device_id blacklist_table[] = {

/* MediaTek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_MEDIATEK },
.driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },

/* Additional Realtek 8723AE Bluetooth devices */
{ USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },

@ -656,6 +656,7 @@ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)

if (nr_commands !=
be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
rc = -EFAULT;
tpm_buf_destroy(&buf);
goto out;
}

@ -709,16 +709,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
cap_t cap;
int ret;

/* TPM 2.0 */
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);

/* TPM 1.2 */
ret = request_locality(chip, 0);
if (ret < 0)
return ret;

ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
if (chip->flags & TPM_CHIP_FLAG_TPM2)
ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
else
ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);

release_locality(chip, 0);

@ -1127,12 +1125,20 @@ int tpm_tis_resume(struct device *dev)
if (ret)
return ret;

/* TPM 1.2 requires self-test on resume. This function actually returns
/*
* TPM 1.2 requires self-test on resume. This function actually returns
* an error code but for unknown reason it isn't handled.
*/
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
ret = request_locality(chip, 0);
if (ret < 0)
return ret;

tpm1_do_selftest(chip);

release_locality(chip, 0);
}

return 0;
}
EXPORT_SYMBOL_GPL(tpm_tis_resume);

@ -537,8 +537,13 @@ static const struct samsung_gate_clock top1_gate_clks[] __initconst = {
GATE(CLK_ACLK_FSYS0_200, "aclk_fsys0_200", "dout_aclk_fsys0_200",
ENABLE_ACLK_TOP13, 28, CLK_SET_RATE_PARENT |
CLK_IS_CRITICAL, 0),
/*
* This clock is required for the CMU_FSYS1 registers access, keep it
* enabled permanently until proper runtime PM support is added.
*/
GATE(CLK_ACLK_FSYS1_200, "aclk_fsys1_200", "dout_aclk_fsys1_200",
ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT, 0),
ENABLE_ACLK_TOP13, 24, CLK_SET_RATE_PARENT |
CLK_IS_CRITICAL, 0),

GATE(CLK_SCLK_PHY_FSYS1_26M, "sclk_phy_fsys1_26m",
"dout_sclk_phy_fsys1_26m", ENABLE_SCLK_TOP1_FSYS11,

@ -530,17 +530,17 @@ static void omap_clockevent_unidle(struct clock_event_device *evt)
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);
}

static int __init dmtimer_clockevent_init(struct device_node *np)
static int __init dmtimer_clkevt_init_common(struct dmtimer_clockevent *clkevt,
struct device_node *np,
unsigned int features,
const struct cpumask *cpumask,
const char *name,
int rating)
{
struct dmtimer_clockevent *clkevt;
struct clock_event_device *dev;
struct dmtimer_systimer *t;
int error;

clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
if (!clkevt)
return -ENOMEM;

t = &clkevt->t;
dev = &clkevt->dev;

@ -548,25 +548,23 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
* We mostly use cpuidle_coupled with ARM local timers for runtime,
* so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
*/
dev->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
dev->rating = 300;
dev->features = features;
dev->rating = rating;
dev->set_next_event = dmtimer_set_next_event;
dev->set_state_shutdown = dmtimer_clockevent_shutdown;
dev->set_state_periodic = dmtimer_set_periodic;
dev->set_state_oneshot = dmtimer_clockevent_shutdown;
dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
dev->tick_resume = dmtimer_clockevent_shutdown;
dev->cpumask = cpu_possible_mask;
dev->cpumask = cpumask;

dev->irq = irq_of_parse_and_map(np, 0);
if (!dev->irq) {
error = -ENXIO;
goto err_out_free;
}
if (!dev->irq)
return -ENXIO;

error = dmtimer_systimer_setup(np, &clkevt->t);
if (error)
goto err_out_free;
return error;

clkevt->period = 0xffffffff - DIV_ROUND_CLOSEST(t->rate, HZ);

@ -578,32 +576,54 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
writel_relaxed(OMAP_TIMER_CTRL_POSTED, t->base + t->ifctrl);

error = request_irq(dev->irq, dmtimer_clockevent_interrupt,
IRQF_TIMER, "clockevent", clkevt);
IRQF_TIMER, name, clkevt);
if (error)
goto err_out_unmap;

writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->wakeup);

pr_info("TI gptimer clockevent: %s%lu Hz at %pOF\n",
of_find_property(np, "ti,timer-alwon", NULL) ?
pr_info("TI gptimer %s: %s%lu Hz at %pOF\n",
name, of_find_property(np, "ti,timer-alwon", NULL) ?
"always-on " : "", t->rate, np->parent);

clockevents_config_and_register(dev, t->rate,
3, /* Timer internal resynch latency */
0xffffffff);

if (of_machine_is_compatible("ti,am33xx") ||
of_machine_is_compatible("ti,am43")) {
dev->suspend = omap_clockevent_idle;
dev->resume = omap_clockevent_unidle;
}

return 0;

err_out_unmap:
iounmap(t->base);

return error;
}

static int __init dmtimer_clockevent_init(struct device_node *np)
{
struct dmtimer_clockevent *clkevt;
int error;

clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
if (!clkevt)
return -ENOMEM;

error = dmtimer_clkevt_init_common(clkevt, np,
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
cpu_possible_mask, "clockevent",
300);
if (error)
goto err_out_free;

clockevents_config_and_register(&clkevt->dev, clkevt->t.rate,
3, /* Timer internal resync latency */
0xffffffff);

if (of_machine_is_compatible("ti,am33xx") ||
of_machine_is_compatible("ti,am43")) {
clkevt->dev.suspend = omap_clockevent_idle;
clkevt->dev.resume = omap_clockevent_unidle;
}

return 0;

err_out_free:
kfree(clkevt);

@ -3019,6 +3019,14 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
{}
};

static bool intel_pstate_hwp_is_enabled(void)
{
u64 value;

rdmsrl(MSR_PM_ENABLE, value);
return !!(value & 0x1);
}

static int __init intel_pstate_init(void)
{
const struct x86_cpu_id *id;
@ -3037,8 +3045,12 @@ static int __init intel_pstate_init(void)
* Avoid enabling HWP for processors without EPP support,
* because that means incomplete HWP implementation which is a
* corner case and supporting it is generally problematic.
*
* If HWP is enabled already, though, there is no choice but to
* deal with it.
*/
if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
intel_pstate_hwp_is_enabled()) {
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;

@ -989,7 +989,7 @@ int sev_dev_init(struct psp_device *psp)
if (!sev->vdata) {
ret = -ENODEV;
dev_err(dev, "sev: missing driver data\n");
goto e_err;
goto e_sev;
}

psp_set_sev_irq_handler(psp, sev_irq_handler, sev);
@ -1004,6 +1004,8 @@ int sev_dev_init(struct psp_device *psp)

e_irq:
psp_clear_sev_irq_handler(psp);
e_sev:
devm_kfree(dev, sev);
e_err:
psp->sev_data = NULL;

@ -35,15 +35,15 @@ struct idxd_user_context {
unsigned int flags;
};

enum idxd_cdev_cleanup {
CDEV_NORMAL = 0,
CDEV_FAILED,
};

static void idxd_cdev_dev_release(struct device *dev)
{
dev_dbg(dev, "releasing cdev device\n");
kfree(dev);
struct idxd_cdev *idxd_cdev = container_of(dev, struct idxd_cdev, dev);
struct idxd_cdev_context *cdev_ctx;
struct idxd_wq *wq = idxd_cdev->wq;

cdev_ctx = &ictx[wq->idxd->type];
ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
kfree(idxd_cdev);
}

static struct device_type idxd_cdev_device_type = {
@ -58,14 +58,11 @@ static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
return container_of(cdev, struct idxd_cdev, cdev);
}

static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
{
return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
}

static inline struct idxd_wq *inode_wq(struct inode *inode)
{
return idxd_cdev_wq(inode_idxd_cdev(inode));
struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);

return idxd_cdev->wq;
}

static int idxd_cdev_open(struct inode *inode, struct file *filp)
@ -172,11 +169,10 @@ static __poll_t idxd_cdev_poll(struct file *filp,
struct idxd_user_context *ctx = filp->private_data;
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
unsigned long flags;
__poll_t out = 0;

poll_wait(filp, &idxd_cdev->err_queue, wait);
poll_wait(filp, &wq->err_queue, wait);
spin_lock_irqsave(&idxd->dev_lock, flags);
if (idxd->sw_err.valid)
out = EPOLLIN | EPOLLRDNORM;
@ -198,98 +194,67 @@ int idxd_cdev_get_major(struct idxd_device *idxd)
return MAJOR(ictx[idxd->type].devt);
}

static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
int idxd_wq_add_cdev(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
struct idxd_cdev_context *cdev_ctx;
struct idxd_cdev *idxd_cdev;
struct cdev *cdev;
struct device *dev;
int minor, rc;
struct idxd_cdev_context *cdev_ctx;
int rc, minor;

idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
if (!idxd_cdev->dev)
idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
if (!idxd_cdev)
return -ENOMEM;

dev = idxd_cdev->dev;
dev->parent = &idxd->pdev->dev;
dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
idxd->id, wq->id);
dev->bus = idxd_get_bus_type(idxd);

idxd_cdev->wq = wq;
cdev = &idxd_cdev->cdev;
dev = &idxd_cdev->dev;
cdev_ctx = &ictx[wq->idxd->type];
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
rc = minor;
kfree(dev);
goto ida_err;
}

dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
dev->type = &idxd_cdev_device_type;
rc = device_register(dev);
if (rc < 0) {
dev_err(&idxd->pdev->dev, "device register failed\n");
goto dev_reg_err;
kfree(idxd_cdev);
return minor;
}
idxd_cdev->minor = minor;

return 0;
device_initialize(dev);
dev->parent = &wq->conf_dev;
dev->bus = idxd_get_bus_type(idxd);
dev->type = &idxd_cdev_device_type;
dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);

dev_reg_err:
ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
put_device(dev);
ida_err:
idxd_cdev->dev = NULL;
return rc;
}

static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
enum idxd_cdev_cleanup cdev_state)
{
struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
struct idxd_cdev_context *cdev_ctx;

cdev_ctx = &ictx[wq->idxd->type];
if (cdev_state == CDEV_NORMAL)
cdev_del(&idxd_cdev->cdev);
device_unregister(idxd_cdev->dev);
/*
* The device_type->release() will be called on the device and free
* the allocated struct device. We can just forget it.
*/
ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
idxd_cdev->dev = NULL;
idxd_cdev->minor = -1;
}

int idxd_wq_add_cdev(struct idxd_wq *wq)
{
struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
struct cdev *cdev = &idxd_cdev->cdev;
struct device *dev;
int rc;

rc = idxd_wq_cdev_dev_setup(wq);
rc = dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
idxd->id, wq->id);
if (rc < 0)
return rc;
goto err;

dev = idxd_cdev->dev;
wq->idxd_cdev = idxd_cdev;
cdev_init(cdev, &idxd_cdev_fops);
cdev_set_parent(cdev, &dev->kobj);
rc = cdev_add(cdev, dev->devt, 1);
rc = cdev_device_add(cdev, dev);
if (rc) {
dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
return rc;
goto err;
}

init_waitqueue_head(&idxd_cdev->err_queue);
return 0;

err:
put_device(dev);
wq->idxd_cdev = NULL;
return rc;
}

void idxd_wq_del_cdev(struct idxd_wq *wq)
{
idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
struct idxd_cdev *idxd_cdev;
struct idxd_cdev_context *cdev_ctx;

cdev_ctx = &ictx[wq->idxd->type];
idxd_cdev = wq->idxd_cdev;
wq->idxd_cdev = NULL;
cdev_device_del(&idxd_cdev->cdev, &idxd_cdev->dev);
put_device(&idxd_cdev->dev);
}

int idxd_cdev_register(void)

@ -169,8 +169,6 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
desc->id = i;
desc->wq = wq;
desc->cpu = -1;
dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan);
desc->txd.tx_submit = idxd_dma_tx_submit;
}

return 0;
@ -378,7 +376,8 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,

if (idxd_device_is_halted(idxd)) {
dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
*status = IDXD_CMDSTS_HW_ERR;
if (status)
*status = IDXD_CMDSTS_HW_ERR;
return;
}

@ -14,7 +14,10 @@

static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
return container_of(c, struct idxd_wq, dma_chan);
struct idxd_dma_chan *idxd_chan;

idxd_chan = container_of(c, struct idxd_dma_chan, chan);
return idxd_chan->wq;
}

void idxd_dma_complete_txd(struct idxd_desc *desc,
@ -144,7 +147,7 @@ static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}

dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct dma_chan *c = tx->chan;
struct idxd_wq *wq = to_idxd_wq(c);
@ -165,14 +168,25 @@ dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)

static void idxd_dma_release(struct dma_device *device)
{
struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

kfree(idxd_dma);
}

int idxd_register_dma_device(struct idxd_device *idxd)
{
struct dma_device *dma = &idxd->dma_dev;
struct idxd_dma_dev *idxd_dma;
struct dma_device *dma;
struct device *dev = &idxd->pdev->dev;
int rc;

idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
if (!idxd_dma)
return -ENOMEM;

dma = &idxd_dma->dma;
INIT_LIST_HEAD(&dma->channels);
dma->dev = &idxd->pdev->dev;
dma->dev = dev;

dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
@ -188,35 +202,72 @@ int idxd_register_dma_device(struct idxd_device *idxd)
dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
dma->device_free_chan_resources = idxd_dma_free_chan_resources;

return dma_async_device_register(&idxd->dma_dev);
rc = dma_async_device_register(dma);
if (rc < 0) {
kfree(idxd_dma);
return rc;
}

idxd_dma->idxd = idxd;
/*
* This pointer is protected by the refs taken by the dma_chan. It will remain valid
* as long as there are outstanding channels.
*/
idxd->idxd_dma = idxd_dma;
return 0;
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
dma_async_device_unregister(&idxd->dma_dev);
dma_async_device_unregister(&idxd->idxd_dma->dma);
}

int idxd_register_dma_channel(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
struct dma_device *dma = &idxd->dma_dev;
struct dma_chan *chan = &wq->dma_chan;
int rc;
struct dma_device *dma = &idxd->idxd_dma->dma;
struct device *dev = &idxd->pdev->dev;
struct idxd_dma_chan *idxd_chan;
struct dma_chan *chan;
int rc, i;

memset(&wq->dma_chan, 0, sizeof(struct dma_chan));
idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
if (!idxd_chan)
return -ENOMEM;

chan = &idxd_chan->chan;
chan->device = dma;
list_add_tail(&chan->device_node, &dma->channels);

for (i = 0; i < wq->num_descs; i++) {
struct idxd_desc *desc = wq->descs[i];

dma_async_tx_descriptor_init(&desc->txd, chan);
desc->txd.tx_submit = idxd_dma_tx_submit;
}

rc = dma_async_device_channel_register(dma, chan);
if (rc < 0)
if (rc < 0) {
kfree(idxd_chan);
return rc;
}

wq->idxd_chan = idxd_chan;
idxd_chan->wq = wq;
get_device(&wq->conf_dev);

return 0;
}

void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
struct dma_chan *chan = &wq->dma_chan;
struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
struct dma_chan *chan = &idxd_chan->chan;
struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
dma_async_device_channel_unregister(&idxd_dma->dma, chan);
list_del(&chan->device_node);
kfree(wq->idxd_chan);
wq->idxd_chan = NULL;
put_device(&wq->conf_dev);
}

@ -14,6 +14,9 @@

extern struct kmem_cache *idxd_desc_pool;

struct idxd_device;
struct idxd_wq;

#define IDXD_REG_TIMEOUT 50
#define IDXD_DRAIN_TIMEOUT 5000

@ -68,10 +71,10 @@ enum idxd_wq_type {
};

struct idxd_cdev {
struct idxd_wq *wq;
struct cdev cdev;
struct device *dev;
struct device dev;
int minor;
struct wait_queue_head err_queue;
};

#define IDXD_ALLOCATED_BATCH_SIZE 128U
@ -88,10 +91,16 @@ enum idxd_complete_type {
IDXD_COMPLETE_ABORT,
};

struct idxd_dma_chan {
struct dma_chan chan;
struct idxd_wq *wq;
};

struct idxd_wq {
void __iomem *dportal;
struct device conf_dev;
struct idxd_cdev idxd_cdev;
struct idxd_cdev *idxd_cdev;
struct wait_queue_head err_queue;
struct idxd_device *idxd;
int id;
enum idxd_wq_type type;
@ -112,7 +121,7 @@ struct idxd_wq {
int compls_size;
struct idxd_desc **descs;
struct sbitmap_queue sbq;
struct dma_chan dma_chan;
struct idxd_dma_chan *idxd_chan;
char name[WQ_NAME_SIZE + 1];
u64 max_xfer_bytes;
u32 max_batch_size;
@ -147,6 +156,11 @@ enum idxd_device_flag {
IDXD_FLAG_CMD_RUNNING,
};

struct idxd_dma_dev {
struct idxd_device *idxd;
struct dma_device dma;
};

struct idxd_device {
enum idxd_type type;
struct device conf_dev;
@ -191,7 +205,7 @@ struct idxd_device {
int num_wq_irqs;
struct idxd_irq_entry *irq_entries;

struct dma_device dma_dev;
struct idxd_dma_dev *idxd_dma;
struct workqueue_struct *wq;
struct work_struct work;
};
@ -313,7 +327,6 @@ void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type);
dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx);

/* cdev */
int idxd_cdev_register(void);

@ -175,7 +175,7 @@ static int idxd_setup_internals(struct idxd_device *idxd)
wq->id = i;
wq->idxd = idxd;
mutex_init(&wq->wq_lock);
wq->idxd_cdev.minor = -1;
init_waitqueue_head(&wq->err_queue);
wq->max_xfer_bytes = idxd->max_xfer_bytes;
wq->max_batch_size = idxd->max_batch_size;
wq->wqcfg = devm_kzalloc(dev, idxd->wqcfg_size, GFP_KERNEL);

@ -75,7 +75,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
struct idxd_wq *wq = &idxd->wqs[id];

if (wq->type == IDXD_WQT_USER)
wake_up_interruptible(&wq->idxd_cdev.err_queue);
wake_up_interruptible(&wq->err_queue);
} else {
int i;

@ -83,7 +83,7 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
struct idxd_wq *wq = &idxd->wqs[i];

if (wq->type == IDXD_WQT_USER)
wake_up_interruptible(&wq->idxd_cdev.err_queue);
wake_up_interruptible(&wq->err_queue);
}
}

@ -1052,8 +1052,16 @@ static ssize_t wq_cdev_minor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
int minor = -1;

return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
mutex_lock(&wq->wq_lock);
if (wq->idxd_cdev)
minor = wq->idxd_cdev->minor;
mutex_unlock(&wq->wq_lock);

if (minor == -1)
return -ENXIO;
return sysfs_emit(buf, "%d\n", minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =

@ -75,6 +75,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}

ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
/* flush the cache before commit the IB */
ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;

if (!vm)
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

@ -643,6 +643,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct

/* File created at /sys/class/drm/card0/device/hdcp_srm*/
hdcp_work[0].attr = data_attr;
sysfs_bin_attr_init(&hdcp_work[0].attr);

if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
DRM_WARN("Failed to create device file hdcp_srm");

@ -2504,6 +2504,10 @@ static void commit_planes_for_stream(struct dc *dc,
plane_state->triplebuffer_flips = true;
}
}
if (update_type == UPDATE_TYPE_FULL) {
/* force vsync flip when reconfiguring pipes to prevent underflow */
plane_state->flip_immediate = false;
}
}
}

@ -1,5 +1,5 @@
/*
* Copyright 2012-17 Advanced Micro Devices, Inc.
* Copyright 2012-2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -181,11 +181,14 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,
else
Set HUBP_VREADY_AT_OR_AFTER_VSYNC = 0
*/
if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
value = 1;
} else
value = 0;
if (pipe_dest->htotal != 0) {
if ((pipe_dest->vstartup_start - (pipe_dest->vready_offset+pipe_dest->vupdate_width
+ pipe_dest->vupdate_offset) / pipe_dest->htotal) <= pipe_dest->vblank_end) {
value = 1;
} else
value = 0;
}

REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);
}

@ -789,6 +789,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
hdcp->connection.is_hdcp2_revoked = 1;
status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
} else {
status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
}
}
mutex_unlock(&psp->hdcp_context.mutex);

@ -382,7 +382,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
i830_overlay_clock_gating(dev_priv, true);
}

static void
__i915_active_call static void
intel_overlay_last_flip_retire(struct i915_active *active)
{
struct intel_overlay *overlay =

@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
struct i915_ggtt_view view;

if (i915_gem_object_is_tiled(obj))
chunk = roundup(chunk, tile_row_pages(obj));
chunk = roundup(chunk, tile_row_pages(obj) ?: 1);

view.type = I915_GGTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);

@ -628,7 +628,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)

err = pin_pt_dma(vm, pde->pt.base);
if (err) {
i915_gem_object_put(pde->pt.base);
free_pd(vm, pde);
return err;
}

@ -652,8 +652,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
if (intel_uncore_read(uncore, C0DRB3) ==
intel_uncore_read(uncore, C1DRB3)) {
if (intel_uncore_read16(uncore, C0DRB3) ==
intel_uncore_read16(uncore, C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}

@ -1159,7 +1159,8 @@ static int auto_active(struct i915_active *ref)
return 0;
}

static void auto_retire(struct i915_active *ref)
__i915_active_call static void
auto_retire(struct i915_active *ref)
{
i915_active_put(ref);
}

@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
dp_audio_setup_acr(audio);
dp_audio_safe_to_exit_level(audio);
dp_audio_enable(audio, true);
dp_display_signal_audio_start(dp_display);
dp_display->audio_enabled = true;

end:

@ -176,6 +176,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
return 0;
}

void dp_display_signal_audio_start(struct msm_dp *dp_display)
{
struct dp_display_private *dp;

dp = container_of(dp_display, struct dp_display_private, dp_display);

reinit_completion(&dp->audio_comp);
}

void dp_display_signal_audio_complete(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
@ -620,7 +629,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);

/* signal the disconnect event early to ensure proper teardown */
reinit_completion(&dp->audio_comp);
dp_display_handle_plugged_change(g_dp_display, false);

dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
@ -841,7 +849,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
/* wait only if audio was enabled */
if (dp_display->audio_enabled) {
/* signal the disconnect event */
reinit_completion(&dp->audio_comp);
dp_display_handle_plugged_change(dp_display, false);
if (!wait_for_completion_timeout(&dp->audio_comp,
HZ * 5))

@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
int dp_display_request_irq(struct msm_dp *dp_display);
bool dp_display_check_video_test(struct msm_dp *dp_display);
int dp_display_get_test_bpp(struct msm_dp *dp_display);
void dp_display_signal_audio_start(struct msm_dp *dp_display);
void dp_display_signal_audio_complete(struct msm_dp *dp_display);

#endif /* _DP_DISPLAY_H_ */

@ -1559,6 +1559,7 @@ struct radeon_dpm {
void *priv;
u32 new_active_crtcs;
int new_active_crtc_count;
int high_pixelclock_count;
u32 current_active_crtcs;
int current_active_crtc_count;
bool single_display;

@ -2126,11 +2126,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
rdev->pm.power_state[state_index].clock_info =
kcalloc(1, sizeof(struct radeon_pm_clock_info),
GFP_KERNEL);
/* avoid memory leaks from invalid modes or unknown frev. */
if (!rdev->pm.power_state[state_index].clock_info) {
rdev->pm.power_state[state_index].clock_info =
kzalloc(sizeof(struct radeon_pm_clock_info),
GFP_KERNEL);
}
if (!rdev->pm.power_state[state_index].clock_info)
return state_index;
goto out;
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
@ -2249,17 +2252,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
break;
}
}
out:
/* free any unused clock_info allocation. */
if (state_index && state_index < num_modes) {
kfree(rdev->pm.power_state[state_index].clock_info);
rdev->pm.power_state[state_index].clock_info = NULL;
}

/* last mode is usually default */
if (rdev->pm.default_power_state_index == -1) {
if (state_index && rdev->pm.default_power_state_index == -1) {
rdev->pm.power_state[state_index - 1].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state_index = state_index - 1;
rdev->pm.power_state[state_index - 1].default_clock_mode =
&rdev->pm.power_state[state_index - 1].clock_info[0];
rdev->pm.power_state[state_index].flags &=
rdev->pm.power_state[state_index - 1].flags &=
~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
rdev->pm.power_state[state_index].misc = 0;
rdev->pm.power_state[state_index].misc2 = 0;
rdev->pm.power_state[state_index - 1].misc = 0;
rdev->pm.power_state[state_index - 1].misc2 = 0;
}
return state_index;
}

@ -1747,6 +1747,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
struct radeon_connector *radeon_connector;

if (!rdev->pm.dpm_enabled)
return;
@ -1756,6 +1757,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
/* update active crtc counts */
rdev->pm.dpm.new_active_crtcs = 0;
rdev->pm.dpm.new_active_crtc_count = 0;
rdev->pm.dpm.high_pixelclock_count = 0;
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
@ -1763,6 +1765,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
if (crtc->enabled) {
rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
rdev->pm.dpm.new_active_crtc_count++;
if (!radeon_crtc->connector)
continue;

radeon_connector = to_radeon_connector(radeon_crtc->connector);
if (radeon_connector->pixelclock_for_modeset > 297000)
rdev->pm.dpm.high_pixelclock_count++;
}
}
}

@ -2982,6 +2982,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6605)) {
max_sclk = 75000;
}

if (rdev->pm.dpm.high_pixelclock_count > 1)
disable_sclk_switching = true;
}

if (rps->vce_active) {

@ -209,9 +209,9 @@ int occ_update_response(struct occ *occ)
return rc;

/* limit the maximum rate of polling the OCC */
if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
if (time_after(jiffies, occ->next_update)) {
rc = occ_poll(occ);
occ->last_update = jiffies;
occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
} else {
rc = occ->last_error;
}
@ -1089,6 +1089,7 @@ int occ_setup(struct occ *occ, const char *name)
return rc;
}

occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
occ_parse_poll_response(occ);

rc = occ_setup_sensor_attrs(occ);

@ -99,7 +99,7 @@ struct occ {
u8 poll_cmd_data; /* to perform OCC poll command */
int (*send_cmd)(struct occ *occ, u8 *cmd);

unsigned long last_update;
unsigned long next_update;
struct mutex lock; /* lock OCC access */

struct device *hwmon;

@ -564,7 +564,7 @@ static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed)

static int mtk_i2c_max_step_cnt(unsigned int target_speed)
{
if (target_speed > I2C_MAX_FAST_MODE_FREQ)
if (target_speed > I2C_MAX_FAST_MODE_PLUS_FREQ)
return MAX_HS_STEP_CNT_DIV;
else
return MAX_STEP_CNT_DIV;
@ -635,7 +635,7 @@ static int mtk_i2c_check_ac_timing(struct mtk_i2c *i2c,
if (sda_min > sda_max)
return -3;

if (check_speed > I2C_MAX_FAST_MODE_FREQ) {
if (check_speed > I2C_MAX_FAST_MODE_PLUS_FREQ) {
if (i2c->dev_comp->ltiming_adjust) {
i2c->ac_timing.hs = I2C_TIME_DEFAULT_VALUE |
(sample_cnt << 12) | (high_cnt << 8);
@ -850,7 +850,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,

control_reg = mtk_i2c_readw(i2c, OFFSET_CONTROL) &
~(I2C_CONTROL_DIR_CHANGE | I2C_CONTROL_RS);
if ((i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ) || (left_num >= 1))
if ((i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ) || (left_num >= 1))
control_reg |= I2C_CONTROL_RS;

if (i2c->op == I2C_MASTER_WRRD)
@ -1067,7 +1067,8 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
}
}

if (i2c->auto_restart && num >= 2 && i2c->speed_hz > I2C_MAX_FAST_MODE_FREQ)
if (i2c->auto_restart && num >= 2 &&
i2c->speed_hz > I2C_MAX_FAST_MODE_PLUS_FREQ)
/* ignore the first restart irq after the master code,
* otherwise the first transfer will be discarded.
*/

@ -440,8 +440,13 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
sizeof(rdwr_arg)))
return -EFAULT;

/* Put an arbitrary limit on the number of messages that can
* be sent at once */
if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0)
return -EINVAL;

/*
* Put an arbitrary limit on the number of messages that can
* be sent at once
*/
if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
return -EINVAL;

@ -211,7 +211,6 @@ config DMARD10
config HID_SENSOR_ACCEL_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Accelerometers 3D"

@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
tristate "Common module (trigger) for all HID Sensor IIO drivers"
depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
select IIO_TRIGGER
select IIO_TRIGGERED_BUFFER
help
Say yes here to build trigger support for HID sensors.
Triggers will be send if all requested attributes were read.