Snap for 7394016 from 77aa2f2bf0 to android12-5.10-keystone-qcom-release

Change-Id: If5528805d931eff83af143d059638ce25e598d06

commit 9782146038
@@ -1,4 +1,4 @@
#!/usr/bin/perl
#!/usr/bin/env perl
use strict;
use Text::Tabs;
use Getopt::Long;

@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems

@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# add symbolic names to read_msr / write_msr in trace
# decode_msr msr-index.h < trace
import sys

@@ -1,4 +1,4 @@
#!/usr/bin/perl
#!/usr/bin/env perl
# This is a POC (proof of concept or piece of crap, take your pick) for reading the
# text representation of trace output related to page allocation. It makes an attempt
# to extract some high-level information on what is going on. The accuracy of the parser

@@ -1,4 +1,4 @@
#!/usr/bin/perl
#!/usr/bin/env perl
# This is a POC for reading the text representation of trace output related to
# page reclaim. It makes an attempt to extract some high-level information on
# what is going on. The accuracy of the parser may vary
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
SUBLEVEL = 38
SUBLEVEL = 39
EXTRAVERSION =
NAME = Dare mighty things
File diff suppressed because it is too large
@@ -1373,7 +1373,7 @@
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_gic_v3_set_affinity
__traceiter_android_rvh_gic_v3_set_affinity
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_scheduler_tick
__traceiter_cpu_idle

@@ -1412,7 +1412,7 @@
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_gic_v3_set_affinity
__tracepoint_android_rvh_gic_v3_set_affinity
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_scheduler_tick
__tracepoint_cpu_idle

@@ -982,7 +982,6 @@
kasprintf
kernel_cpustat
kernel_kobj
kernel_read
kernel_restart
kern_mount
kern_unmount

@@ -11,6 +11,8 @@
add_uevent_var
add_wait_queue
adjust_managed_page_count
aes_encrypt
aes_expandkey
alarm_cancel
alarm_init
alarm_start_relative

@@ -2403,6 +2405,7 @@
__traceiter_android_rvh_find_busiest_queue
__traceiter_android_rvh_find_lowest_rq
__traceiter_android_rvh_flush_task
__traceiter_android_rvh_gic_v3_set_affinity
__traceiter_android_rvh_irqs_disable
__traceiter_android_rvh_irqs_enable
__traceiter_android_rvh_migrate_queued_task

@@ -2488,6 +2491,7 @@
__tracepoint_android_rvh_find_busiest_queue
__tracepoint_android_rvh_find_lowest_rq
__tracepoint_android_rvh_flush_task
__tracepoint_android_rvh_gic_v3_set_affinity
__tracepoint_android_rvh_irqs_disable
__tracepoint_android_rvh_irqs_enable
__tracepoint_android_rvh_migrate_queued_task

@@ -2528,6 +2532,8 @@
__tracepoint_android_vh_binder_set_priority
__tracepoint_android_vh_binder_transaction_init
__tracepoint_android_vh_binder_wakeup_ilocked
__tracepoint_android_vh_check_uninterruptible_tasks
__tracepoint_android_vh_check_uninterruptible_tasks_dn
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_dump_throttled_rt_tasks
@@ -24,6 +24,7 @@
#include <asm/vdso_datapage.h>
#include <asm/hardware/cache-l2x0.h>
#include <linux/kbuild.h>
#include <linux/arm-smccc.h>
#include "signal.h"
/*

@@ -148,6 +149,8 @@ int main(void)
DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
#endif
DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
@@ -3,7 +3,9 @@
* Copyright (c) 2015, Linaro Limited
*/
#include <linux/linkage.h>
#include <linux/arm-smccc.h>
#include <asm/asm-offsets.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>

@@ -27,7 +29,14 @@ UNWIND( .fnstart)
UNWIND( .save {r4-r7})
ldm r12, {r4-r7}
\instr
pop {r4-r7}
ldr r4, [sp, #36]
cmp r4, #0
beq 1f // No quirk structure
ldr r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS]
cmp r5, #ARM_SMCCC_QUIRK_QCOM_A6
bne 1f // No quirk present
str r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS]
1: pop {r4-r7}
ldr r12, [sp, #(4 * 4)]
stm r12, {r0-r3}
bx lr
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

@@ -25,6 +26,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
if (!idmap_pgd)
return -EINVAL;
/*
* Function graph tracer state gets incosistent when the kernel
* calls functions that never return (aka suspend finishers) hence
* disable graph tracing during their execution.
*/
pause_graph_tracing();
/*
* Provide a temporary page table with an identity mapping for
* the MMU-enable code, required for resuming. On successful

@@ -32,6 +40,9 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* back to the correct page tables.
*/
ret = __cpu_suspend(arg, fn, __mpidr);
unpause_graph_tracing();
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();

@@ -45,7 +56,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
u32 __mpidr = cpu_logical_map(smp_processor_id());
return __cpu_suspend(arg, fn, __mpidr);
int ret;
pause_graph_tracing();
ret = __cpu_suspend(arg, fn, __mpidr);
unpause_graph_tracing();
return ret;
}
#define idmap_pgd NULL
#endif
@@ -157,7 +157,10 @@ libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
boot := arch/arm64/boot
KBUILD_IMAGE := $(boot)/Image.gz
# Don't compile Image in mixed build with "all" target
ifndef KBUILD_MIXED_TREE
all: Image.gz
endif
Image: vmlinux
@@ -13,6 +13,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y

@@ -43,6 +44,7 @@ CONFIG_SLAB_FREELIST_RANDOM=y
CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
CONFIG_PROFILING=y
# CONFIG_ZONE_DMA is not set
CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_HISI=y
CONFIG_ARCH_QCOM=y

@@ -363,6 +365,7 @@ CONFIG_HW_RANDOM=y
# CONFIG_DEVPORT is not set
# CONFIG_I2C_COMPAT is not set
# CONFIG_I2C_HELPER_AUTO is not set
CONFIG_I3C=y
CONFIG_SPI=y
CONFIG_SPMI=y
# CONFIG_SPMI_MSM_PMIC_ARB is not set

@@ -661,6 +664,4 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_TRACE_MMIO_ACCESS=y
CONFIG_TRACEFS_DISABLE_AUTOMOUNT=y
CONFIG_HIST_TRIGGERS=y
CONFIG_KUNIT=y
CONFIG_KUNIT_DEBUGFS=y
# CONFIG_RUNTIME_TESTING_MENU is not set
@@ -48,8 +48,6 @@ long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
void mte_assign_mem_tag_range(void *addr, size_t size);
#else /* CONFIG_ARM64_MTE */
/* unused if !CONFIG_ARM64_MTE, silence the compiler */

@@ -88,10 +86,6 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
return -EIO;
}
static inline void mte_assign_mem_tag_range(void *addr, size_t size)
{
}
#endif /* CONFIG_ARM64_MTE */
#ifdef CONFIG_KASAN_HW_TAGS
@@ -156,11 +156,13 @@ struct thread_struct {
struct ptrauth_keys_kernel keys_kernel;
#endif
#ifdef CONFIG_ARM64_MTE
u64 sctlr_tcf0;
u64 gcr_user_excl;
#endif
u64 sctlr_user;
};
#define SCTLR_USER_MASK SCTLR_EL1_TCF0_MASK
static inline void arch_thread_struct_whitelist(unsigned long *offset,
unsigned long *size)
{

@@ -252,6 +254,8 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
void set_task_sctlr_el1(u64 sctlr);
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
@@ -185,26 +185,6 @@ void mte_check_tfsr_el1(void)
}
#endif
static void update_sctlr_el1_tcf0(u64 tcf0)
{
/* ISB required for the kernel uaccess routines */
sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
isb();
}
static void set_sctlr_el1_tcf0(u64 tcf0)
{
/*
* mte_thread_switch() checks current->thread.sctlr_tcf0 as an
* optimisation. Disable preemption so that it does not see
* the variable update before the SCTLR_EL1.TCF0 one.
*/
preempt_disable();
current->thread.sctlr_tcf0 = tcf0;
update_sctlr_el1_tcf0(tcf0);
preempt_enable();
}
static void update_gcr_el1_excl(u64 excl)
{

@@ -237,31 +217,22 @@ void flush_mte_state(void)
write_sysreg_s(0, SYS_TFSRE0_EL1);
clear_thread_flag(TIF_MTE_ASYNC_FAULT);
/* disable tag checking */
set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
SCTLR_EL1_TCF0_NONE);
/* reset tag generation mask */
set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
}
void mte_thread_switch(struct task_struct *next)
{
if (!system_supports_mte())
return;
/* avoid expensive SCTLR_EL1 accesses if no change */
if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
else
isb();
/*
* Check if an async tag exception occurred at EL1.
*
* Note: On the context switch path we rely on the dsb() present
* in __switch_to() to guarantee that the indirect writes to TFSR_EL1
* are synchronized before this point.
* isb() above is required for the same reason.
*
*/
isb();
mte_check_tfsr_el1();
}

@@ -291,7 +262,7 @@ void mte_suspend_exit(void)
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
u64 tcf0;
u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
SYS_GCR_EL1_EXCL_MASK;

@@ -300,23 +271,23 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
switch (arg & PR_MTE_TCF_MASK) {
case PR_MTE_TCF_NONE:
tcf0 = SCTLR_EL1_TCF0_NONE;
sctlr |= SCTLR_EL1_TCF0_NONE;
break;
case PR_MTE_TCF_SYNC:
tcf0 = SCTLR_EL1_TCF0_SYNC;
sctlr |= SCTLR_EL1_TCF0_SYNC;
break;
case PR_MTE_TCF_ASYNC:
tcf0 = SCTLR_EL1_TCF0_ASYNC;
sctlr |= SCTLR_EL1_TCF0_ASYNC;
break;
default:
return -EINVAL;
}
if (task != current) {
task->thread.sctlr_tcf0 = tcf0;
task->thread.sctlr_user = sctlr;
task->thread.gcr_user_excl = gcr_excl;
} else {
set_sctlr_el1_tcf0(tcf0);
set_task_sctlr_el1(sctlr);
set_gcr_el1_excl(gcr_excl);
}

@@ -333,7 +304,7 @@ long get_mte_ctrl(struct task_struct *task)
ret = incl << PR_MTE_TAG_SHIFT;
switch (task->thread.sctlr_tcf0) {
switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
case SCTLR_EL1_TCF0_NONE:
ret |= PR_MTE_TCF_NONE;
break;
@@ -538,6 +538,27 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
write_sysreg(val, cntkctl_el1);
}
static void update_sctlr_el1(u64 sctlr)
{
sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK, sctlr);
/* ISB required for the kernel uaccess routines when setting TCF0. */
isb();
}
void set_task_sctlr_el1(u64 sctlr)
{
/*
* __switch_to() checks current->thread.sctlr as an
* optimisation. Disable preemption so that it does not see
* the variable update before the SCTLR_EL1 one.
*/
preempt_disable();
current->thread.sctlr_user = sctlr;
update_sctlr_el1(sctlr);
preempt_enable();
}
/*
* Thread switching.
*/

@@ -574,6 +595,9 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
* registers.
*/
mte_thread_switch(next);
/* avoid expensive SCTLR_EL1 accesses if no change */
if (prev->thread.sctlr_user != next->thread.sctlr_user)
update_sctlr_el1(next->thread.sctlr_user);
trace_android_vh_is_fpsimd_save(prev, next);
@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
#
# Usage: unwcheck.py FILE
@@ -13,9 +13,19 @@
#endif
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
/*
* Clang prior to 13 had "mcount" instead of "_mcount":
* https://reviews.llvm.org/D98881
*/
#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
#define MCOUNT_NAME _mcount
#else
#define MCOUNT_NAME mcount
#endif
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__
void _mcount(void);
void MCOUNT_NAME(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;

@@ -36,7 +46,7 @@ struct dyn_arch_ftrace {
* both auipc and jalr at the same time.
*/
#define MCOUNT_ADDR ((unsigned long)_mcount)
#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME)
#define JALR_SIGN_MASK (0x00000800)
#define JALR_OFFSET_MASK (0x00000fff)
#define AUIPC_OFFSET_MASK (0xfffff000)
@@ -47,8 +47,8 @@
ENTRY(ftrace_stub)
#ifdef CONFIG_DYNAMIC_FTRACE
.global _mcount
.set _mcount, ftrace_stub
.global MCOUNT_NAME
.set MCOUNT_NAME, ftrace_stub
#endif
ret
ENDPROC(ftrace_stub)

@@ -78,7 +78,7 @@ ENDPROC(return_to_handler)
#endif
#ifndef CONFIG_DYNAMIC_FTRACE
ENTRY(_mcount)
ENTRY(MCOUNT_NAME)
la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return

@@ -124,6 +124,6 @@ do_trace:
jalr t5
RESTORE_ABI_STATE
ret
ENDPROC(_mcount)
ENDPROC(MCOUNT_NAME)
#endif
EXPORT_SYMBOL(_mcount)
EXPORT_SYMBOL(MCOUNT_NAME)
@@ -41,11 +41,10 @@ KASAN_SANITIZE := n
$(obj)/vdso.o: $(obj)/vdso.so
# link rule for the .so file, .lds has to be first
SYSCFLAGS_vdso.so.dbg = $(c_flags)
$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-Wl,--build-id=sha1 -Wl,--hash-style=both
LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
--build-id=sha1 --hash-style=both --eh-frame-hdr
# We also create a special relocatable object that should mirror the symbol
# table and layout of the linked DSO. With ld --just-symbols we can then

@@ -60,13 +59,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# actual build commands
# The DSO images are built using a special linker script
# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
# Make sure only to export the intended __vdso_xxx symbol offsets.
quiet_cmd_vdsold = VDSOLD $@
cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
-Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
$(CROSS_COMPILE)objcopy \
$(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \
$(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
rm $@.tmp
# Extracts symbol offsets from the VDSO, converting them into an assembly file
@@ -17,6 +17,7 @@ config GCOV
bool "Enable gcov support"
depends on DEBUG_INFO
depends on !KCOV
depends on !MODULES
help
This option allows developers to retrieve coverage data from a UML
session.
@@ -21,7 +21,6 @@ obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
obj-$(CONFIG_GPROF) += gprof_syms.o
obj-$(CONFIG_GCOV) += gmon_syms.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH)
ENTRY(_start)
jiffies = jiffies_64;
VERSION {
{
local: *;
};
}
SECTIONS
{
PROVIDE (__executable_start = START);
@@ -1,16 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
*/
#include <linux/module.h>
extern void __bb_init_func(void *) __attribute__((weak));
EXPORT_SYMBOL(__bb_init_func);
extern void __gcov_init(void *) __attribute__((weak));
EXPORT_SYMBOL(__gcov_init);
extern void __gcov_merge_add(void *, unsigned int) __attribute__((weak));
EXPORT_SYMBOL(__gcov_merge_add);
extern void __gcov_exit(void) __attribute__((weak));
EXPORT_SYMBOL(__gcov_exit);
@@ -7,6 +7,12 @@ OUTPUT_ARCH(ELF_ARCH)
ENTRY(_start)
jiffies = jiffies_64;
VERSION {
{
local: *;
};
}
SECTIONS
{
/* This must contain the right address - not quite the default ELF one.*/
@@ -262,8 +262,11 @@ BOOT_TARGETS = bzdisk fdimage fdimage144 fdimage288 isoimage
PHONY += bzImage $(BOOT_TARGETS)
# Don't compile Image in mixed build with "all" target
ifndef KBUILD_MIXED_TREE
# Default kernel to build
all: bzImage
endif
# KBUILD_IMAGE specify target image being built
KBUILD_IMAGE := $(boot)/bzImage
@@ -15,6 +15,7 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_IKHEADERS=y
CONFIG_UCLAMP_TASK=y
CONFIG_UCLAMP_BUCKETS_COUNT=20
CONFIG_CGROUPS=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y

@@ -329,6 +330,7 @@ CONFIG_HW_RANDOM=y
CONFIG_HPET=y
# CONFIG_I2C_COMPAT is not set
# CONFIG_I2C_HELPER_AUTO is not set
CONFIG_I3C=y
CONFIG_SPI=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_GENERIC_PLATFORM=y

@@ -595,5 +597,3 @@ CONFIG_SCHEDSTATS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_TRACEFS_DISABLE_AUTOMOUNT=y
CONFIG_UNWINDER_FRAME_POINTER=y
CONFIG_KUNIT=y
CONFIG_KUNIT_DEBUGFS=y
@@ -253,7 +253,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info)
rv->err = wrmsr_safe_regs(rv->regs);
}
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
int err;
struct msr_regs_info rv;

@@ -266,7 +266,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
int err;
struct msr_regs_info rv;
@@ -199,6 +199,20 @@ struct bt_iter_data {
bool reserved;
};
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
unsigned int bitnr)
{
struct request *rq;
unsigned long flags;
spin_lock_irqsave(&tags->lock, flags);
rq = tags->rqs[bitnr];
if (!rq || !refcount_inc_not_zero(&rq->ref))
rq = NULL;
spin_unlock_irqrestore(&tags->lock, flags);
return rq;
}
static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
struct bt_iter_data *iter_data = data;

@@ -206,18 +220,22 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
struct blk_mq_tags *tags = hctx->tags;
bool reserved = iter_data->reserved;
struct request *rq;
bool ret = true;
if (!reserved)
bitnr += tags->nr_reserved_tags;
rq = tags->rqs[bitnr];
/*
* We can hit rq == NULL here, because the tagging functions
* test and set the bit before assigning ->rqs[].
*/
if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx)
return iter_data->fn(hctx, rq, iter_data->data, reserved);
return true;
rq = blk_mq_find_and_get_req(tags, bitnr);
if (!rq)
return true;
if (rq->q == hctx->queue && rq->mq_hctx == hctx)
ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
blk_mq_put_rq_ref(rq);
return ret;
}
/**

@@ -264,6 +282,8 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
struct blk_mq_tags *tags = iter_data->tags;
bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
struct request *rq;
bool ret = true;
bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);
if (!reserved)
bitnr += tags->nr_reserved_tags;

@@ -272,16 +292,19 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
* We can hit rq == NULL here, because the tagging functions
* test and set the bit before assigning ->rqs[].
*/
if (iter_data->flags & BT_TAG_ITER_STATIC_RQS)
if (iter_static_rqs)
rq = tags->static_rqs[bitnr];
else
rq = tags->rqs[bitnr];
rq = blk_mq_find_and_get_req(tags, bitnr);
if (!rq)
return true;
if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
!blk_mq_request_started(rq))
return true;
return iter_data->fn(rq, iter_data->data, reserved);
if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
blk_mq_request_started(rq))
ret = iter_data->fn(rq, iter_data->data, reserved);
if (!iter_static_rqs)
blk_mq_put_rq_ref(rq);
return ret;
}
/**

@@ -348,6 +371,9 @@ void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
* indicates whether or not @rq is a reserved request. Return
* true to continue iterating tags, false to stop.
* @priv: Will be passed as second argument to @fn.
*
* We grab one request reference before calling @fn and release it after
* @fn returns.
*/
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv)

@@ -516,6 +542,7 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
tags->nr_tags = total_tags;
tags->nr_reserved_tags = reserved_tags;
spin_lock_init(&tags->lock);
if (flags & BLK_MQ_F_TAG_HCTX_SHARED)
return tags;
@@ -20,6 +20,12 @@ struct blk_mq_tags {
struct request **rqs;
struct request **static_rqs;
struct list_head page_list;
/*
* used to clear request reference in rqs[] before freeing one
* request pool
*/
spinlock_t lock;
};
extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags,
@@ -928,6 +928,14 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
return false;
}
void blk_mq_put_rq_ref(struct request *rq)
{
if (is_flush_rq(rq, rq->mq_hctx))
rq->end_io(rq, 0);
else if (refcount_dec_and_test(&rq->ref))
__blk_mq_free_request(rq);
}
static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv, bool reserved)
{

@@ -961,11 +969,7 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
if (blk_mq_req_expired(rq, next))
blk_mq_rq_timed_out(rq, reserved);
if (is_flush_rq(rq, hctx))
rq->end_io(rq, 0);
else if (refcount_dec_and_test(&rq->ref))
__blk_mq_free_request(rq);
blk_mq_put_rq_ref(rq);
return true;
}

@@ -2273,6 +2277,45 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
return BLK_QC_T_NONE;
}
static size_t order_to_size(unsigned int order)
{
return (size_t)PAGE_SIZE << order;
}
/* called before freeing request pool in @tags */
static void blk_mq_clear_rq_mapping(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags, unsigned int hctx_idx)
{
struct blk_mq_tags *drv_tags = set->tags[hctx_idx];
struct page *page;
unsigned long flags;
list_for_each_entry(page, &tags->page_list, lru) {
unsigned long start = (unsigned long)page_address(page);
unsigned long end = start + order_to_size(page->private);
int i;
for (i = 0; i < set->queue_depth; i++) {
struct request *rq = drv_tags->rqs[i];
unsigned long rq_addr = (unsigned long)rq;
if (rq_addr >= start && rq_addr < end) {
WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
cmpxchg(&drv_tags->rqs[i], rq, NULL);
}
}
}
/*
* Wait until all pending iteration is done.
*
* Request reference is cleared and it is guaranteed to be observed
* after the ->lock is released.
*/
spin_lock_irqsave(&drv_tags->lock, flags);
spin_unlock_irqrestore(&drv_tags->lock, flags);
}
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx)
{

@@ -2291,6 +2334,8 @@ void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
}
}
blk_mq_clear_rq_mapping(set, tags, hctx_idx);
while (!list_empty(&tags->page_list)) {
page = list_first_entry(&tags->page_list, struct page, lru);
list_del_init(&page->lru);

@@ -2350,11 +2395,6 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
return tags;
}
static size_t order_to_size(unsigned int order)
{
return (size_t)PAGE_SIZE << order;
}
static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, int node)
{

@@ -2573,16 +2613,49 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
&hctx->cpuhp_dead);
}
/*
* Before freeing hw queue, clearing the flush request reference in
* tags->rqs[] for avoiding potential UAF.
*/
static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
unsigned int queue_depth, struct request *flush_rq)
{
int i;
unsigned long flags;
/* The hw queue may not be mapped yet */
if (!tags)
return;
WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
for (i = 0; i < queue_depth; i++)
cmpxchg(&tags->rqs[i], flush_rq, NULL);
/*
* Wait until all pending iteration is done.
*
* Request reference is cleared and it is guaranteed to be observed
* after the ->lock is released.
*/
spin_lock_irqsave(&tags->lock, flags);
spin_unlock_irqrestore(&tags->lock, flags);
}
/* hctx->ctxs will be freed in queue's release handler */
static void blk_mq_exit_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
struct request *flush_rq = hctx->fq->flush_rq;
if (blk_mq_hw_queue_mapped(hctx))
blk_mq_tag_idle(hctx);
blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
set->queue_depth, flush_rq);
if (set->ops->exit_request)
set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
set->ops->exit_request(set, flush_rq, hctx_idx);
if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx);
@@ -47,6 +47,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *start);
void blk_mq_put_rq_ref(struct request *rq);
/*
* Internal helpers for allocating/freeing the request map
@@ -1,5 +1,5 @@
BRANCH=android12-5.10
KMI_GENERATION=4
KMI_GENERATION=5
LLVM=1
DEPMOD=depmod
@@ -55,6 +55,8 @@
#include <trace/hooks/vmscan.h>
#include <trace/hooks/psi.h>
#include <trace/hooks/selinux.h>
#include <trace/hooks/hung_task.h>
#include <trace/hooks/mmc_core.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event

@@ -106,7 +108,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sysrq_crash);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dump_throttled_rt_tasks);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_hotplug);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_jiffies_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_set_affinity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_gic_v3_set_affinity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_affinity_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_suspend_epoch_val);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_resume_epoch_val);

@@ -175,6 +177,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sea);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_mem_abort);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sp_pc_abort);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_setup_dma_ops);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_alloc_iova);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_free_iova);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_pick_next_entity);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_wakeup);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_alloc_start);

@@ -229,6 +233,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_module_permit_after_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_util_est_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_meminfo_proc_show);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_mm);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_unmapped_area_from_anti_fragment_pool);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exclude_reserved_zone);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_unmapped_area_include_reserved_zone);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_mem);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_print_slabinfo_header);

@@ -277,3 +284,15 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_inactive_ratio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_update_topology_flags_workfn);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_of_i2c_get_board_info);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_attach);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_dirty_limits);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_uninterruptible_tasks);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_uninterruptible_tasks_dn);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmc_blk_reset);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmc_blk_mq_rw_recovery);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sd_update_bus_speed_mode);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmc_attach_sd);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sdhci_get_cd);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmc_gpio_cd_irqt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_vmalloc_stack);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_stack_hash);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_track_hash);
@@ -937,21 +937,20 @@ int dw_edma_remove(struct dw_edma_chip *chip)
/* Power management */
pm_runtime_disable(dev);
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
vc.chan.device_node) {
list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
}
list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
vc.chan.device_node) {
list_del(&chan->vc.chan.device_node);
tasklet_kill(&chan->vc.task);
}
/* Deregister eDMA device */
dma_async_device_unregister(&dw->wr_edma);
list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
vc.chan.device_node) {
tasklet_kill(&chan->vc.task);
list_del(&chan->vc.chan.device_node);
}
dma_async_device_unregister(&dw->rd_edma);
list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
vc.chan.device_node) {
tasklet_kill(&chan->vc.task);
list_del(&chan->vc.chan.device_node);
}
/* Turn debugfs off */
dw_edma_v0_core_debugfs_off();
@@ -1407,6 +1407,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.no_edge_events_on_boot = true,
},
},
{
/*
* The Dell Venue 10 Pro 5055, with Bay Trail SoC + TI PMIC uses an
* external embedded-controller connected via I2C + an ACPI GPIO
* event handler on INT33FFC:02 pin 12, causing spurious wakeups.
*/
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Venue 10 Pro 5055"),
},
.driver_data = &(struct acpi_gpiolib_dmi_quirk) {
.ignore_wake = "INT33FC:02@12",
},
},
{
/*
* HP X2 10 models with Cherry Trail SoC + TI PMIC use an
@@ -8611,6 +8611,53 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
}
#endif
static int validate_overlay(struct drm_atomic_state *state)
{
int i;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_plane_state *primary_state, *overlay_state = NULL;
/* Check if primary plane is contained inside overlay */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
if (drm_atomic_plane_disabling(plane->state, new_plane_state))
return 0;
overlay_state = new_plane_state;
continue;
}
}
/* check if we're making changes to the overlay plane */
if (!overlay_state)
return 0;
/* check if overlay plane is enabled */
if (!overlay_state->crtc)
return 0;
/* find the primary plane for the CRTC that the overlay is enabled on */
primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
if (IS_ERR(primary_state))
return PTR_ERR(primary_state);
/* check if primary plane is enabled */
if (!primary_state->crtc)
return 0;
/* Perform the bounds check to ensure the overlay plane covers the primary */
if (primary_state->crtc_x < overlay_state->crtc_x ||
primary_state->crtc_y < overlay_state->crtc_y ||
primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
return -EINVAL;
}
return 0;
}
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
* @dev: The DRM device

@@ -8789,6 +8836,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
ret = validate_overlay(state);
if (ret)
goto fail;
/* Add new/modified planes */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
@@ -1110,7 +1110,6 @@ static int navi10_force_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_MCLK:
case SMU_UCLK:
case SMU_DCEFCLK:
case SMU_FCLK:
/* There is only 2 levels for fine grained DPM */
if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {

@@ -1130,6 +1129,10 @@
if (ret)
return size;
break;
case SMU_DCEFCLK:
dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
break;
default:
break;
}
@@ -1018,7 +1018,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_MCLK:
case SMU_UCLK:
case SMU_DCEFCLK:
case SMU_FCLK:
/* There is only 2 levels for fine grained DPM */
if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {

@@ -1038,6 +1037,9 @@
if (ret)
goto forec_level_out;
break;
case SMU_DCEFCLK:
dev_info(smu->adev->dev,"Setting DCEFCLK min/max dpm level is not supported!\n");
break;
default:
break;
}
@@ -5655,7 +5655,18 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
for (;;) {
u8 esi[DP_DPRX_ESI_LEN] = {};
/*
* The +2 is because DP_DPRX_ESI_LEN is 14, but we then
* pass in "esi+10" to drm_dp_channel_eq_ok(), which
* takes a 6-byte array. So we actually need 16 bytes
* here.
*
* Somebody who knows what the limits actually are
* should check this, but for now this is at least
* harmless and avoids a valid compiler warning about
* using more of the array than we have allocated.
*/
u8 esi[DP_DPRX_ESI_LEN+2] = {};
bool handled;
int retry;
@@ -38,6 +38,7 @@
#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <linux/uuid.h>
#include <asm/unaligned.h>
/* Device, Driver information */

@@ -1224,6 +1225,40 @@ static void elants_i2c_power_off(void *_data)
}
}
#ifdef CONFIG_ACPI
static const struct acpi_device_id i2c_hid_ids[] = {
{"ACPI0C50", 0 },
{"PNP0C50", 0 },
{ },
};
static const guid_t i2c_hid_guid =
GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
static bool elants_acpi_is_hid_device(struct device *dev)
{
acpi_handle handle = ACPI_HANDLE(dev);
union acpi_object *obj;
if (acpi_match_device_ids(ACPI_COMPANION(dev), i2c_hid_ids))
return false;
obj = acpi_evaluate_dsm_typed(handle, &i2c_hid_guid, 1, 1, NULL, ACPI_TYPE_INTEGER);
if (obj) {
ACPI_FREE(obj);
return true;
}
return false;
}
#else
static bool elants_acpi_is_hid_device(struct device *dev)
{
return false;
}
#endif
static int elants_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{

@@ -1232,9 +1267,14 @@ static int elants_i2c_probe(struct i2c_client *client,
unsigned long irqflags;
int error;
/* Don't bind to i2c-hid compatible devices, these are handled by the i2c-hid drv. */
if (elants_acpi_is_hid_device(&client->dev)) {
dev_warn(&client->dev, "This device appears to be an I2C-HID device, not binding\n");
return -ENODEV;
}
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev,
"%s: i2c check functionality error\n", DEVICE_NAME);
dev_err(&client->dev, "I2C check functionality error\n");
return -ENXIO;
}
@@ -20,6 +20,7 @@
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/regulator/consumer.h>

@@ -335,10 +336,8 @@ static int silead_ts_get_id(struct i2c_client *client)
error = i2c_smbus_read_i2c_block_data(client, SILEAD_REG_ID,
sizeof(chip_id), (u8 *)&chip_id);
if (error < 0) {
dev_err(&client->dev, "Chip ID read error %d\n", error);
if (error < 0)
return error;
}
data->chip_id = le32_to_cpu(chip_id);
dev_info(&client->dev, "Silead chip ID: 0x%8X", data->chip_id);

@@ -351,12 +350,49 @@ static int silead_ts_setup(struct i2c_client *client)
int error;
u32 status;
/*
* Some buggy BIOS-es bring up the chip in a stuck state where it
* blocks the I2C bus. The following steps are necessary to
* unstuck the chip / bus:
* 1. Turn off the Silead chip.
* 2. Try to do an I2C transfer with the chip, this will fail in
* response to which the I2C-bus-driver will call:
* i2c_recover_bus() which will unstuck the I2C-bus. Note the
* unstuck-ing of the I2C bus only works if we first drop the
* chip off the bus by turning it off.
* 3. Turn the chip back on.
*
* On the x86/ACPI systems were this problem is seen, step 1. and
* 3. require making ACPI calls and dealing with ACPI Power
* Resources. The workaround below runtime-suspends the chip to
* turn it off, leaving it up to the ACPI subsystem to deal with
* this.
*/
if (device_property_read_bool(&client->dev,
"silead,stuck-controller-bug")) {
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_allow(&client->dev);
pm_runtime_suspend(&client->dev);
dev_warn(&client->dev, FW_BUG "Stuck I2C bus: please ignore the next 'controller timed out' error\n");
silead_ts_get_id(client);
/* The forbid will also resume the device */
pm_runtime_forbid(&client->dev);
pm_runtime_disable(&client->dev);
}
silead_ts_set_power(client, SILEAD_POWER_OFF);
silead_ts_set_power(client, SILEAD_POWER_ON);
error = silead_ts_get_id(client);
if (error)
if (error) {
dev_err(&client->dev, "Chip ID read error %d\n", error);
return error;
}
error = silead_ts_init(client);
if (error)
@@ -23,6 +23,7 @@
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <trace/hooks/iommu.h>
struct iommu_dma_msi_page {
struct list_head list;

@@ -486,6 +487,8 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
true);
trace_android_vh_iommu_alloc_iova(dev, iova, size);
return (dma_addr_t)iova << shift;
}

@@ -503,6 +506,8 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
trace_android_vh_iommu_free_iova(iova, size);
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
@@ -1211,7 +1211,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
reg = gic_dist_base(d) + offset + (index * 8);
val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
trace_android_vh_gic_v3_set_affinity(d, mask_val, &val, force, gic_dist_base(d));
trace_android_rvh_gic_v3_set_affinity(d, mask_val, &val, force, gic_dist_base(d));
gic_write_irouter(val, reg);
/*
@@ -721,7 +721,7 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
* Return value: CAPI result code
*/
u16 capi20_get_manufacturer(u32 contr, u8 *buf)
u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN])
{
struct capi_ctr *ctr;
u16 ret;

@@ -787,7 +787,7 @@ u16 capi20_get_version(u32 contr, struct capi_version *verp)
* Return value: CAPI result code
*/
u16 capi20_get_serial(u32 contr, u8 *serial)
u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
{
struct capi_ctr *ctr;
u16 ret;
@@ -2259,7 +2259,16 @@ static void new_to_req(struct v4l2_ctrl_ref *ref)
if (!ref)
return;
ptr_to_ptr(ref->ctrl, ref->ctrl->p_new, ref->p_req);
ref->req = ref;
ref->valid_p_req = true;
}
/* Copy the current value to the request value */
static void cur_to_req(struct v4l2_ctrl_ref *ref)
{
if (!ref)
return;
ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->p_req);
ref->valid_p_req = true;
}
/* Copy the request value to the new value */

@@ -2267,8 +2276,8 @@ static void req_to_new(struct v4l2_ctrl_ref *ref)
{
if (!ref)
return;
if (ref->req)
ptr_to_ptr(ref->ctrl, ref->req->p_req, ref->ctrl->p_new);
if (ref->valid_p_req)
ptr_to_ptr(ref->ctrl, ref->p_req, ref->ctrl->p_new);
else
ptr_to_ptr(ref->ctrl, ref->ctrl->p_cur, ref->ctrl->p_new);
}

@@ -3446,39 +3455,8 @@ static void v4l2_ctrl_request_queue(struct media_request_object *obj)
struct v4l2_ctrl_handler *hdl =
container_of(obj, struct v4l2_ctrl_handler, req_obj);
struct v4l2_ctrl_handler *main_hdl = obj->priv;
struct v4l2_ctrl_handler *prev_hdl = NULL;
struct v4l2_ctrl_ref *ref_ctrl, *ref_ctrl_prev = NULL;
mutex_lock(main_hdl->lock);
if (list_empty(&main_hdl->requests_queued))
goto queue;
prev_hdl = list_last_entry(&main_hdl->requests_queued,
struct v4l2_ctrl_handler, requests_queued);
/*
* Note: prev_hdl and hdl must contain the same list of control
* references, so if any differences are detected then that is a
* driver bug and the WARN_ON is triggered.
*/
mutex_lock(prev_hdl->lock);
ref_ctrl_prev = list_first_entry(&prev_hdl->ctrl_refs,
struct v4l2_ctrl_ref, node);
list_for_each_entry(ref_ctrl, &hdl->ctrl_refs, node) {
if (ref_ctrl->req)
continue;
while (ref_ctrl_prev->ctrl->id < ref_ctrl->ctrl->id) {
/* Should never happen, but just in case... */
if (list_is_last(&ref_ctrl_prev->node,
&prev_hdl->ctrl_refs))
break;
ref_ctrl_prev = list_next_entry(ref_ctrl_prev, node);
}
if (WARN_ON(ref_ctrl_prev->ctrl->id != ref_ctrl->ctrl->id))
break;
ref_ctrl->req = ref_ctrl_prev->req;
}
mutex_unlock(prev_hdl->lock);
queue:
list_add_tail(&hdl->requests_queued, &main_hdl->requests_queued);
hdl->request_is_queued = true;
mutex_unlock(main_hdl->lock);

@@ -3535,7 +3513,7 @@ v4l2_ctrl_request_hdl_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id)
{
struct v4l2_ctrl_ref *ref = find_ref_lock(hdl, id);
return (ref && ref->req == ref) ? ref->ctrl : NULL;
return (ref && ref->valid_p_req) ? ref->ctrl : NULL;
}
EXPORT_SYMBOL_GPL(v4l2_ctrl_request_hdl_ctrl_find);

@@ -3724,7 +3702,13 @@ static int class_check(struct v4l2_ctrl_handler *hdl, u32 which)
return find_ref_lock(hdl, which | 1) ? 0 : -EINVAL;
}
/* Get extended controls. Allocates the helpers array if needed. */
/*
* Get extended controls. Allocates the helpers array if needed.
*
* Note that v4l2_g_ext_ctrls_common() with 'which' set to
* V4L2_CTRL_WHICH_REQUEST_VAL is only called if the request was
* completed, and in that case valid_p_req is true for all controls.
*/
static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
struct v4l2_ext_controls *cs,
struct video_device *vdev)

@@ -3733,9 +3717,10 @@ static int v4l2_g_ext_ctrls_common(struct v4l2_ctrl_handler *hdl,
struct v4l2_ctrl_helper *helpers = helper;
int ret;
int i, j;
bool def_value;
bool is_default, is_request;
def_value = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
is_default = (cs->which == V4L2_CTRL_WHICH_DEF_VAL);
is_request = (cs->which == V4L2_CTRL_WHICH_REQUEST_VAL);
cs->error_idx = cs->count;
cs->which = V4L2_CTRL_ID2WHICH(cs->which);

@@ -3761,11 +3746,9 @@
ret = -EACCES;
for (i = 0; !ret && i < cs->count; i++) {
int (*ctrl_to_user)(struct v4l2_ext_control *c,
struct v4l2_ctrl *ctrl);
struct v4l2_ctrl *master;
ctrl_to_user = def_value ? def_to_user : cur_to_user;
bool is_volatile = false;
u32 idx = i;
if (helpers[i].mref == NULL)
continue;

@@ -3775,31 +3758,48 @@
v4l2_ctrl_lock(master);
/* g_volatile_ctrl will update the new control values */
if (!def_value &&
/*
* g_volatile_ctrl will update the new control values.
* This makes no sense for V4L2_CTRL_WHICH_DEF_VAL and
* V4L2_CTRL_WHICH_REQUEST_VAL. In the case of requests
* it is v4l2_ctrl_request_complete() that copies the
* volatile controls at the time of request completion
* to the request, so you don't want to do that again.
*/
if (!is_default && !is_request &&
((master->flags & V4L2_CTRL_FLAG_VOLATILE) ||
(master->has_volatiles && !is_cur_manual(master)))) {
for (j = 0; j < master->ncontrols; j++)
cur_to_new(master->cluster[j]);
ret = call_op(master, g_volatile_ctrl);
ctrl_to_user = new_to_user;
is_volatile = true;
}
/* If OK, then copy the current (for non-volatile controls)
or the new (for volatile controls) control values to the
caller */
if (!ret) {
u32 idx = i;
do {
if (helpers[idx].ref->req)
ret = req_to_user(cs->controls + idx,
helpers[idx].ref->req);
else
ret = ctrl_to_user(cs->controls + idx,
helpers[idx].ref->ctrl);
idx = helpers[idx].next;
} while (!ret && idx);
if (ret) {
v4l2_ctrl_unlock(master);
break;
}
/*
* Copy the default value (if is_default is true), the
* request value (if is_request is true and p_req is valid),
* the new volatile value (if is_volatile is true) or the
* current value.
*/
do {
struct v4l2_ctrl_ref *ref = helpers[idx].ref;
if (is_default)
ret = def_to_user(cs->controls + idx, ref->ctrl);
else if (is_request && ref->valid_p_req)
ret = req_to_user(cs->controls + idx, ref);
else if (is_volatile)
ret = new_to_user(cs->controls + idx, ref->ctrl);
else
ret = cur_to_user(cs->controls + idx, ref->ctrl);
idx = helpers[idx].next;
} while (!ret && idx);
v4l2_ctrl_unlock(master);
}

@@ -4437,8 +4437,6 @@ void v4l2_ctrl_request_complete(struct media_request *req,
unsigned int i;
if (ctrl->flags & V4L2_CTRL_FLAG_VOLATILE) {
ref->req = ref;
v4l2_ctrl_lock(master);
/* g_volatile_ctrl will update the current control values */
for (i = 0; i < master->ncontrols; i++)

@@ -4448,21 +4446,12 @@
v4l2_ctrl_unlock(master);
continue;
}
if (ref->req == ref)
if (ref->valid_p_req)
continue;
/* Copy the current control value into the request */
v4l2_ctrl_lock(ctrl);
if (ref->req) {
ptr_to_ptr(ctrl, ref->req->p_req, ref->p_req);
} else {
ptr_to_ptr(ctrl, ctrl->p_cur, ref->p_req);
/*
* Set ref->req to ensure that when userspace wants to
* obtain the controls of this request it will take
* this value and not the current value of the control.
*/
ref->req = ref;
}
cur_to_req(ref);
v4l2_ctrl_unlock(ctrl);
}

@@ -4527,7 +4516,7 @@ int v4l2_ctrl_request_setup(struct media_request *req,
struct v4l2_ctrl_ref *r =
find_ref(hdl, master->cluster[i]->id);
if (r->req && r == r->req) {
if (r->valid_p_req) {
have_new_data = true;
break;
}
@@ -95,19 +95,19 @@
#include <asm/sections.h>
#define v1printk(a...) do { \
if (verbose) \
printk(KERN_INFO a); \
} while (0)
#define v2printk(a...) do { \
if (verbose > 1) \
printk(KERN_INFO a); \
touch_nmi_watchdog(); \
} while (0)
#define eprintk(a...) do { \
printk(KERN_ERR a); \
WARN_ON(1); \
} while (0)
#define v1printk(a...) do { \
if (verbose) \
printk(KERN_INFO a); \
} while (0)
#define v2printk(a...) do { \
if (verbose > 1) \
printk(KERN_INFO a); \
touch_nmi_watchdog(); \
} while (0)
#define eprintk(a...) do { \
printk(KERN_ERR a); \
WARN_ON(1); \
} while (0)
#define MAX_CONFIG_LEN 40
static struct kgdb_io kgdbts_io_ops;
@@ -47,6 +47,8 @@
#include <linux/uaccess.h>
#include <trace/hooks/mmc_core.h>
#include "queue.h"
#include "block.h"
#include "core.h"

@@ -963,6 +965,11 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
struct mmc_blk_data *main_md =
dev_get_drvdata(&host->card->dev);
int part_err;
bool allow = true;
trace_android_vh_mmc_blk_reset(host, err, &allow);
if (!allow)
return -ENODEV;
main_md->part_curr = main_md->part_type;
part_err = mmc_blk_part_switch(host->card, md->part_type);

@@ -1795,6 +1802,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
err && mmc_blk_reset(md, card->host, type)) {
pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
mqrq->retries = MMC_NO_RETRIES;
trace_android_vh_mmc_blk_mq_rw_recovery(card);
return;
}

@@ -2236,6 +2244,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
case MMC_ISSUE_ASYNC:
switch (req_op(req)) {
case REQ_OP_FLUSH:
if (!mmc_cache_enabled(host)) {
blk_mq_end_request(req, BLK_STS_OK);
return MMC_REQ_FINISHED;
}
ret = mmc_blk_cqe_issue_flush(mq, req);
break;
case REQ_OP_READ:
@ -29,6 +29,7 @@ struct mmc_bus_ops {
	int (*shutdown)(struct mmc_host *);
	int (*hw_reset)(struct mmc_host *);
	int (*sw_reset)(struct mmc_host *);
	bool (*cache_enabled)(struct mmc_host *);
};

void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@ -163,4 +164,12 @@ static inline void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
	host->ops->post_req(host, mrq, err);
}

static inline bool mmc_cache_enabled(struct mmc_host *host)
{
	if (host->bus_ops->cache_enabled)
		return host->bus_ops->cache_enabled(host);

	return false;
}

#endif

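The new mmc_cache_enabled() helper lets the core ask the attached bus driver whether the card-side cache is actually on, instead of open-coding the eMMC EXT_CSD test in every caller; a missing callback simply means "no cache". Condensed from the mmc.c and mmc_ops.c hunks in this diff, the two sides fit together roughly like this (a sketch, not the literal patched code):

/* bus driver side: report whether the device cache is enabled */
static bool _mmc_cache_enabled(struct mmc_host *host)
{
	return host->card->ext_csd.cache_size > 0 &&
	       host->card->ext_csd.cache_ctrl & 1;
}

static const struct mmc_bus_ops mmc_ops = {
	/* ... other callbacks ... */
	.cache_enabled	= _mmc_cache_enabled,
};

/* core side: only issue a flush when a cache is actually there */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_cache_enabled(card->host))
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1,
				 MMC_CACHE_FLUSH_TIMEOUT_MS);
	return err;
}

The block-layer hunk earlier in this diff relies on the same helper to complete REQ_OP_FLUSH immediately when no cache is enabled.
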
@ -2033,6 +2033,12 @@ static void mmc_detect(struct mmc_host *host)
|
||||
}
|
||||
}
|
||||
|
||||
static bool _mmc_cache_enabled(struct mmc_host *host)
|
||||
{
|
||||
return host->card->ext_csd.cache_size > 0 &&
|
||||
host->card->ext_csd.cache_ctrl & 1;
|
||||
}
|
||||
|
||||
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
|
||||
{
|
||||
int err = 0;
|
||||
@ -2212,6 +2218,7 @@ static const struct mmc_bus_ops mmc_ops = {
|
||||
.alive = mmc_alive,
|
||||
.shutdown = mmc_shutdown,
|
||||
.hw_reset = _mmc_hw_reset,
|
||||
.cache_enabled = _mmc_cache_enabled,
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -988,9 +988,7 @@ int mmc_flush_cache(struct mmc_card *card)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
if (mmc_card_mmc(card) &&
|
||||
(card->ext_csd.cache_size > 0) &&
|
||||
(card->ext_csd.cache_ctrl & 1)) {
|
||||
if (mmc_cache_enabled(card->host)) {
|
||||
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
|
||||
EXT_CSD_FLUSH_CACHE, 1,
|
||||
MMC_CACHE_FLUSH_TIMEOUT_MS);
|
||||
|
@ -18,6 +18,8 @@
|
||||
#include <linux/mmc/mmc.h>
|
||||
#include <linux/mmc/sd.h>
|
||||
|
||||
#include <trace/hooks/mmc_core.h>
|
||||
|
||||
#include "core.h"
|
||||
#include "card.h"
|
||||
#include "host.h"
|
||||
@ -462,6 +464,8 @@ static void sd_update_bus_speed_mode(struct mmc_card *card)
|
||||
SD_MODE_UHS_SDR12)) {
|
||||
card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
|
||||
}
|
||||
|
||||
trace_android_vh_sd_update_bus_speed_mode(card);
|
||||
}
|
||||
|
||||
static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
|
||||
@ -1403,5 +1407,7 @@ int mmc_attach_sd(struct mmc_host *host)
|
||||
pr_err("%s: error %d whilst initialising SD card\n",
|
||||
mmc_hostname(host), err);
|
||||
|
||||
trace_android_vh_mmc_attach_sd(host, ocr, err);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
@ -14,6 +14,8 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <trace/hooks/mmc_core.h>
|
||||
|
||||
#include "slot-gpio.h"
|
||||
|
||||
struct mmc_gpio {
|
||||
@ -30,6 +32,11 @@ static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
|
||||
/* Schedule a card detection after a debounce timeout */
|
||||
struct mmc_host *host = dev_id;
|
||||
struct mmc_gpio *ctx = host->slot.handler_priv;
|
||||
bool allow = true;
|
||||
|
||||
trace_android_vh_mmc_gpio_cd_irqt(host, &allow);
|
||||
if (!allow)
|
||||
return IRQ_HANDLED;
|
||||
|
||||
host->trigger_card_event = true;
|
||||
mmc_detect_change(host, msecs_to_jiffies(ctx->cd_debounce_delay_ms));
|
||||
|
@ -33,6 +33,8 @@
|
||||
#include <linux/mmc/sdio.h>
|
||||
#include <linux/mmc/slot-gpio.h>
|
||||
|
||||
#include <trace/hooks/mmc_core.h>
|
||||
|
||||
#include "sdhci.h"
|
||||
|
||||
#define DRIVER_NAME "sdhci"
|
||||
@ -2395,6 +2397,7 @@ static int sdhci_get_cd(struct mmc_host *mmc)
|
||||
{
|
||||
struct sdhci_host *host = mmc_priv(mmc);
|
||||
int gpio_cd = mmc_gpio_get_cd(mmc);
|
||||
bool allow = true;
|
||||
|
||||
if (host->flags & SDHCI_DEVICE_DEAD)
|
||||
return 0;
|
||||
@ -2403,6 +2406,10 @@ static int sdhci_get_cd(struct mmc_host *mmc)
|
||||
if (!mmc_card_is_removable(host->mmc))
|
||||
return 1;
|
||||
|
||||
trace_android_vh_sdhci_get_cd(host, &allow);
|
||||
if (!allow)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Try slot gpio detect, if defined it take precedence
|
||||
* over build in controller functionality
|
||||
|
@ -2671,7 +2671,7 @@ do { \
|
||||
seq_printf(seq, "%-12s", s); \
|
||||
for (i = 0; i < n; ++i) \
|
||||
seq_printf(seq, " %16" fmt_spec, v); \
|
||||
seq_putc(seq, '\n'); \
|
||||
seq_putc(seq, '\n'); \
|
||||
} while (0)
|
||||
#define S(s, v) S3("s", s, v)
|
||||
#define T3(fmt_spec, s, v) S3(fmt_spec, s, tx[i].v)
|
||||
|
@ -2559,12 +2559,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
|
||||
spin_lock_bh(&eosw_txq->lock);
|
||||
if (tc != FW_SCHED_CLS_NONE) {
|
||||
if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
|
||||
goto out_unlock;
|
||||
goto out_free_skb;
|
||||
|
||||
next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
|
||||
} else {
|
||||
if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
|
||||
goto out_unlock;
|
||||
goto out_free_skb;
|
||||
|
||||
next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
|
||||
}
|
||||
@ -2600,17 +2600,19 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
|
||||
eosw_txq_flush_pending_skbs(eosw_txq);
|
||||
|
||||
ret = eosw_txq_enqueue(eosw_txq, skb);
|
||||
if (ret) {
|
||||
dev_consume_skb_any(skb);
|
||||
goto out_unlock;
|
||||
}
|
||||
if (ret)
|
||||
goto out_free_skb;
|
||||
|
||||
eosw_txq->state = next_state;
|
||||
eosw_txq->flowc_idx = eosw_txq->pidx;
|
||||
eosw_txq_advance(eosw_txq, 1);
|
||||
ethofld_xmit(dev, eosw_txq);
|
||||
|
||||
out_unlock:
|
||||
spin_unlock_bh(&eosw_txq->lock);
|
||||
return 0;
|
||||
|
||||
out_free_skb:
|
||||
dev_consume_skb_any(skb);
|
||||
spin_unlock_bh(&eosw_txq->lock);
|
||||
return ret;
|
||||
}
|
||||
|
@ -222,7 +222,7 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
|
||||
u32 channel, int fifosz, u8 qmode)
|
||||
{
|
||||
unsigned int rqs = fifosz / 256 - 1;
|
||||
u32 mtl_rx_op, mtl_rx_int;
|
||||
u32 mtl_rx_op;
|
||||
|
||||
mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
|
||||
|
||||
@ -283,11 +283,6 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
|
||||
}
|
||||
|
||||
writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
|
||||
|
||||
/* Enable MTL RX overflow */
|
||||
mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
|
||||
writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
|
||||
ioaddr + MTL_CHAN_INT_CTRL(channel));
|
||||
}
|
||||
|
||||
static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
|
||||
|
@ -4138,7 +4138,6 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
|
||||
/* To handle GMAC own interrupts */
|
||||
if ((priv->plat->has_gmac) || xmac) {
|
||||
int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
|
||||
int mtl_status;
|
||||
|
||||
if (unlikely(status)) {
|
||||
/* For LPI we need to save the tx status */
|
||||
@ -4149,17 +4148,8 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
|
||||
}
|
||||
|
||||
for (queue = 0; queue < queues_count; queue++) {
|
||||
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
|
||||
|
||||
mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
|
||||
queue);
|
||||
if (mtl_status != -EINVAL)
|
||||
status |= mtl_status;
|
||||
|
||||
if (status & CORE_IRQ_MTL_RX_OVERFLOW)
|
||||
stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
|
||||
rx_q->rx_tail_addr,
|
||||
queue);
|
||||
status = stmmac_host_mtl_irq_status(priv, priv->hw,
|
||||
queue);
|
||||
}
|
||||
|
||||
/* PCS link status */
|
||||
|
@ -406,9 +406,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
|
||||
offset += hdr_padded_len;
|
||||
p += hdr_padded_len;
|
||||
|
||||
copy = len;
|
||||
if (copy > skb_tailroom(skb))
|
||||
copy = skb_tailroom(skb);
|
||||
/* Copy all frame if it fits skb->head, otherwise
|
||||
* we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
|
||||
*/
|
||||
if (len <= skb_tailroom(skb))
|
||||
copy = len;
|
||||
else
|
||||
copy = ETH_HLEN + metasize;
|
||||
skb_put_data(skb, p, copy);
|
||||
|
||||
if (metasize) {
|
||||
|
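The receive-path change above stops copying an arbitrary tailroom-sized chunk of the frame into skb->head: either the whole frame fits in the head and is copied, or only the Ethernet header plus the metadata prefix (if any) is copied and the rest stays in the page, so virtio_net_hdr_to_skb() and GRO can pull what they need later. The decision reduces to a small helper like this (a sketch, not the driver's code):

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* How many bytes of the received frame to copy into the skb head. */
static unsigned int rx_copy_len(const struct sk_buff *skb, unsigned int len,
				unsigned int metasize)
{
	if (len <= skb_tailroom(skb))
		return len;			/* whole frame fits */
	return ETH_HLEN + metasize;		/* headers only, rest stays paged */
}
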
@ -3825,6 +3825,68 @@ static inline void set_auth_type(struct airo_info *local, int auth_type)
|
||||
local->last_auth = auth_type;
|
||||
}
|
||||
|
||||
static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int lock)
|
||||
{
|
||||
int i, status;
|
||||
/* large variables, so don't inline this function,
|
||||
* maybe change to kmalloc
|
||||
*/
|
||||
tdsRssiRid rssi_rid;
|
||||
CapabilityRid cap_rid;
|
||||
|
||||
kfree(ai->SSID);
|
||||
ai->SSID = NULL;
|
||||
// general configuration (read/modify/write)
|
||||
status = readConfigRid(ai, lock);
|
||||
if (status != SUCCESS) return ERROR;
|
||||
|
||||
status = readCapabilityRid(ai, &cap_rid, lock);
|
||||
if (status != SUCCESS) return ERROR;
|
||||
|
||||
status = PC4500_readrid(ai, RID_RSSI, &rssi_rid, sizeof(rssi_rid), lock);
|
||||
if (status == SUCCESS) {
|
||||
if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
|
||||
memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
|
||||
}
|
||||
else {
|
||||
kfree(ai->rssi);
|
||||
ai->rssi = NULL;
|
||||
if (cap_rid.softCap & cpu_to_le16(8))
|
||||
ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
|
||||
else
|
||||
airo_print_warn(ai->dev->name, "unknown received signal "
|
||||
"level scale");
|
||||
}
|
||||
ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
|
||||
set_auth_type(ai, AUTH_OPEN);
|
||||
ai->config.modulation = MOD_CCK;
|
||||
|
||||
if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
|
||||
(cap_rid.extSoftCap & cpu_to_le16(1)) &&
|
||||
micsetup(ai) == SUCCESS) {
|
||||
ai->config.opmode |= MODE_MIC;
|
||||
set_bit(FLAG_MIC_CAPABLE, &ai->flags);
|
||||
}
|
||||
|
||||
/* Save off the MAC */
|
||||
for (i = 0; i < ETH_ALEN; i++) {
|
||||
mac[i] = ai->config.macAddr[i];
|
||||
}
|
||||
|
||||
/* Check to see if there are any insmod configured
|
||||
rates to add */
|
||||
if (rates[0]) {
|
||||
memset(ai->config.rates, 0, sizeof(ai->config.rates));
|
||||
for (i = 0; i < 8 && rates[i]; i++) {
|
||||
ai->config.rates[i] = rates[i];
|
||||
}
|
||||
}
|
||||
set_bit (FLAG_COMMIT, &ai->flags);
|
||||
|
||||
return SUCCESS;
|
||||
}
|
||||
|
||||
|
||||
static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
|
||||
{
|
||||
Cmd cmd;
|
||||
@ -3871,58 +3933,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
|
||||
if (lock)
|
||||
up(&ai->sem);
|
||||
if (ai->config.len == 0) {
|
||||
int i;
|
||||
tdsRssiRid rssi_rid;
|
||||
CapabilityRid cap_rid;
|
||||
|
||||
kfree(ai->SSID);
|
||||
ai->SSID = NULL;
|
||||
// general configuration (read/modify/write)
|
||||
status = readConfigRid(ai, lock);
|
||||
if (status != SUCCESS) return ERROR;
|
||||
|
||||
status = readCapabilityRid(ai, &cap_rid, lock);
|
||||
if (status != SUCCESS) return ERROR;
|
||||
|
||||
status = PC4500_readrid(ai, RID_RSSI,&rssi_rid, sizeof(rssi_rid), lock);
|
||||
if (status == SUCCESS) {
|
||||
if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
|
||||
memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
|
||||
}
|
||||
else {
|
||||
kfree(ai->rssi);
|
||||
ai->rssi = NULL;
|
||||
if (cap_rid.softCap & cpu_to_le16(8))
|
||||
ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
|
||||
else
|
||||
airo_print_warn(ai->dev->name, "unknown received signal "
|
||||
"level scale");
|
||||
}
|
||||
ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
|
||||
set_auth_type(ai, AUTH_OPEN);
|
||||
ai->config.modulation = MOD_CCK;
|
||||
|
||||
if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
|
||||
(cap_rid.extSoftCap & cpu_to_le16(1)) &&
|
||||
micsetup(ai) == SUCCESS) {
|
||||
ai->config.opmode |= MODE_MIC;
|
||||
set_bit(FLAG_MIC_CAPABLE, &ai->flags);
|
||||
}
|
||||
|
||||
/* Save off the MAC */
|
||||
for (i = 0; i < ETH_ALEN; i++) {
|
||||
mac[i] = ai->config.macAddr[i];
|
||||
}
|
||||
|
||||
/* Check to see if there are any insmod configured
|
||||
rates to add */
|
||||
if (rates[0]) {
|
||||
memset(ai->config.rates, 0, sizeof(ai->config.rates));
|
||||
for (i = 0; i < 8 && rates[i]; i++) {
|
||||
ai->config.rates[i] = rates[i];
|
||||
}
|
||||
}
|
||||
set_bit (FLAG_COMMIT, &ai->flags);
|
||||
status = airo_readconfig(ai, mac, lock);
|
||||
if (status != SUCCESS)
|
||||
return ERROR;
|
||||
}
|
||||
|
||||
/* Setup the SSIDs if present */
|
||||
|
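The airo change moves the one-time RID reads out of setup_card() into a separate airo_readconfig() marked noinline_for_stack, so the large tdsRssiRid/CapabilityRid temporaries occupy stack only for the duration of that call instead of inflating every setup_card() frame. A condensed sketch of the pattern using the helpers visible in the hunk (the function name here is hypothetical, and this is not the full airo_readconfig() body):

/* Keep big temporaries out of the caller's frame; noinline_for_stack stops
 * the compiler from folding this back into setup_card().
 */
static int noinline_for_stack airo_read_big_rids(struct airo_info *ai, int lock)
{
	tdsRssiRid rssi_rid;		/* several hundred bytes */
	CapabilityRid cap_rid;
	int status;

	status = readCapabilityRid(ai, &cap_rid, lock);
	if (status != SUCCESS)
		return ERROR;

	return PC4500_readrid(ai, RID_RSSI, &rssi_rid, sizeof(rssi_rid), lock);
}
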
@ -2634,6 +2634,7 @@ static void nvme_reset_work(struct work_struct *work)
|
||||
* Don't limit the IOMMU merged segment size.
|
||||
*/
|
||||
dma_set_max_seg_size(dev->dev, 0xffffffff);
|
||||
dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1);
|
||||
|
||||
mutex_unlock(&dev->shutdown_lock);
|
||||
|
||||
|
@ -313,7 +313,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req)
|
||||
case NVME_LOG_ANA:
|
||||
return nvmet_execute_get_log_page_ana(req);
|
||||
}
|
||||
pr_err("unhandled lid %d on qid %d\n",
|
||||
pr_debug("unhandled lid %d on qid %d\n",
|
||||
req->cmd->get_log_page.lid, req->sq->qid);
|
||||
req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
|
||||
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
|
||||
@ -657,7 +657,7 @@ static void nvmet_execute_identify(struct nvmet_req *req)
|
||||
return nvmet_execute_identify_desclist(req);
|
||||
}
|
||||
|
||||
pr_err("unhandled identify cns %d on qid %d\n",
|
||||
pr_debug("unhandled identify cns %d on qid %d\n",
|
||||
req->cmd->identify.cns, req->sq->qid);
|
||||
req->error_loc = offsetof(struct nvme_identify, cns);
|
||||
nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
|
||||
@ -972,7 +972,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
|
||||
return 0;
|
||||
}
|
||||
|
||||
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
|
||||
pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
|
||||
req->sq->qid);
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
|
@ -1660,7 +1660,7 @@ static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
|
||||
if (pcie->ep_state == EP_STATE_ENABLED)
|
||||
return;
|
||||
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
ret = pm_runtime_resume_and_get(dev);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
|
||||
ret);
|
||||
|
@ -116,7 +116,7 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
|
||||
* the config space access window. Since we are working with
|
||||
* the high-order 32 bits, shift everything down by 32 bits.
|
||||
*/
|
||||
node_bits = (cfg->res.start >> 32) & (1 << 12);
|
||||
node_bits = upper_32_bits(cfg->res.start) & (1 << 12);
|
||||
|
||||
v |= node_bits;
|
||||
set_val(v, where, size, val);
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include <linux/pci-acpi.h>
|
||||
#include <linux/pci-ecam.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/io-64-nonatomic-lo-hi.h>
|
||||
#include "../pci.h"
|
||||
|
||||
#if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
|
||||
@ -315,9 +316,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
|
||||
* structure here for the BAR.
|
||||
*/
|
||||
bar4_start = res_pem->start + 0xf00000;
|
||||
pem_pci->ea_entry[0] = (u32)bar4_start | 2;
|
||||
pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u;
|
||||
pem_pci->ea_entry[2] = (u32)(bar4_start >> 32);
|
||||
pem_pci->ea_entry[0] = lower_32_bits(bar4_start) | 2;
|
||||
pem_pci->ea_entry[1] = lower_32_bits(res_pem->end - bar4_start) & ~3u;
|
||||
pem_pci->ea_entry[2] = upper_32_bits(bar4_start);
|
||||
|
||||
cfg->priv = pem_pci;
|
||||
return 0;
|
||||
@ -325,9 +326,9 @@ static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg,
|
||||
|
||||
#if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)
|
||||
|
||||
#define PEM_RES_BASE 0x87e0c0000000UL
|
||||
#define PEM_NODE_MASK GENMASK(45, 44)
|
||||
#define PEM_INDX_MASK GENMASK(26, 24)
|
||||
#define PEM_RES_BASE 0x87e0c0000000ULL
|
||||
#define PEM_NODE_MASK GENMASK_ULL(45, 44)
|
||||
#define PEM_INDX_MASK GENMASK_ULL(26, 24)
|
||||
#define PEM_MIN_DOM_IN_NODE 4
|
||||
#define PEM_MAX_DOM_IN_NODE 10
|
||||
|
||||
|
@ -533,6 +533,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
|
||||
slot->flags &= ~SLOT_ENABLED;
|
||||
continue;
|
||||
}
|
||||
pci_dev_put(dev);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -612,6 +612,12 @@ static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
|
||||
#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
|
||||
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
|
||||
struct resource *res);
|
||||
#else
|
||||
static inline int acpi_get_rc_resources(struct device *dev, const char *hid,
|
||||
u16 segment, struct resource *res)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
#endif
|
||||
|
||||
u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar);
|
||||
|
@ -474,6 +474,11 @@ static int cros_typec_enable_dp(struct cros_typec_data *typec,
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
if (!pd_ctrl->dp_mode) {
|
||||
dev_err(typec->dev, "No valid DP mode provided.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Status VDO. */
|
||||
dp_data.status = DP_STATUS_ENABLED;
|
||||
if (port->mux_flags & USB_PD_MUX_HPD_IRQ)
|
||||
|
@ -11590,13 +11590,20 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
|
||||
lpfc_ctx_cmd ctx_cmd)
|
||||
{
|
||||
struct lpfc_io_buf *lpfc_cmd;
|
||||
IOCB_t *icmd = NULL;
|
||||
int rc = 1;
|
||||
|
||||
if (iocbq->vport != vport)
|
||||
return rc;
|
||||
|
||||
if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
|
||||
!(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
|
||||
if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
|
||||
!(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ) ||
|
||||
iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
|
||||
return rc;
|
||||
|
||||
icmd = &iocbq->iocb;
|
||||
if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
|
||||
icmd->ulpCommand == CMD_CLOSE_XRI_CN)
|
||||
return rc;
|
||||
|
||||
lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
|
||||
|
@ -1391,7 +1391,7 @@ static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
|
||||
static bool tcmu_handle_completions(struct tcmu_dev *udev)
|
||||
{
|
||||
struct tcmu_mailbox *mb;
|
||||
struct tcmu_cmd *cmd;
|
||||
@ -1434,7 +1434,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
|
||||
pr_err("cmd_id %u not found, ring is broken\n",
|
||||
entry->hdr.cmd_id);
|
||||
set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
|
||||
break;
|
||||
return false;
|
||||
}
|
||||
|
||||
tcmu_handle_completion(cmd, entry);
|
||||
|
@ -1287,11 +1287,10 @@ sl811h_hub_control(
|
||||
goto error;
|
||||
put_unaligned_le32(sl811->port1, buf);
|
||||
|
||||
#ifndef VERBOSE
|
||||
if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
|
||||
#endif
|
||||
dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
|
||||
sl811->port1);
|
||||
if (__is_defined(VERBOSE) ||
|
||||
*(u16*)(buf+2)) /* only if wPortChange is interesting */
|
||||
dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
|
||||
sl811->port1);
|
||||
break;
|
||||
case SetPortFeature:
|
||||
if (wIndex != 1 || wLength != 0)
|
||||
|
@ -260,6 +260,7 @@ enum frs_typec_current {
|
||||
#define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
|
||||
|
||||
#define GET_SINK_CAP_RETRY_MS 100
|
||||
#define SEND_DISCOVER_RETRY_MS 100
|
||||
|
||||
struct pd_mode_data {
|
||||
int svid_index; /* current SVID index */
|
||||
@ -367,6 +368,8 @@ struct tcpm_port {
|
||||
struct kthread_work vdm_state_machine;
|
||||
struct hrtimer enable_frs_timer;
|
||||
struct kthread_work enable_frs;
|
||||
struct hrtimer send_discover_timer;
|
||||
struct kthread_work send_discover_work;
|
||||
bool state_machine_running;
|
||||
bool vdm_sm_running;
|
||||
|
||||
@ -1196,6 +1199,16 @@ static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int del
|
||||
}
|
||||
}
|
||||
|
||||
static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
|
||||
{
|
||||
if (delay_ms) {
|
||||
hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
|
||||
} else {
|
||||
hrtimer_cancel(&port->send_discover_timer);
|
||||
kthread_queue_work(port->wq, &port->send_discover_work);
|
||||
}
|
||||
}
|
||||
|
||||
static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
|
||||
unsigned int delay_ms)
|
||||
{
|
||||
@ -1555,6 +1568,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
|
||||
if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
|
||||
typec_partner_set_svdm_version(port->partner,
|
||||
PD_VDO_SVDM_VER(p[0]));
|
||||
|
||||
tcpm_ams_start(port, DISCOVER_IDENTITY);
|
||||
/* 6.4.4.3.1: Only respond as UFP (device) */
|
||||
if (port->data_role == TYPEC_DEVICE &&
|
||||
port->nr_snk_vdo) {
|
||||
@ -1573,14 +1588,19 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
|
||||
}
|
||||
break;
|
||||
case CMD_DISCOVER_SVID:
|
||||
tcpm_ams_start(port, DISCOVER_SVIDS);
|
||||
break;
|
||||
case CMD_DISCOVER_MODES:
|
||||
tcpm_ams_start(port, DISCOVER_MODES);
|
||||
break;
|
||||
case CMD_ENTER_MODE:
|
||||
tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
|
||||
break;
|
||||
case CMD_EXIT_MODE:
|
||||
tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
|
||||
break;
|
||||
case CMD_ATTENTION:
|
||||
tcpm_ams_start(port, ATTENTION);
|
||||
/* Attention command does not have response */
|
||||
*adev_action = ADEV_ATTENTION;
|
||||
return 0;
|
||||
@ -1873,6 +1893,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
|
||||
res = tcpm_ams_start(port, DISCOVER_IDENTITY);
|
||||
if (res == 0)
|
||||
port->send_discover = false;
|
||||
else if (res == -EAGAIN)
|
||||
mod_send_discover_delayed_work(port,
|
||||
SEND_DISCOVER_RETRY_MS);
|
||||
break;
|
||||
case CMD_DISCOVER_SVID:
|
||||
res = tcpm_ams_start(port, DISCOVER_SVIDS);
|
||||
@ -1897,8 +1920,10 @@ static void vdm_run_state_machine(struct tcpm_port *port)
|
||||
break;
|
||||
}
|
||||
|
||||
if (res < 0)
|
||||
if (res < 0) {
|
||||
port->vdm_state = VDM_STATE_ERR_BUSY;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
port->vdm_state = VDM_STATE_SEND_MESSAGE;
|
||||
@ -2287,6 +2312,12 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
|
||||
bool frs_enable;
|
||||
int ret;
|
||||
|
||||
if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
|
||||
port->vdm_state = VDM_STATE_ERR_BUSY;
|
||||
tcpm_ams_finish(port);
|
||||
mod_vdm_delayed_work(port, 0);
|
||||
}
|
||||
|
||||
switch (type) {
|
||||
case PD_DATA_SOURCE_CAP:
|
||||
for (i = 0; i < cnt; i++)
|
||||
@ -2420,7 +2451,10 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
|
||||
NONE_AMS);
|
||||
break;
|
||||
case PD_DATA_VENDOR_DEF:
|
||||
tcpm_handle_vdm_request(port, msg->payload, cnt);
|
||||
if (tcpm_vdm_ams(port) || port->nr_snk_vdo)
|
||||
tcpm_handle_vdm_request(port, msg->payload, cnt);
|
||||
else if (port->negotiated_rev > PD_REV20)
|
||||
tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
|
||||
break;
|
||||
case PD_DATA_BIST:
|
||||
port->bist_request = le32_to_cpu(msg->payload[0]);
|
||||
@ -2462,6 +2496,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
|
||||
enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
|
||||
enum tcpm_state next_state;
|
||||
|
||||
/*
|
||||
* Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
|
||||
* VDM AMS if waiting for VDM responses and will be handled later.
|
||||
*/
|
||||
if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
|
||||
port->vdm_state = VDM_STATE_ERR_BUSY;
|
||||
tcpm_ams_finish(port);
|
||||
mod_vdm_delayed_work(port, 0);
|
||||
}
|
||||
|
||||
switch (type) {
|
||||
case PD_CTRL_GOOD_CRC:
|
||||
case PD_CTRL_PING:
|
||||
@ -2720,7 +2764,14 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
|
||||
enum pd_ext_msg_type type = pd_header_type_le(msg->header);
|
||||
unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
|
||||
|
||||
if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) {
|
||||
/* stopping VDM state machine if interrupted by other Messages */
|
||||
if (tcpm_vdm_ams(port)) {
|
||||
port->vdm_state = VDM_STATE_ERR_BUSY;
|
||||
tcpm_ams_finish(port);
|
||||
mod_vdm_delayed_work(port, 0);
|
||||
}
|
||||
|
||||
if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
|
||||
tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
|
||||
tcpm_log(port, "Unchunked extended messages unsupported");
|
||||
return;
|
||||
@ -2814,7 +2865,7 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
|
||||
"Data role mismatch, initiating error recovery");
|
||||
tcpm_set_state(port, ERROR_RECOVERY, 0);
|
||||
} else {
|
||||
if (msg->header & PD_HEADER_EXT_HDR)
|
||||
if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
|
||||
tcpm_pd_ext_msg_request(port, msg);
|
||||
else if (cnt)
|
||||
tcpm_pd_data_request(port, msg);
|
||||
@ -3725,14 +3776,6 @@ bool tcpm_is_toggling(struct tcpm_port *port)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tcpm_is_toggling);
|
||||
|
||||
static void tcpm_check_send_discover(struct tcpm_port *port)
|
||||
{
|
||||
if ((port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20) &&
|
||||
port->send_discover && port->pd_capable)
|
||||
tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
|
||||
port->send_discover = false;
|
||||
}
|
||||
|
||||
static void tcpm_swap_complete(struct tcpm_port *port, int result)
|
||||
{
|
||||
if (port->swap_pending) {
|
||||
@ -3989,7 +4032,18 @@ static void run_state_machine(struct tcpm_port *port)
|
||||
break;
|
||||
}
|
||||
|
||||
tcpm_check_send_discover(port);
|
||||
/*
|
||||
* 6.4.4.3.1 Discover Identity
|
||||
* "The Discover Identity Command Shall only be sent to SOP when there is an
|
||||
* Explicit Contract."
|
||||
* For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
|
||||
* port->explicit_contract to decide whether to send the command.
|
||||
*/
|
||||
if (port->explicit_contract)
|
||||
mod_send_discover_delayed_work(port, 0);
|
||||
else
|
||||
port->send_discover = false;
|
||||
|
||||
/*
|
||||
* 6.3.5
|
||||
* Sending ping messages is not necessary if
|
||||
@ -4299,7 +4353,18 @@ static void run_state_machine(struct tcpm_port *port)
|
||||
break;
|
||||
}
|
||||
|
||||
tcpm_check_send_discover(port);
|
||||
/*
|
||||
* 6.4.4.3.1 Discover Identity
|
||||
* "The Discover Identity Command Shall only be sent to SOP when there is an
|
||||
* Explicit Contract."
|
||||
* For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
|
||||
* port->explicit_contract.
|
||||
*/
|
||||
if (port->explicit_contract)
|
||||
mod_send_discover_delayed_work(port, 0);
|
||||
else
|
||||
port->send_discover = false;
|
||||
|
||||
power_supply_changed(port->psy);
|
||||
break;
|
||||
|
||||
@ -5413,6 +5478,29 @@ static void tcpm_enable_frs_work(struct kthread_work *work)
|
||||
mutex_unlock(&port->lock);
|
||||
}
|
||||
|
||||
static void tcpm_send_discover_work(struct kthread_work *work)
|
||||
{
|
||||
struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
|
||||
|
||||
mutex_lock(&port->lock);
|
||||
/* No need to send DISCOVER_IDENTITY anymore */
|
||||
if (!port->send_discover)
|
||||
goto unlock;
|
||||
|
||||
/* Retry if the port is not idle */
|
||||
if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
|
||||
mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
/* Only send the Message if the port is host for PD rev2.0 */
|
||||
if (port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20)
|
||||
tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&port->lock);
|
||||
}
|
||||
|
||||
static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
|
||||
{
|
||||
struct tcpm_port *port = typec_get_drvdata(p);
|
||||
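Discover-Identity retries now reuse the same deferral pattern as the other tcpm timers (state_machine_timer, vdm_state_machine_timer, enable_frs_timer): an hrtimer whose handler merely queues a kthread_work, plus a "mod" helper that either arms the timer or queues the work immediately. Stripped of the tcpm specifics, the pattern looks like this (all names hypothetical):

#include <linux/hrtimer.h>
#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/kernel.h>

struct my_port {
	struct kthread_worker	*wq;
	struct kthread_work	work;
	struct hrtimer		timer;
};

/* hrtimer handlers run in hard-irq context, so only queue the work here */
static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
{
	struct my_port *port = container_of(timer, struct my_port, timer);

	kthread_queue_work(port->wq, &port->work);
	return HRTIMER_NORESTART;
}

/* run the work after delay_ms, or right away when delay_ms is 0 */
static void my_mod_delayed_work(struct my_port *port, unsigned int delay_ms)
{
	if (delay_ms) {
		hrtimer_start(&port->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
	} else {
		hrtimer_cancel(&port->timer);
		kthread_queue_work(port->wq, &port->work);
	}
}
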
@ -6271,6 +6359,14 @@ static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
|
||||
{
|
||||
struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
|
||||
|
||||
kthread_queue_work(port->wq, &port->send_discover_work);
|
||||
return HRTIMER_NORESTART;
|
||||
}
|
||||
|
||||
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
|
||||
{
|
||||
struct tcpm_port *port;
|
||||
@ -6301,12 +6397,15 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
|
||||
kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
|
||||
kthread_init_work(&port->event_work, tcpm_pd_event_handler);
|
||||
kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
|
||||
kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
|
||||
hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
port->state_machine_timer.function = state_machine_timer_handler;
|
||||
hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
|
||||
hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
port->enable_frs_timer.function = enable_frs_timer_handler;
|
||||
hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
port->send_discover_timer.function = send_discover_timer_handler;
|
||||
|
||||
spin_lock_init(&port->pd_event_lock);
|
||||
|
||||
|
@ -69,7 +69,7 @@ static int ucsi_psy_get_voltage_max(struct ucsi_connector *con,
|
||||
|
||||
switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
|
||||
case UCSI_CONSTAT_PWR_OPMODE_PD:
|
||||
if (con->num_pdos > 0 && con->num_pdos <= UCSI_MAX_PDOS) {
|
||||
if (con->num_pdos > 0) {
|
||||
pdo = con->src_pdos[con->num_pdos - 1];
|
||||
val->intval = pdo_fixed_voltage(pdo) * 1000;
|
||||
} else {
|
||||
@ -98,7 +98,7 @@ static int ucsi_psy_get_voltage_now(struct ucsi_connector *con,
|
||||
switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
|
||||
case UCSI_CONSTAT_PWR_OPMODE_PD:
|
||||
index = rdo_index(con->rdo);
|
||||
if (index > 0 && index <= UCSI_MAX_PDOS) {
|
||||
if (index > 0) {
|
||||
pdo = con->src_pdos[index - 1];
|
||||
val->intval = pdo_fixed_voltage(pdo) * 1000;
|
||||
} else {
|
||||
@ -125,7 +125,7 @@ static int ucsi_psy_get_current_max(struct ucsi_connector *con,
|
||||
|
||||
switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
|
||||
case UCSI_CONSTAT_PWR_OPMODE_PD:
|
||||
if (con->num_pdos > 0 && con->num_pdos <= UCSI_MAX_PDOS) {
|
||||
if (con->num_pdos > 0) {
|
||||
pdo = con->src_pdos[con->num_pdos - 1];
|
||||
val->intval = pdo_max_current(pdo) * 1000;
|
||||
} else {
|
||||
|
@ -495,7 +495,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
|
||||
}
|
||||
}
|
||||
|
||||
static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
|
||||
static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
|
||||
u32 *pdos, int offset, int num_pdos)
|
||||
{
|
||||
struct ucsi *ucsi = con->ucsi;
|
||||
u64 command;
|
||||
@ -503,17 +504,39 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
|
||||
|
||||
command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
|
||||
command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
|
||||
command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
|
||||
command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
|
||||
command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
|
||||
command |= UCSI_GET_PDOS_SRC_PDOS;
|
||||
ret = ucsi_send_command(ucsi, command, con->src_pdos,
|
||||
sizeof(con->src_pdos));
|
||||
if (ret < 0) {
|
||||
ret = ucsi_send_command(ucsi, command, pdos + offset,
|
||||
num_pdos * sizeof(u32));
|
||||
if (ret < 0)
|
||||
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
|
||||
return;
|
||||
}
|
||||
con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
|
||||
if (ret == 0)
|
||||
if (ret == 0 && offset == 0)
|
||||
dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ucsi_get_src_pdos(struct ucsi_connector *con, int is_partner)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* UCSI max payload means only getting at most 4 PDOs at a time */
|
||||
ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
|
||||
if (ret < 0)
|
||||
return;
|
||||
|
||||
con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
|
||||
if (con->num_pdos < UCSI_MAX_PDOS)
|
||||
return;
|
||||
|
||||
/* get the remaining PDOs, if any */
|
||||
ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
|
||||
PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
|
||||
if (ret < 0)
|
||||
return;
|
||||
|
||||
con->num_pdos += ret / sizeof(u32);
|
||||
}
|
||||
|
||||
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
|
||||
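The GET_PDOS rework matters because a UCSI MESSAGE_IN transfer carries at most 16 bytes in UCSI 1.x, i.e. four 32-bit PDOs per command, while USB PD allows a source to advertise up to PDO_MAX_OBJECTS (7). Worked through the new code: ucsi_get_src_pdos() first requests PDOs 0-3 (UCSI_GET_PDOS_PDO_OFFSET(0), num_pdos = 4); only if all four come back does it issue a second GET_PDOS with offset 4 and num_pdos = 7 - 4 = 3, and con->num_pdos ends up as the sum of the two returned byte counts divided by sizeof(u32). The src_pdos array accordingly grows from UCSI_MAX_PDOS (4) to PDO_MAX_OBJECTS entries so the second chunk has somewhere to land.
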
@ -522,7 +545,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
|
||||
case UCSI_CONSTAT_PWR_OPMODE_PD:
|
||||
con->rdo = con->status.request_data_obj;
|
||||
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
|
||||
ucsi_get_pdos(con, 1);
|
||||
ucsi_get_src_pdos(con, 1);
|
||||
break;
|
||||
case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
|
||||
con->rdo = 0;
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include <linux/power_supply.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/usb/typec.h>
|
||||
#include <linux/usb/pd.h>
|
||||
#include <linux/usb/role.h>
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
@ -134,7 +135,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
|
||||
|
||||
/* GET_PDOS command bits */
|
||||
#define UCSI_GET_PDOS_PARTNER_PDO(_r_) ((u64)(_r_) << 23)
|
||||
#define UCSI_GET_PDOS_PDO_OFFSET(_r_) ((u64)(_r_) << 24)
|
||||
#define UCSI_GET_PDOS_NUM_PDOS(_r_) ((u64)(_r_) << 32)
|
||||
#define UCSI_MAX_PDOS (4)
|
||||
#define UCSI_GET_PDOS_SRC_PDOS ((u64)1 << 34)
|
||||
|
||||
/* -------------------------------------------------------------------------- */
|
||||
@ -302,7 +305,6 @@ struct ucsi {
|
||||
|
||||
#define UCSI_MAX_SVID 5
|
||||
#define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6)
|
||||
#define UCSI_MAX_PDOS (4)
|
||||
|
||||
#define UCSI_TYPEC_VSAFE5V 5000
|
||||
#define UCSI_TYPEC_1_5_CURRENT 1500
|
||||
@ -330,7 +332,7 @@ struct ucsi_connector {
|
||||
struct power_supply *psy;
|
||||
struct power_supply_desc psy_desc;
|
||||
u32 rdo;
|
||||
u32 src_pdos[UCSI_MAX_PDOS];
|
||||
u32 src_pdos[PDO_MAX_OBJECTS];
|
||||
int num_pdos;
|
||||
|
||||
struct usb_role_switch *usb_role_sw;
|
||||
|
@ -1896,6 +1896,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
||||
struct inode *bd_inode = bdev_file_inode(file);
|
||||
loff_t size = i_size_read(bd_inode);
|
||||
struct blk_plug plug;
|
||||
size_t shorted = 0;
|
||||
ssize_t ret;
|
||||
|
||||
if (bdev_read_only(I_BDEV(bd_inode)))
|
||||
@ -1913,12 +1914,17 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
|
||||
if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
iov_iter_truncate(from, size - iocb->ki_pos);
|
||||
size -= iocb->ki_pos;
|
||||
if (iov_iter_count(from) > size) {
|
||||
shorted = iov_iter_count(from) - size;
|
||||
iov_iter_truncate(from, size);
|
||||
}
|
||||
|
||||
blk_start_plug(&plug);
|
||||
ret = __generic_file_write_iter(iocb, from);
|
||||
if (ret > 0)
|
||||
ret = generic_write_sync(iocb, ret);
|
||||
iov_iter_reexpand(from, iov_iter_count(from) + shorted);
|
||||
blk_finish_plug(&plug);
|
||||
return ret;
|
||||
}
|
||||
@ -1930,13 +1936,21 @@ ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
|
||||
struct inode *bd_inode = bdev_file_inode(file);
|
||||
loff_t size = i_size_read(bd_inode);
|
||||
loff_t pos = iocb->ki_pos;
|
||||
size_t shorted = 0;
|
||||
ssize_t ret;
|
||||
|
||||
if (pos >= size)
|
||||
return 0;
|
||||
|
||||
size -= pos;
|
||||
iov_iter_truncate(to, size);
|
||||
return generic_file_read_iter(iocb, to);
|
||||
if (iov_iter_count(to) > size) {
|
||||
shorted = iov_iter_count(to) - size;
|
||||
iov_iter_truncate(to, size);
|
||||
}
|
||||
|
||||
ret = generic_file_read_iter(iocb, to);
|
||||
iov_iter_reexpand(to, iov_iter_count(to) + shorted);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(blkdev_read_iter);
|
||||
|
||||
|
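Both block-device paths now remember how much they clipped and undo it afterwards: iov_iter_truncate() permanently shrinks the iterator's count, so without the matching iov_iter_reexpand() a caller that inspects the residual iov_iter after a short read or write past the device end would see the wrong number of remaining bytes. The pattern in isolation (a sketch, not the exact block-layer code):

#include <linux/fs.h>
#include <linux/uio.h>

/* Clamp an iov_iter to 'avail' bytes around an operation, then restore it. */
static ssize_t do_clamped(struct kiocb *iocb, struct iov_iter *iter, loff_t avail,
			  ssize_t (*op)(struct kiocb *, struct iov_iter *))
{
	size_t shorted = 0;
	ssize_t ret;

	if (iov_iter_count(iter) > avail) {
		shorted = iov_iter_count(iter) - avail;
		iov_iter_truncate(iter, avail);
	}
	ret = op(iocb, iter);
	iov_iter_reexpand(iter, iov_iter_count(iter) + shorted);
	return ret;
}
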
@ -1866,6 +1866,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
|
||||
u32 invalidating_gen = ci->i_rdcache_gen;
|
||||
|
||||
spin_unlock(&ci->i_ceph_lock);
|
||||
ceph_fscache_invalidate(inode);
|
||||
invalidate_mapping_pages(&inode->i_data, 0, -1);
|
||||
spin_lock(&ci->i_ceph_lock);
|
||||
|
||||
|
@ -129,6 +129,10 @@ static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
|
||||
|
||||
vino.ino = ino;
|
||||
vino.snap = CEPH_NOSNAP;
|
||||
|
||||
if (ceph_vino_is_reserved(vino))
|
||||
return ERR_PTR(-ESTALE);
|
||||
|
||||
inode = ceph_find_inode(sb, vino);
|
||||
if (!inode) {
|
||||
struct ceph_mds_request *req;
|
||||
@ -214,6 +218,10 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
|
||||
vino.ino = sfh->ino;
|
||||
vino.snap = sfh->snapid;
|
||||
}
|
||||
|
||||
if (ceph_vino_is_reserved(vino))
|
||||
return ERR_PTR(-ESTALE);
|
||||
|
||||
inode = ceph_find_inode(sb, vino);
|
||||
if (inode)
|
||||
return d_obtain_alias(inode);
|
||||
|
@ -56,6 +56,9 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
|
||||
{
|
||||
struct inode *inode;
|
||||
|
||||
if (ceph_vino_is_reserved(vino))
|
||||
return ERR_PTR(-EREMOTEIO);
|
||||
|
||||
inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
|
||||
ceph_set_ino_cb, &vino);
|
||||
if (!inode)
|
||||
@ -87,14 +90,15 @@ struct inode *ceph_get_snapdir(struct inode *parent)
|
||||
inode->i_mtime = parent->i_mtime;
|
||||
inode->i_ctime = parent->i_ctime;
|
||||
inode->i_atime = parent->i_atime;
|
||||
inode->i_op = &ceph_snapdir_iops;
|
||||
inode->i_fop = &ceph_snapdir_fops;
|
||||
ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
|
||||
ci->i_rbytes = 0;
|
||||
ci->i_btime = ceph_inode(parent)->i_btime;
|
||||
|
||||
if (inode->i_state & I_NEW)
|
||||
if (inode->i_state & I_NEW) {
|
||||
inode->i_op = &ceph_snapdir_iops;
|
||||
inode->i_fop = &ceph_snapdir_fops;
|
||||
ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
|
||||
unlock_new_inode(inode);
|
||||
}
|
||||
|
||||
return inode;
|
||||
}
|
||||
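ceph_get_snapdir() previously overwrote i_op/i_fop and the pinned caps even when the lookup handed back an inode that was already live, which is exactly what the I_NEW protocol exists to prevent. The canonical shape of that protocol, shown generically (test/set callbacks and the ops name are placeholders):

#include <linux/fs.h>

static struct inode *my_get_inode(struct super_block *sb, unsigned long hashval,
				  int (*test)(struct inode *, void *),
				  int (*set)(struct inode *, void *),
				  void *key, const struct inode_operations *iops)
{
	struct inode *inode = iget5_locked(sb, hashval, test, set, key);

	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		/* Only a freshly allocated, still-locked inode may be set up. */
		inode->i_op = iops;
		unlock_new_inode(inode);
	}
	/* An existing in-core inode is returned as-is. */
	return inode;
}
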
@ -1912,6 +1916,7 @@ static void ceph_do_invalidate_pages(struct inode *inode)
|
||||
orig_gen = ci->i_rdcache_gen;
|
||||
spin_unlock(&ci->i_ceph_lock);
|
||||
|
||||
ceph_fscache_invalidate(inode);
|
||||
if (invalidate_inode_pages2(inode->i_mapping) < 0) {
|
||||
pr_err("invalidate_pages %p fails\n", inode);
|
||||
}
|
||||
|
@ -433,6 +433,13 @@ static int ceph_parse_deleg_inos(void **p, void *end,
|
||||
|
||||
ceph_decode_64_safe(p, end, start, bad);
|
||||
ceph_decode_64_safe(p, end, len, bad);
|
||||
|
||||
/* Don't accept a delegation of system inodes */
|
||||
if (start < CEPH_INO_SYSTEM_BASE) {
|
||||
pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
|
||||
start, len);
|
||||
continue;
|
||||
}
|
||||
while (len--) {
|
||||
int err = xa_insert(&s->s_delegated_inos, ino = start++,
|
||||
DELEGATED_INO_AVAILABLE,
|
||||
|
@ -529,10 +529,34 @@ static inline int ceph_ino_compare(struct inode *inode, void *data)
		ci->i_vino.snap == pvino->snap;
}

/*
 * The MDS reserves a set of inodes for its own usage. These should never
 * be accessible by clients, and so the MDS has no reason to ever hand these
 * out. The range is CEPH_MDS_INO_MDSDIR_OFFSET..CEPH_INO_SYSTEM_BASE.
 *
 * These come from src/mds/mdstypes.h in the ceph sources.
 */
#define CEPH_MAX_MDS 0x100
#define CEPH_NUM_STRAY 10
#define CEPH_MDS_INO_MDSDIR_OFFSET (1 * CEPH_MAX_MDS)
#define CEPH_INO_SYSTEM_BASE ((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))

static inline bool ceph_vino_is_reserved(const struct ceph_vino vino)
{
	if (vino.ino < CEPH_INO_SYSTEM_BASE &&
	    vino.ino >= CEPH_MDS_INO_MDSDIR_OFFSET) {
		WARN_RATELIMIT(1, "Attempt to access reserved inode number 0x%llx", vino.ino);
		return true;
	}
	return false;
}

static inline struct inode *ceph_find_inode(struct super_block *sb,
					    struct ceph_vino vino)
{
	if (ceph_vino_is_reserved(vino))
		return NULL;

	/*
	 * NB: The hashval will be run through the fs/inode.c hash function
	 * anyway, so there is no need to squash the inode number down to

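With the constants above, the rejected window is easy to work out: CEPH_MDS_INO_MDSDIR_OFFSET = 1 * 0x100 = 0x100, and CEPH_INO_SYSTEM_BASE = 6 * 0x100 + 0x100 * 10 = 0x600 + 0xa00 = 0x1000. ceph_vino_is_reserved() therefore warns and fails any lookup of inode numbers 0x100 through 0xfff, the block the MDS keeps for its own MDS directories and stray directories, which is the same check the export and delegation hunks earlier in this diff rely on.
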
@ -207,7 +207,8 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
|
||||
| NFS_INO_INVALID_SIZE
|
||||
| NFS_INO_REVAL_PAGECACHE
|
||||
| NFS_INO_INVALID_XATTR);
|
||||
}
|
||||
} else if (flags & NFS_INO_REVAL_PAGECACHE)
|
||||
flags |= NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE;
|
||||
|
||||
if (inode->i_mapping->nrpages == 0)
|
||||
flags &= ~(NFS_INO_INVALID_DATA|NFS_INO_DATA_INVAL_DEFER);
|
||||
|
@ -316,8 +316,6 @@ enum blk_zoned_model {
|
||||
};
|
||||
|
||||
struct queue_limits {
|
||||
unsigned int bio_max_bytes;
|
||||
|
||||
unsigned long bounce_pfn;
|
||||
unsigned long seg_boundary_mask;
|
||||
unsigned long virt_boundary_mask;
|
||||
|
@ -291,6 +291,7 @@ struct device_dma_parameters {
|
||||
* sg limitations.
|
||||
*/
|
||||
unsigned int max_segment_size;
|
||||
unsigned int min_align_mask;
|
||||
unsigned long segment_boundary_mask;
|
||||
};
|
||||
|
||||
|
@ -517,6 +517,22 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
static inline unsigned int dma_get_min_align_mask(struct device *dev)
|
||||
{
|
||||
if (dev->dma_parms)
|
||||
return dev->dma_parms->min_align_mask;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int dma_set_min_align_mask(struct device *dev,
|
||||
unsigned int min_align_mask)
|
||||
{
|
||||
if (WARN_ON_ONCE(!dev->dma_parms))
|
||||
return -EIO;
|
||||
dev->dma_parms->min_align_mask = min_align_mask;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int dma_get_cache_alignment(void)
|
||||
{
|
||||
#ifdef ARCH_DMA_MINALIGN
|
||||
|
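min_align_mask tells the DMA layer, in practice the swiotlb bounce-buffer code, that when it substitutes a bounce buffer the buffer's offset within the given granule must be preserved; NVMe needs this because PRP entries encode the offset within a controller page, and the NVMe hunk earlier in this diff shows the intended caller. A driver-side sketch (hypothetical probe, and it assumes dev->dma_parms has already been set up, as it is for PCI and platform devices):

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int my_probe(struct device *dev)
{
	/* Bounce buffers must keep the buffer's offset within a 4 KiB page. */
	if (dma_set_min_align_mask(dev, SZ_4K - 1))
		return -EIO;
	return 0;
}
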
@ -404,13 +404,13 @@ void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
|
||||
phys_addr_t min_addr, phys_addr_t max_addr,
|
||||
int nid);
|
||||
|
||||
static inline void * __init memblock_alloc(phys_addr_t size, phys_addr_t align)
|
||||
static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
|
||||
{
|
||||
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
|
||||
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
|
||||
}
|
||||
|
||||
static inline void * __init memblock_alloc_raw(phys_addr_t size,
|
||||
static inline void *memblock_alloc_raw(phys_addr_t size,
|
||||
phys_addr_t align)
|
||||
{
|
||||
return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
|
||||
@ -418,7 +418,7 @@ static inline void * __init memblock_alloc_raw(phys_addr_t size,
|
||||
NUMA_NO_NODE);
|
||||
}
|
||||
|
||||
static inline void * __init memblock_alloc_from(phys_addr_t size,
|
||||
static inline void *memblock_alloc_from(phys_addr_t size,
|
||||
phys_addr_t align,
|
||||
phys_addr_t min_addr)
|
||||
{
|
||||
@ -426,33 +426,33 @@ static inline void * __init memblock_alloc_from(phys_addr_t size,
|
||||
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
|
||||
}
|
||||
|
||||
static inline void * __init memblock_alloc_low(phys_addr_t size,
|
||||
static inline void *memblock_alloc_low(phys_addr_t size,
|
||||
phys_addr_t align)
|
||||
{
|
||||
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
|
||||
ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
|
||||
}
|
||||
|
||||
static inline void * __init memblock_alloc_node(phys_addr_t size,
|
||||
static inline void *memblock_alloc_node(phys_addr_t size,
|
||||
phys_addr_t align, int nid)
|
||||
{
|
||||
return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
|
||||
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
|
||||
}
|
||||
|
||||
static inline void __init memblock_free_early(phys_addr_t base,
|
||||
static inline void memblock_free_early(phys_addr_t base,
|
||||
phys_addr_t size)
|
||||
{
|
||||
memblock_free(base, size);
|
||||
}
|
||||
|
||||
static inline void __init memblock_free_early_nid(phys_addr_t base,
|
||||
static inline void memblock_free_early_nid(phys_addr_t base,
|
||||
phys_addr_t size, int nid)
|
||||
{
|
||||
memblock_free(base, size);
|
||||
}
|
||||
|
||||
static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
|
||||
static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
|
||||
{
|
||||
__memblock_free_late(base, size);
|
||||
}
|
||||
@ -460,7 +460,7 @@ static inline void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
|
||||
/*
|
||||
* Set the allocation direction to bottom-up or top-down.
|
||||
*/
|
||||
static inline void __init memblock_set_bottom_up(bool enable)
|
||||
static inline void memblock_set_bottom_up(bool enable)
|
||||
{
|
||||
memblock.bottom_up = enable;
|
||||
}
|
||||
|
@ -17,6 +17,8 @@
|
||||
#include <linux/dma-direction.h>
|
||||
#include <linux/keyslot-manager.h>
|
||||
|
||||
#include <linux/android_vendor.h>
|
||||
|
||||
struct mmc_ios {
|
||||
unsigned int clock; /* clock rate */
|
||||
unsigned short vdd;
|
||||
@ -244,6 +246,7 @@ struct mmc_async_req {
|
||||
struct mmc_slot {
|
||||
int cd_irq;
|
||||
bool cd_wake_enabled;
|
||||
ANDROID_OEM_DATA_ARRAY(1, 2);
|
||||
void *handler_priv;
|
||||
};
|
||||
|
||||
@ -286,10 +289,6 @@ struct mmc_host {
|
||||
u32 ocr_avail_sdio; /* SDIO-specific OCR */
|
||||
u32 ocr_avail_sd; /* SD-specific OCR */
|
||||
u32 ocr_avail_mmc; /* MMC-specific OCR */
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
/* DO NOT USE, is not used, for abi preservation only */
|
||||
struct notifier_block pm_notify;
|
||||
#endif
|
||||
struct wakeup_source *ws; /* Enable consume of uevents */
|
||||
u32 max_current_330;
|
||||
u32 max_current_300;
|
||||
@ -486,6 +485,8 @@ struct mmc_host {
|
||||
/* Host Software Queue support */
|
||||
bool hsq_enabled;
|
||||
|
||||
ANDROID_OEM_DATA(1);
|
||||
|
||||
unsigned long private[] ____cacheline_aligned;
|
||||
};
|
||||
|
||||
|
@ -206,6 +206,7 @@ void __init setup_log_buf(int early);
|
||||
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
|
||||
void dump_stack_print_info(const char *log_lvl);
|
||||
void show_regs_print_info(const char *log_lvl);
|
||||
extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold;
|
||||
extern asmlinkage void dump_stack(void) __cold;
|
||||
extern void printk_safe_flush(void);
|
||||
extern void printk_safe_flush_on_panic(void);
|
||||
@ -269,6 +270,10 @@ static inline void show_regs_print_info(const char *log_lvl)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void dump_stack_lvl(const char *log_lvl)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void dump_stack(void)
|
||||
{
|
||||
}
|
||||
|
@ -29,6 +29,7 @@ enum swiotlb_force {
|
||||
* controllable.
|
||||
*/
|
||||
#define IO_TLB_SHIFT 11
|
||||
#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)
|
||||
|
||||
extern void swiotlb_init(int verbose);
|
||||
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
|
||||
|
@ -65,6 +65,9 @@ struct user_namespace {
|
||||
kgid_t group;
|
||||
struct ns_common ns;
|
||||
unsigned long flags;
|
||||
/* parent_could_setfcap: true if the creator if this ns had CAP_SETFCAP
|
||||
* in its effective capability set at the child ns creation time. */
|
||||
bool parent_could_setfcap;
|
||||
|
||||
#ifdef CONFIG_KEYS
|
||||
/* List of joinable keyrings in this namespace. Modification access of
|
||||
|
@ -65,14 +65,18 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
|
||||
skb_reset_mac_header(skb);
|
||||
|
||||
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
|
||||
u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
|
||||
u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
|
||||
u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
|
||||
u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
|
||||
u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
|
||||
|
||||
if (!pskb_may_pull(skb, needed))
|
||||
return -EINVAL;
|
||||
|
||||
if (!skb_partial_csum_set(skb, start, off))
|
||||
return -EINVAL;
|
||||
|
||||
p_off = skb_transport_offset(skb) + thlen;
|
||||
if (p_off > skb_headlen(skb))
|
||||
if (!pskb_may_pull(skb, p_off))
|
||||
return -EINVAL;
|
||||
} else {
|
||||
/* gso packets without NEEDS_CSUM do not set transport_offset.
|
||||
@ -102,14 +106,14 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
|
||||
}
|
||||
|
||||
p_off = keys.control.thoff + thlen;
|
||||
if (p_off > skb_headlen(skb) ||
|
||||
if (!pskb_may_pull(skb, p_off) ||
|
||||
keys.basic.ip_proto != ip_proto)
|
||||
return -EINVAL;
|
||||
|
||||
skb_set_transport_header(skb, keys.control.thoff);
|
||||
} else if (gso_type) {
|
||||
p_off = thlen;
|
||||
if (p_off > skb_headlen(skb))
|
||||
if (!pskb_may_pull(skb, p_off))
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
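skb_headlen() only says how much of the packet is already linear, so a fragmented skb whose headers sit in page frags would fail that test even though the packet is long enough. pskb_may_pull() instead pulls the requested bytes into the head (reallocating if necessary) and fails only when the packet really is shorter than its headers, which is the behaviour the checksum/GSO sanity checks above want. A minimal sketch (the helper name is hypothetical):

#include <linux/skbuff.h>

/* Make sure [0, thoff + thlen) is readable in skb->head, then mark the
 * transport header. Fails only for genuinely truncated packets. */
static int ensure_transport_linear(struct sk_buff *skb, unsigned int thoff,
				   unsigned int thlen)
{
	if (!pskb_may_pull(skb, thoff + thlen))
		return -EINVAL;
	skb_set_transport_header(skb, thoff);
	return 0;
}
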
@ -9,6 +9,7 @@
|
||||
#include <asm/page.h> /* pgprot_t */
|
||||
#include <linux/rbtree.h>
|
||||
#include <linux/overflow.h>
|
||||
#include <linux/android_vendor.h>
|
||||
|
||||
#include <asm/vmalloc.h>
|
||||
|
||||
@ -57,6 +58,7 @@ struct vm_struct {
|
||||
unsigned int nr_pages;
|
||||
phys_addr_t phys_addr;
|
||||
const void *caller;
|
||||
ANDROID_OEM_DATA(1);
|
||||
};
|
||||
|
||||
struct vmap_area {
|
||||
|
@ -303,12 +303,14 @@ struct v4l2_ctrl {
|
||||
* the control has been applied. This prevents applying controls
|
||||
* from a cluster with multiple controls twice (when the first
|
||||
* control of a cluster is applied, they all are).
|
||||
* @req: If set, this refers to another request that sets this control.
|
||||
* @valid_p_req: If set, then p_req contains the control value for the request.
|
||||
* @p_req: If the control handler containing this control reference
|
||||
* is bound to a media request, then this points to the
|
||||
* value of the control that should be applied when the request
|
||||
* value of the control that must be applied when the request
|
||||
* is executed, or to the value of the control at the time
|
||||
* that the request was completed.
|
||||
* that the request was completed. If @valid_p_req is false,
|
||||
* then this control was never set for this request and the
|
||||
* control will not be updated when this request is applied.
|
||||
*
|
||||
* Each control handler has a list of these refs. The list_head is used to
|
||||
* keep a sorted-by-control-ID list of all controls, while the next pointer
|
||||
@ -321,7 +323,7 @@ struct v4l2_ctrl_ref {
|
||||
struct v4l2_ctrl_helper *helper;
|
||||
bool from_other_dev;
|
||||
bool req_done;
|
||||
struct v4l2_ctrl_ref *req;
|
||||
bool valid_p_req;
|
||||
union v4l2_ctrl_ptr p_req;
|
||||
};
|
||||
|
||||
@ -348,7 +350,7 @@ struct v4l2_ctrl_ref {
|
||||
* @error: The error code of the first failed control addition.
|
||||
* @request_is_queued: True if the request was queued.
|
||||
* @requests: List to keep track of open control handler request objects.
|
||||
* For the parent control handler (@req_obj.req == NULL) this
|
||||
* For the parent control handler (@req_obj.ops == NULL) this
|
||||
* is the list header. When the parent control handler is
|
||||
* removed, it has to unbind and put all these requests since
|
||||
* they refer to the parent.
|
||||
|
@ -15,10 +15,11 @@ struct cpumask;
|
||||
DECLARE_HOOK(android_vh_gic_v3_affinity_init,
|
||||
TP_PROTO(int irq, u32 offset, u64 *affinity),
|
||||
TP_ARGS(irq, offset, affinity));
|
||||
DECLARE_HOOK(android_vh_gic_v3_set_affinity,
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_gic_v3_set_affinity,
|
||||
TP_PROTO(struct irq_data *d, const struct cpumask *mask_val,
|
||||
u64 *affinity, bool force, void __iomem *base),
|
||||
TP_ARGS(d, mask_val, affinity, force, base));
|
||||
TP_ARGS(d, mask_val, affinity, force, base),
|
||||
1);
|
||||
|
||||
/* macro versions of hooks are no longer required */
|
||||
|
||||
|
include/trace/hooks/hung_task.h (new file, 24 lines)
@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hung_task

#define TRACE_INCLUDE_PATH trace/hooks

#if !defined(_TRACE_HOOK_HUNG_TASK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_HUNG_TASK_H

#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>

DECLARE_HOOK(android_vh_check_uninterruptible_tasks,
	TP_PROTO(struct task_struct *t, unsigned long timeout,
		bool *need_check),
	TP_ARGS(t, timeout, need_check));

DECLARE_HOOK(android_vh_check_uninterruptible_tasks_dn,
	TP_PROTO(void *unused),
	TP_ARGS(unused));

#endif /* _TRACE_HOOK_HUNG_TASK_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

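Like the other Android vendor hooks in this series, DECLARE_HOOK() generates a tracepoint-backed attachment point; a vendor module attaches with the generated register_trace_*() call and its probe receives the TP_PROTO arguments after the usual void *data cookie. A sketch of a module-side consumer of the new hung-task hook (the policy in the callback is made up for illustration):

#include <linux/module.h>
#include <linux/sched.h>
#include <trace/hooks/hung_task.h>

static void my_check_uninterruptible(void *data, struct task_struct *t,
				     unsigned long timeout, bool *need_check)
{
	/* Example policy: don't flag kernel threads as hung. */
	if (t->flags & PF_KTHREAD)
		*need_check = false;
}

static int __init my_hook_init(void)
{
	return register_trace_android_vh_check_uninterruptible_tasks(
			my_check_uninterruptible, NULL);
}
module_init(my_hook_init);
MODULE_LICENSE("GPL");
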
@ -18,9 +18,18 @@ DECLARE_HOOK(android_vh_iommu_setup_dma_ops,
|
||||
TP_PROTO(struct device *dev, u64 dma_base, u64 size),
|
||||
TP_ARGS(dev, dma_base, size));
|
||||
|
||||
DECLARE_HOOK(android_vh_iommu_alloc_iova,
|
||||
TP_PROTO(struct device *dev, dma_addr_t iova, size_t size),
|
||||
TP_ARGS(dev, iova, size));
|
||||
|
||||
DECLARE_HOOK(android_vh_iommu_free_iova,
|
||||
TP_PROTO(dma_addr_t iova, size_t size),
|
||||
TP_ARGS(iova, size));
|
||||
#else
|
||||
|
||||
#define trace_android_vh_iommu_setup_dma_ops(dev, dma_base, size)
|
||||
#define trace_android_vh_iommu_alloc_iova(dev, iova, size)
|
||||
#define trace_android_vh_iommu_free_iova(iova, size)
|
||||
|
||||
#endif
|
||||
|
||||
|
@ -47,6 +47,17 @@ DECLARE_HOOK(android_vh_meminfo_proc_show,
|
||||
DECLARE_HOOK(android_vh_exit_mm,
|
||||
TP_PROTO(struct mm_struct *mm),
|
||||
TP_ARGS(mm));
|
||||
DECLARE_HOOK(android_vh_get_unmapped_area_from_anti_fragment_pool,
|
||||
TP_PROTO(struct mm_struct *mm, struct vm_unmapped_area_info *info,
|
||||
unsigned long *addr),
|
||||
TP_ARGS(mm, info, addr));
|
||||
DECLARE_HOOK(android_vh_exclude_reserved_zone,
|
||||
TP_PROTO(struct mm_struct *mm, struct vm_unmapped_area_info *info),
|
||||
TP_ARGS(mm, info));
|
||||
DECLARE_HOOK(android_vh_get_unmapped_area_include_reserved_zone,
|
||||
TP_PROTO(struct mm_struct *mm, struct vm_unmapped_area_info *info,
|
||||
unsigned long *addr),
|
||||
TP_ARGS(mm, info, addr));
|
||||
DECLARE_HOOK(android_vh_show_mem,
|
||||
TP_PROTO(unsigned int filter, nodemask_t *nodemask),
|
||||
TP_ARGS(filter, nodemask));
|
||||
@ -60,6 +71,22 @@ struct slabinfo;
|
||||
DECLARE_HOOK(android_vh_cache_show,
|
||||
TP_PROTO(struct seq_file *m, struct slabinfo *sinfo, struct kmem_cache *s),
|
||||
TP_ARGS(m, sinfo, s));
|
||||
struct dirty_throttle_control;
|
||||
DECLARE_HOOK(android_vh_mm_dirty_limits,
|
||||
TP_PROTO(struct dirty_throttle_control *const gdtc, bool strictlimit,
|
||||
unsigned long dirty, unsigned long bg_thresh,
|
||||
unsigned long nr_reclaimable, unsigned long pages_dirtied),
|
||||
TP_ARGS(gdtc, strictlimit, dirty, bg_thresh,
|
||||
nr_reclaimable, pages_dirtied));
|
||||
DECLARE_HOOK(android_vh_save_vmalloc_stack,
|
||||
TP_PROTO(unsigned long flags, struct vm_struct *vm),
|
||||
TP_ARGS(flags, vm));
|
||||
DECLARE_HOOK(android_vh_show_stack_hash,
|
||||
TP_PROTO(struct seq_file *m, struct vm_struct *v),
|
||||
TP_ARGS(m, v));
|
||||
DECLARE_HOOK(android_vh_save_track_hash,
|
||||
TP_PROTO(unsigned long p),
|
||||
TP_ARGS(p));
|
||||
/* macro versions of hooks are no longer required */
|
||||
|
||||
#endif /* _TRACE_HOOK_MM_H */
|
||||
|
Some files were not shown because too many files have changed in this diff.