Merge c0cc271173 ("Merge tag 'modules-for-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux") into android-mainline

Steps along the way to 5.7-rc1

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I8d1ec816336fcc58a82ac85c6f5a9dacdd61dbf0

This commit is contained in commit 3d496e90bf.
@@ -1509,7 +1509,10 @@ config ARM64_PTR_AUTH
 	default y
 	depends on !KVM || ARM64_VHE
 	depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
-	depends on CC_IS_GCC || (CC_IS_CLANG && AS_HAS_CFI_NEGATE_RA_STATE)
+	# GCC 9.1 and later inserts a .note.gnu.property section note for PAC
+	# which is only understood by binutils starting with version 2.33.1.
+	depends on !CC_IS_GCC || GCC_VERSION < 90100 || LD_VERSION >= 233010000
+	depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
 	depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
 	help
 	  Pointer authentication (part of the ARMv8.3 Extensions) provides
@@ -52,19 +52,6 @@ config DEBUG_WX
 
 	  If in doubt, say "Y".
 
-config DEBUG_ALIGN_RODATA
-	depends on STRICT_KERNEL_RWX
-	bool "Align linker sections up to SECTION_SIZE"
-	help
-	  If this option is enabled, sections that may potentially be marked as
-	  read only or non-executable will be aligned up to the section size of
-	  the kernel. This prevents sections from being split into pages and
-	  avoids a potential TLB penalty. The downside is an increase in
-	  alignment and potentially wasted space. Turn on this option if
-	  performance is more important than memory pressure.
-
-	  If in doubt, say N.
-
 config DEBUG_EFI
 	depends on EFI && DEBUG_INFO
 	bool "UEFI debugging"
@@ -65,6 +65,10 @@ stack_protector_prepare: prepare0
 					include/generated/asm-offsets.h))
 endif
 
+# Ensure that if the compiler supports branch protection we default it
+# off, this will be overridden if we are using branch protection.
+branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)
+
 ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
 branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
 branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pac-ret+leaf
@@ -73,13 +77,14 @@ branch-prot-flags-$(CONFIG_CC_HAS_BRANCH_PROT_PAC_RET) := -mbranch-protection=pa
 # we pass it only to the assembler. This option is utilized only in case of non
 # integrated assemblers.
 branch-prot-flags-$(CONFIG_AS_HAS_PAC) += -Wa,-march=armv8.3-a
-KBUILD_CFLAGS += $(branch-prot-flags-y)
 endif
 
 ifeq ($(CONFIG_SHADOW_CALL_STACK), y)
 KBUILD_CFLAGS += -ffixed-x18
 endif
 
+KBUILD_CFLAGS += $(branch-prot-flags-y)
+
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS += -mbig-endian
 CHECKFLAGS += -D__AARCH64EB__
@@ -120,22 +120,12 @@
 
 /*
  * Alignment of kernel segments (e.g. .text, .data).
- */
-#if defined(CONFIG_DEBUG_ALIGN_RODATA)
-/*
- *  4 KB granule:   1 level 2 entry
- * 16 KB granule: 128 level 3 entries, with contiguous bit
- * 64 KB granule:  32 level 3 entries, with contiguous bit
- */
-#define SEGMENT_ALIGN	SZ_2M
-#else
-/*
+ *
  *  4 KB granule:  16 level 3 entries, with contiguous bit
  * 16 KB granule:   4 level 3 entries, without contiguous bit
  * 64 KB granule:   1 level 3 entry
  */
 #define SEGMENT_ALIGN	SZ_64K
-#endif
 
 /*
  * Memory types available.
@@ -601,7 +601,7 @@ static struct undef_hook setend_hooks[] = {
 	},
 	{
 		/* Thumb mode */
-		.instr_mask	= 0x0000fff7,
+		.instr_mask	= 0xfffffff7,
 		.instr_val	= 0x0000b650,
 		.pstate_mask	= (PSR_AA32_T_BIT | PSR_AA32_MODE_MASK),
 		.pstate_val	= (PSR_AA32_T_BIT | PSR_AA32_MODE_USR),
@@ -68,14 +68,6 @@ static irqreturn_t hw_tick(int irq, void *dummy)
 
 /***************************************************************************/
 
-static struct irqaction m68328_timer_irq = {
-	.name	 = "timer",
-	.flags	 = IRQF_TIMER,
-	.handler = hw_tick,
-};
-
-/***************************************************************************/
-
 static u64 m68328_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
@@ -102,11 +94,17 @@ static struct clocksource m68328_clk = {
 
 void hw_timer_init(irq_handler_t handler)
 {
+	int ret;
+
 	/* disable timer 1 */
 	TCTL = 0;
 
 	/* set ISR */
-	setup_irq(TMR_IRQ_NUM, &m68328_timer_irq);
+	ret = request_irq(TMR_IRQ_NUM, hw_tick, IRQF_TIMER, "timer", NULL);
+	if (ret) {
+		pr_err("Failed to request irq %d (timer): %pe\n", TMR_IRQ_NUM,
+		       ERR_PTR(ret));
+	}
 
 	/* Restart mode, Enable int, Set clock source */
 	TCTL = TCTL_OM | TCTL_IRQEN | CLOCK_SOURCE;
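Every m68k/ColdFire timer below repeats this same conversion: the static struct irqaction plus setup_irq() pair becomes a single request_irq() call, and %pe with ERR_PTR() prints the negative errno symbolically. A minimal sketch of the pattern with hypothetical names (not from this merge):

#include <linux/interrupt.h>
#include <linux/err.h>

static irqreturn_t demo_tick(int irq, void *dev_id)
{
	/* ack the hardware and run the tick work here */
	return IRQ_HANDLED;
}

static void demo_timer_init(void)
{
	int ret;

	/* one call now does what the irqaction struct + setup_irq() did */
	ret = request_irq(42 /* hypothetical IRQ */, demo_tick, IRQF_TIMER,
			  "demo-timer", NULL);
	if (ret)
		pr_err("Failed to request irq %d (timer): %pe\n",
		       42, ERR_PTR(ret));	/* %pe prints e.g. -EBUSY */
}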
@@ -111,14 +111,6 @@ static irqreturn_t pit_tick(int irq, void *dummy)
 
 /***************************************************************************/
 
-static struct irqaction pit_irq = {
-	.name	 = "timer",
-	.flags	 = IRQF_TIMER,
-	.handler = pit_tick,
-};
-
-/***************************************************************************/
-
 static u64 pit_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
@@ -146,6 +138,8 @@ static struct clocksource pit_clk = {
 
 void hw_timer_init(irq_handler_t handler)
 {
+	int ret;
+
 	cf_pit_clockevent.cpumask = cpumask_of(smp_processor_id());
 	cf_pit_clockevent.mult = div_sc(FREQ, NSEC_PER_SEC, 32);
 	cf_pit_clockevent.max_delta_ns =
@@ -156,7 +150,11 @@ void hw_timer_init(irq_handler_t handler)
 	cf_pit_clockevent.min_delta_ticks = 0x3f;
 	clockevents_register_device(&cf_pit_clockevent);
 
-	setup_irq(MCF_IRQ_PIT1, &pit_irq);
+	ret = request_irq(MCF_IRQ_PIT1, pit_tick, IRQF_TIMER, "timer", NULL);
+	if (ret) {
+		pr_err("Failed to request irq %d (timer): %pe\n", MCF_IRQ_PIT1,
+		       ERR_PTR(ret));
+	}
 
 	clocksource_register_hz(&pit_clk, FREQ);
 }
@@ -50,18 +50,19 @@ irqreturn_t mcfslt_profile_tick(int irq, void *dummy)
 	return IRQ_HANDLED;
 }
 
-static struct irqaction mcfslt_profile_irq = {
-	.name	 = "profile timer",
-	.flags	 = IRQF_TIMER,
-	.handler = mcfslt_profile_tick,
-};
-
 void mcfslt_profile_init(void)
 {
+	int ret;
+
 	printk(KERN_INFO "PROFILE: lodging TIMER 1 @ %dHz as profile timer\n",
 	       PROFILEHZ);
 
-	setup_irq(MCF_IRQ_PROFILER, &mcfslt_profile_irq);
+	ret = request_irq(MCF_IRQ_PROFILER, mcfslt_profile_tick, IRQF_TIMER,
+			  "profile timer", NULL);
+	if (ret) {
+		pr_err("Failed to request irq %d (profile timer): %pe\n",
+		       MCF_IRQ_PROFILER, ERR_PTR(ret));
+	}
 
 	/* Set up TIMER 2 as high speed profile clock */
 	__raw_writel(MCF_BUSCLK / PROFILEHZ - 1, PA(MCFSLT_STCNT));
@@ -92,12 +93,6 @@ static irqreturn_t mcfslt_tick(int irq, void *dummy)
 	return timer_interrupt(irq, dummy);
 }
 
-static struct irqaction mcfslt_timer_irq = {
-	.name	 = "timer",
-	.flags	 = IRQF_TIMER,
-	.handler = mcfslt_tick,
-};
-
 static u64 mcfslt_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
@@ -126,6 +121,8 @@ static struct clocksource mcfslt_clk = {
 
 void hw_timer_init(irq_handler_t handler)
 {
+	int r;
+
 	mcfslt_cycles_per_jiffy = MCF_BUSCLK / HZ;
 	/*
	 * The coldfire slice timer (SLT) runs from STCNT to 0 included,
@@ -140,7 +137,11 @@ void hw_timer_init(irq_handler_t handler)
 	mcfslt_cnt = mcfslt_cycles_per_jiffy;
 
 	timer_interrupt = handler;
-	setup_irq(MCF_IRQ_TIMER, &mcfslt_timer_irq);
+	r = request_irq(MCF_IRQ_TIMER, mcfslt_tick, IRQF_TIMER, "timer", NULL);
+	if (r) {
+		pr_err("Failed to request irq %d (timer): %pe\n", MCF_IRQ_TIMER,
+		       ERR_PTR(r));
+	}
 
 	clocksource_register_hz(&mcfslt_clk, MCF_BUSCLK);
@@ -82,14 +82,6 @@ static irqreturn_t mcftmr_tick(int irq, void *dummy)
 
 /***************************************************************************/
 
-static struct irqaction mcftmr_timer_irq = {
-	.name	 = "timer",
-	.flags	 = IRQF_TIMER,
-	.handler = mcftmr_tick,
-};
-
-/***************************************************************************/
-
 static u64 mcftmr_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
@@ -118,6 +110,8 @@ static struct clocksource mcftmr_clk = {
 
 void hw_timer_init(irq_handler_t handler)
 {
+	int r;
+
 	__raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR));
 	mcftmr_cycles_per_jiffy = FREQ / HZ;
 	/*
@@ -134,7 +128,11 @@ void hw_timer_init(irq_handler_t handler)
 
 	timer_interrupt = handler;
 	init_timer_irq();
-	setup_irq(MCF_IRQ_TIMER, &mcftmr_timer_irq);
+	r = request_irq(MCF_IRQ_TIMER, mcftmr_tick, IRQF_TIMER, "timer", NULL);
+	if (r) {
+		pr_err("Failed to request irq %d (timer): %pe\n", MCF_IRQ_TIMER,
+		       ERR_PTR(r));
+	}
 
 #ifdef CONFIG_HIGHPROFILE
 	coldfire_profile_init();
@@ -170,14 +168,10 @@ irqreturn_t coldfire_profile_tick(int irq, void *dummy)
 
 /***************************************************************************/
 
-static struct irqaction coldfire_profile_irq = {
-	.name	 = "profile timer",
-	.flags	 = IRQF_TIMER,
-	.handler = coldfire_profile_tick,
-};
-
 void coldfire_profile_init(void)
 {
+	int ret;
+
 	printk(KERN_INFO "PROFILE: lodging TIMER2 @ %dHz as profile timer\n",
 	       PROFILEHZ);
 
@@ -188,7 +182,12 @@ void coldfire_profile_init(void)
 	__raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 |
		     MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR));
 
-	setup_irq(MCF_IRQ_PROFILER, &coldfire_profile_irq);
+	ret = request_irq(MCF_IRQ_PROFILER, coldfire_profile_tick, IRQF_TIMER,
+			  "profile timer", NULL);
+	if (ret) {
+		pr_err("Failed to request irq %d (profile timer): %pe\n",
+		       MCF_IRQ_PROFILER, ERR_PTR(ret));
+	}
 }
 
 /***************************************************************************/
@@ -266,8 +266,9 @@ config PANIC_TIMEOUT
 	default 180
 
 config COMPAT
-	bool
-	default y if PPC64
+	bool "Enable support for 32bit binaries"
+	depends on PPC64
+	default y if !CPU_LITTLE_ENDIAN
 	select COMPAT_BINFMT_ELF
 	select ARCH_WANT_OLD_COMPAT_IPC
 	select COMPAT_OLD_SIGACTION
@@ -60,6 +60,8 @@ CONFIG_CFG80211=m
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
# CONFIG_MAC80211_RC_MINSTREL is not set
CONFIG_UEVENT_HELPER=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65535
@@ -162,10 +162,10 @@ static inline bool test_thread_local_flags(unsigned int flags)
 	return (ti->local_flags & flags) != 0;
 }
 
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_COMPAT
 #define is_32bit_task()	(test_thread_flag(TIF_32BIT))
 #else
-#define is_32bit_task()	(1)
+#define is_32bit_task()	(IS_ENABLED(CONFIG_PPC32))
 #endif
 
 #if defined(CONFIG_PPC64)
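Keying is_32bit_task() to CONFIG_COMPAT instead of CONFIG_PPC64 makes it a compile-time constant whenever compat support is off. A hedged sketch of the effect on a hypothetical caller (not from this merge):

/* CONFIG_COMPAT=y : answered per task via the TIF_32BIT flag.
 * CONFIG_COMPAT=n : IS_ENABLED(CONFIG_PPC32) folds to 1 on a 32-bit
 * kernel and 0 on a 64-bit one, so the dead branch is eliminated. */
if (is_32bit_task())
	addr = min_t(unsigned long, addr, 0xffffffffUL);	/* hypothetical clamp */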
@@ -31,6 +31,7 @@
 #define __ARCH_WANT_SYS_SOCKETCALL
 #define __ARCH_WANT_SYS_FADVISE64
 #define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
 #define __ARCH_WANT_SYS_NICE
 #define __ARCH_WANT_SYS_OLD_GETRLIMIT
 #define __ARCH_WANT_SYS_OLD_UNAME
@@ -40,16 +40,17 @@ CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
 endif
 
 obj-y				:= cputable.o syscalls.o \
-				   irq.o align.o signal_32.o pmc.o vdso.o \
+				   irq.o align.o signal_$(BITS).o pmc.o vdso.o \
 				   process.o systbl.o idle.o \
 				   signal.o sysfs.o cacheinfo.o time.o \
 				   prom.o traps.o setup-common.o \
 				   udbg.o misc.o io.o misc_$(BITS).o \
 				   of_platform.o prom_parse.o
 obj-y				+= ptrace/
-obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o signal_64.o \
+obj-$(CONFIG_PPC64)		+= setup_64.o \
 				   paca.o nvram_64.o firmware.o note.o \
 				   syscall_64.o
+obj-$(CONFIG_COMPAT)		+= sys_ppc32.o signal_32.o
 obj-$(CONFIG_VDSO32)		+= vdso32/
 obj-$(CONFIG_PPC_WATCHDOG)	+= watchdog.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
@@ -52,8 +52,10 @@
 SYS_CALL_TABLE:
 	.tc sys_call_table[TC],sys_call_table
 
+#ifdef CONFIG_COMPAT
 COMPAT_SYS_CALL_TABLE:
 	.tc compat_sys_call_table[TC],compat_sys_call_table
+#endif
 
 /* This value is used to mark exception frames on the stack. */
 exception_marker:
@@ -3121,22 +3121,3 @@ handle_dabr_fault:
 	li	r5,SIGSEGV
 	bl	bad_page_fault
 	b	interrupt_return
-
-/*
- * When doorbell is triggered from system reset wakeup, the message is
- * not cleared, so it would fire again when EE is enabled.
- *
- * When coming from local_irq_enable, there may be the same problem if
- * we were hard disabled.
- *
- * Execute msgclr to clear pending exceptions before handling it.
- */
-h_doorbell_common_msgclr:
-	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
-	PPC_MSGCLR(3)
-	b	h_doorbell_common_virt
-
-doorbell_super_common_msgclr:
-	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
-	PPC_MSGCLRP(3)
-	b	doorbell_super_common_virt
@@ -527,6 +527,19 @@ void irq_set_pending_from_srr1(unsigned long srr1)
 		return;
 	}
 
+	if (reason == PACA_IRQ_DBELL) {
+		/*
+		 * When doorbell triggers a system reset wakeup, the message
+		 * is not cleared, so if the doorbell interrupt is replayed
+		 * and the IPI handled, the doorbell interrupt would still
+		 * fire when EE is enabled.
+		 *
+		 * To avoid taking the superfluous doorbell interrupt,
+		 * execute a msgclr here before the interrupt is replayed.
+		 */
+		ppc_msgclr(PPC_DBELL_MSGTYPE);
+	}
+
 	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
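This C block takes over from the assembly msgclr stubs deleted in the previous hunk. A rough sketch of the core idea, assuming the ppc_msgclr() helper and PPC_DBELL_MSGTYPE definition from asm/dbell.h:

#include <asm/dbell.h>

/* hypothetical wrapper, simplified from the hunk above: drop the
 * latched doorbell message before replaying the interrupt, otherwise
 * the doorbell fires again as soon as EE is re-enabled */
static inline void clear_stale_doorbell(void)
{
	ppc_msgclr(PPC_DBELL_MSGTYPE);
}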
@@ -55,14 +55,17 @@ _GLOBAL(ppc_save_regs)
 	PPC_STL	r29,29*SZL(r3)
 	PPC_STL	r30,30*SZL(r3)
 	PPC_STL	r31,31*SZL(r3)
+	lbz	r0,PACAIRQSOFTMASK(r13)
+	PPC_STL	r0,SOFTE-STACK_FRAME_OVERHEAD(r3)
 #endif
 	/* go up one stack frame for SP */
 	PPC_LL	r4,0(r1)
 	PPC_STL	r4,1*SZL(r3)
 	/* get caller's LR */
 	PPC_LL	r0,LRSAVE(r4)
-	PPC_STL	r0,_NIP-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r0,_LINK-STACK_FRAME_OVERHEAD(r3)
+	mflr	r0
+	PPC_STL	r0,_NIP-STACK_FRAME_OVERHEAD(r3)
 	mfmsr	r0
 	PPC_STL	r0,_MSR-STACK_FRAME_OVERHEAD(r3)
 	mfctr	r0
@@ -73,4 +76,5 @@ _GLOBAL(ppc_save_regs)
 	PPC_STL	r0,_CCR-STACK_FRAME_OVERHEAD(r3)
 	li	r0,0
 	PPC_STL	r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
+	PPC_STL	r0,ORIG_GPR3-STACK_FRAME_OVERHEAD(r3)
 	blr
@@ -6,7 +6,7 @@
 CFLAGS_ptrace-view.o		+= -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 obj-y				+= ptrace.o ptrace-view.o
-obj-$(CONFIG_PPC64)		+= ptrace32.o
+obj-$(CONFIG_COMPAT)		+= ptrace32.o
 obj-$(CONFIG_VSX)		+= ptrace-vsx.o
 ifneq ($(CONFIG_VSX),y)
 obj-y				+= ptrace-novsx.o
@@ -18,12 +18,153 @@
#include <linux/syscalls.h>
#include <asm/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <asm/switch_to.h>
#include <asm/unistd.h>
#include <asm/debug.h>
#include <asm/tm.h>

#include "signal.h"

#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	buf[i] = task->thread.fp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	task->thread.fp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long copy_ckfpr_to_user(void __user *to,
				 struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_CKFPR(i);
	buf[i] = task->thread.ckfp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_ckfpr_from_user(struct task_struct *task,
				   void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_CKFPR(i) = buf[i];
	task->thread.ckfp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_ckvsx_to_user(void __user *to,
				 struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_ckvsx_from_user(struct task_struct *task,
				   void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
inline unsigned long copy_ckfpr_to_user(void __user *to,
					struct task_struct *task)
{
	return __copy_to_user(to, task->thread.ckfp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
					  void __user *from)
{
	return __copy_from_user(task->thread.ckfp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif

/* Log an error when sending an unhandled signal to a process. Controlled
 * through debug.exception-trace sysctl.
 */
@@ -106,7 +247,6 @@ static void do_signal(struct task_struct *tsk)
 	sigset_t *oldset = sigmask_to_save();
 	struct ksignal ksig = { .sig = 0 };
 	int ret;
-	int is32 = is_32bit_task();
 
 	BUG_ON(tsk != current);
 
@@ -136,7 +276,7 @@ static void do_signal(struct task_struct *tsk)
 
 	rseq_signal_deliver(&ksig, tsk->thread.regs);
 
-	if (is32) {
+	if (is_32bit_task()) {
 		if (ksig.ka.sa.sa_flags & SA_SIGINFO)
 			ret = handle_rt_signal32(&ksig, oldset, tsk);
 		else
@@ -235,146 +235,6 @@ struct rt_sigframe {
	int abigap[56];
};

#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	buf[i] = task->thread.fp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	task->thread.fp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long copy_ckfpr_to_user(void __user *to,
				 struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_CKFPR(i);
	buf[i] = task->thread.ckfp_state.fpscr;
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_ckfpr_from_user(struct task_struct *task,
				   void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_CKFPR(i) = buf[i];
	task->thread.ckfp_state.fpscr = buf[i];

	return 0;
}

unsigned long copy_ckvsx_to_user(void __user *to,
				 struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* save FPR copy to local buffer then write to the thread_struct */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_ckvsx_from_user(struct task_struct *task,
				   void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
inline unsigned long copy_ckfpr_to_user(void __user *to,
					struct task_struct *task)
{
	return __copy_to_user(to, task->thread.ckfp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
					  void __user *from)
{
	return __copy_from_user(task->thread.ckfp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
@@ -22,7 +22,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
 				   long r6, long r7, long r8,
 				   unsigned long r0, struct pt_regs *regs)
 {
-	unsigned long ti_flags;
 	syscall_fn f;
 
 	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
@@ -60,8 +59,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
 
 	local_irq_enable();
 
-	ti_flags = current_thread_info()->flags;
-	if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
+	if (unlikely(current_thread_info()->flags & _TIF_SYSCALL_DOTRACE)) {
 		/*
		 * We use the return value of do_syscall_trace_enter() as the
		 * syscall number. If the syscall was rejected for any reason
@@ -86,7 +84,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
 	/* May be faster to do array_index_nospec? */
 	barrier_nospec();
 
-	if (unlikely(ti_flags & _TIF_32BIT)) {
+	if (unlikely(is_32bit_task())) {
 		f = (void *)compat_sys_call_table[r0];
 
 		r3 &= 0x00000000ffffffffULL;
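Re-reading the thread flags (and now is_32bit_task()) matters because do_syscall_trace_enter() can change them; the compat branch itself only trusts the low word of each argument register. A worked example of the narrowing mask:

/* a compat task passes an fd in the low word; junk may sit above:
 *   0xdeadbeef00000005ULL & 0x00000000ffffffffULL == 0x5 */
unsigned long r3 = 0xdeadbeef00000005ULL;
r3 &= 0x00000000ffffffffULL;	/* now a clean 32-bit value: 5 */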
@@ -50,7 +50,7 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/irq_work.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
 #include <linux/suspend.h>
 #include <linux/sched/cputime.h>
 #include <linux/processor.h>
@@ -522,35 +522,6 @@ static inline void clear_irq_work_pending(void)
 		"i" (offsetof(struct paca_struct, irq_work_pending)));
 }
 
-void arch_irq_work_raise(void)
-{
-	preempt_disable();
-	set_irq_work_pending_flag();
-	/*
-	 * Non-nmi code running with interrupts disabled will replay
-	 * irq_happened before it re-enables interrupts, so set the
-	 * decrementer there instead of causing a hardware exception
-	 * which would immediately hit the masked interrupt handler
-	 * and have the net effect of setting the decrementer in
-	 * irq_happened.
-	 *
-	 * NMI interrupts can not check this when they return, so the
-	 * decrementer hardware exception is raised, which will fire
-	 * when interrupts are next enabled.
-	 *
-	 * BookE does not support this yet, it must audit all NMI
-	 * interrupt handlers to ensure they call nmi_enter() so this
-	 * check would be correct.
-	 */
-	if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) {
-		set_dec(1);
-	} else {
-		hard_irq_disable();
-		local_paca->irq_happened |= PACA_IRQ_DEC;
-	}
-	preempt_enable();
-}
-
 #else /* 32-bit */
 
 DEFINE_PER_CPU(u8, irq_work_pending);
@@ -559,16 +530,27 @@ DEFINE_PER_CPU(u8, irq_work_pending);
 #define test_irq_work_pending()		__this_cpu_read(irq_work_pending)
 #define clear_irq_work_pending()	__this_cpu_write(irq_work_pending, 0)
 
+#endif /* 32 vs 64 bit */
+
 void arch_irq_work_raise(void)
 {
+	/*
+	 * 64-bit code that uses irq soft-mask can just cause an immediate
+	 * interrupt here that gets soft masked, if this is called under
+	 * local_irq_disable(). It might be possible to prevent that happening
+	 * by noticing interrupts are disabled and setting decrementer pending
+	 * to be replayed when irqs are enabled. The problem there is that
+	 * tracing can call irq_work_raise, including in code that does low
+	 * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
+	 * which could get tangled up if we're messing with the same state
+	 * here.
+	 */
 	preempt_disable();
 	set_irq_work_pending_flag();
 	set_dec(1);
 	preempt_enable();
 }
 
-#endif /* 32 vs 64 bit */
-
 #else  /* CONFIG_IRQ_WORK */
 
 #define test_irq_work_pending()	0
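The unified helper works because set_dec(1) forces the decrementer to fire almost immediately, and timer_interrupt() checks the per-CPU pending flag. A hedged usage sketch from the generic side (hypothetical caller, API as of v5.7):

#include <linux/irq_work.h>

static void demo_cb(struct irq_work *work)
{
	/* runs later in hard-irq context, off the decrementer */
}

static struct irq_work demo_work;	/* hypothetical user */

static void demo_setup(void)
{
	init_irq_work(&demo_work, demo_cb);
	/* queueing lands in arch_irq_work_raise(), which sets the
	 * pending flag and pokes the decrementer with set_dec(1) */
	irq_work_queue(&demo_work);
}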
@@ -1149,9 +1131,7 @@ void __init time_init(void)
 	init_decrementer_clockevent();
 	tick_setup_hrtimer_broadcast();
 
-#ifdef CONFIG_COMMON_CLK
 	of_clk_init(NULL);
-#endif
 }
 
 /*
@@ -651,7 +651,8 @@ static void __init vdso_setup_syscall_map(void)
 		if (sys_call_table[i] != sys_ni_syscall)
 			vdso_data->syscall_map_64[i >> 5] |=
 				0x80000000UL >> (i & 0x1f);
-		if (compat_sys_call_table[i] != sys_ni_syscall)
+		if (IS_ENABLED(CONFIG_COMPAT) &&
+		    compat_sys_call_table[i] != sys_ni_syscall)
 			vdso_data->syscall_map_32[i >> 5] |=
 				0x80000000UL >> (i & 0x1f);
 #else /* CONFIG_PPC64 */
@@ -1,6 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 
-obj-$(CONFIG_PERF_EVENTS)	+= callchain.o perf_regs.o
+obj-$(CONFIG_PERF_EVENTS)	+= callchain.o callchain_$(BITS).o perf_regs.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_PERF_EVENTS)	+= callchain_32.o
+endif
 
 obj-$(CONFIG_PPC_PERF_CTRS)	+= core-book3s.o bhrb.o
 obj64-$(CONFIG_PPC_PERF_CTRS)	+= ppc970-pmu.o power5-pmu.o \
@@ -15,11 +15,9 @@
 #include <asm/sigcontext.h>
 #include <asm/ucontext.h>
 #include <asm/vdso.h>
-#ifdef CONFIG_PPC64
-#include "../kernel/ppc32.h"
-#endif
 #include <asm/pte-walk.h>
 
+#include "callchain.h"
+
 /*
  * Is sp valid as the address of the next kernel stack frame after prev_sp?
@@ -102,358 +100,6 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
	}
}

#ifdef CONFIG_PPC64
/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	int ret = -EFAULT;
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn, flags;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	local_irq_save(flags);
	ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
	if (!ptep)
		goto err_out;
	if (!shift)
		shift = PAGE_SHIFT;

	/* align address to page boundary */
	offset = addr & ((1UL << shift) - 1);

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte) || !pte_user(pte))
		goto err_out;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		goto err_out;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(buf, kaddr + offset, nb);
	ret = 0;
err_out:
	local_irq_restore(flags);
	return ret;
}

static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
	    ((unsigned long)ptr & 7))
		return -EFAULT;

	if (!probe_user_read(ret, ptr, sizeof(*ret)))
		return 0;

	return read_user_stack_slow(ptr, ret, 8);
}

static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	if (!probe_user_read(ret, ptr, sizeof(*ret)))
		return 0;

	return read_user_stack_slow(ptr, ret, 4);
}

static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
		return 0;
	return 1;
}

/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];
	struct ucontext	uc;
	unsigned long	unused[2];
	unsigned int	tramp[6];
	struct siginfo	*pinfo;
	void		*puc;
	struct siginfo	info;
	char		abigap[288];
};

static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
	if (nip == fp + offsetof(struct signal_frame_64, tramp))
		return 1;
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
		return 1;
	return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
	struct signal_frame_64 __user *sf;
	unsigned long pinfo, puc;

	sf = (struct signal_frame_64 __user *) sp;
	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
		return 0;
	return pinfo == (unsigned long) &sf->info &&
	       puc == (unsigned long) &sf->uc;
}

static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: the next_sp - sp >= signal frame size check
		 * is true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like an signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}

#else  /* CONFIG_PPC64 */
/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables. Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	return probe_user_read(ret, ptr, sizeof(*ret));
}

static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
					  struct pt_regs *regs)
{
}

static inline int valid_user_sp(unsigned long sp, int is_64)
{
	if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
		return 0;
	return 1;
}

#define __SIGNAL_FRAMESIZE32	__SIGNAL_FRAMESIZE
#define sigcontext32		sigcontext
#define mcontext32		mcontext
#define ucontext32		ucontext
#define compat_siginfo_t	struct siginfo

#endif /* CONFIG_PPC64 */

/*
 * Layout for non-RT signal frames
 */
struct signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32];
	struct sigcontext32	sctx;
	struct mcontext32	mctx;
	int			abigap[56];
};

/*
 * Layout for RT signal frames
 */
struct rt_signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32 + 16];
	compat_siginfo_t	info;
	struct ucontext32	uc;
	int			abigap[56];
};

static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
		return 1;
	if (vdso32_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
		return 1;
	return 0;
}

static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct rt_signal_frame_32,
				 uc.uc_mcontext.mc_pad))
		return 1;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
		return 1;
	return 0;
}

static int sane_signal_32_frame(unsigned int sp)
{
	struct signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->mctx;
}

static int sane_rt_signal_32_frame(unsigned int sp)
{
	struct rt_signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->uc.uc_mcontext;
}

static unsigned int __user *signal_frame_32_regs(unsigned int sp,
				unsigned int next_sp, unsigned int next_ip)
{
	struct mcontext32 __user *mctx = NULL;
	struct signal_frame_32 __user *sf;
	struct rt_signal_frame_32 __user *rt_sf;

	/*
	 * Note: the next_sp - sp >= signal frame size check
	 * is true when next_sp < sp, for example, when
	 * transitioning from an alternate signal stack to the
	 * normal stack.
	 */
	if (next_sp - sp >= sizeof(struct signal_frame_32) &&
	    is_sigreturn_32_address(next_ip, sp) &&
	    sane_signal_32_frame(sp)) {
		sf = (struct signal_frame_32 __user *) (unsigned long) sp;
		mctx = &sf->mctx;
	}

	if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
	    is_rt_sigreturn_32_address(next_ip, sp) &&
	    sane_rt_signal_32_frame(sp)) {
		rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
		mctx = &rt_sf->uc.uc_mcontext;
	}

	if (!mctx)
		return NULL;
	return mctx->mc_gregs;
}

static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
				   struct pt_regs *regs)
{
	unsigned int sp, next_sp;
	unsigned int next_ip;
	unsigned int lr;
	long level = 0;
	unsigned int __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned int __user *) (unsigned long) sp;
		if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
			return;

		uregs = signal_frame_32_regs(sp, next_sp, next_ip);
		if (!uregs && level <= 1)
			uregs = signal_frame_32_regs(sp, next_sp, lr);
		if (uregs) {
			/*
			 * This looks like an signal frame, so restart
			 * the stack trace with the values in it.
			 */
			if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_32(&uregs[PT_LNK], &lr) ||
			    read_user_stack_32(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}

void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
arch/powerpc/perf/callchain.h (new file)
@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _POWERPC_PERF_CALLCHAIN_H
#define _POWERPC_PERF_CALLCHAIN_H

int read_user_stack_slow(void __user *ptr, void *buf, int nb);
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs);
void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs);

static inline bool invalid_user_sp(unsigned long sp)
{
	unsigned long mask = is_32bit_task() ? 3 : 7;
	unsigned long top = STACK_TOP - (is_32bit_task() ? 16 : 32);

	return (!sp || (sp & mask) || (sp > top));
}

#endif /* _POWERPC_PERF_CALLCHAIN_H */
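invalid_user_sp() folds the two old valid_user_sp(sp, is_64) variants into one helper keyed off is_32bit_task(), with the sense inverted. A worked example with a toy STACK_TOP of 0x8000 (illustrative value only): for a 64-bit task the bound is 0x8000 - 32 = 0x7fe0, so sp = 0x7fc0 (non-zero, 8-byte aligned, below the bound) lets the walk continue, while sp = 0x7ff0 exceeds the bound and stops it:

if (invalid_user_sp(sp))
	return;		/* abort the user-stack walk */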
arch/powerpc/perf/callchain_32.c (new file)
@@ -0,0 +1,196 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/pte-walk.h>

#include "callchain.h"

#ifdef CONFIG_PPC64
#include "../kernel/ppc32.h"
#else  /* CONFIG_PPC64 */

#define __SIGNAL_FRAMESIZE32	__SIGNAL_FRAMESIZE
#define sigcontext32		sigcontext
#define mcontext32		mcontext
#define ucontext32		ucontext
#define compat_siginfo_t	struct siginfo

#endif /* CONFIG_PPC64 */

/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables. Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
	int rc;

	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
	    ((unsigned long)ptr & 3))
		return -EFAULT;

	rc = probe_user_read(ret, ptr, sizeof(*ret));

	if (IS_ENABLED(CONFIG_PPC64) && rc)
		return read_user_stack_slow(ptr, ret, 4);

	return rc;
}

/*
 * Layout for non-RT signal frames
 */
struct signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32];
	struct sigcontext32	sctx;
	struct mcontext32	mctx;
	int			abigap[56];
};

/*
 * Layout for RT signal frames
 */
struct rt_signal_frame_32 {
	char			dummy[__SIGNAL_FRAMESIZE32 + 16];
	compat_siginfo_t	info;
	struct ucontext32	uc;
	int			abigap[56];
};

static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
		return 1;
	if (vdso32_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_sigtramp)
		return 1;
	return 0;
}

static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
	if (nip == fp + offsetof(struct rt_signal_frame_32,
				 uc.uc_mcontext.mc_pad))
		return 1;
	if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
		return 1;
	return 0;
}

static int sane_signal_32_frame(unsigned int sp)
{
	struct signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->mctx;
}

static int sane_rt_signal_32_frame(unsigned int sp)
{
	struct rt_signal_frame_32 __user *sf;
	unsigned int regs;

	sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
		return 0;
	return regs == (unsigned long) &sf->uc.uc_mcontext;
}

static unsigned int __user *signal_frame_32_regs(unsigned int sp,
				unsigned int next_sp, unsigned int next_ip)
{
	struct mcontext32 __user *mctx = NULL;
	struct signal_frame_32 __user *sf;
	struct rt_signal_frame_32 __user *rt_sf;

	/*
	 * Note: the next_sp - sp >= signal frame size check
	 * is true when next_sp < sp, for example, when
	 * transitioning from an alternate signal stack to the
	 * normal stack.
	 */
	if (next_sp - sp >= sizeof(struct signal_frame_32) &&
	    is_sigreturn_32_address(next_ip, sp) &&
	    sane_signal_32_frame(sp)) {
		sf = (struct signal_frame_32 __user *) (unsigned long) sp;
		mctx = &sf->mctx;
	}

	if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
	    is_rt_sigreturn_32_address(next_ip, sp) &&
	    sane_rt_signal_32_frame(sp)) {
		rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
		mctx = &rt_sf->uc.uc_mcontext;
	}

	if (!mctx)
		return NULL;
	return mctx->mc_gregs;
}

void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs)
{
	unsigned int sp, next_sp;
	unsigned int next_ip;
	unsigned int lr;
	long level = 0;
	unsigned int __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned int __user *) (unsigned long) sp;
		if (invalid_user_sp(sp) || read_user_stack_32(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
			return;

		uregs = signal_frame_32_regs(sp, next_sp, next_ip);
		if (!uregs && level <= 1)
			uregs = signal_frame_32_regs(sp, next_sp, lr);
		if (uregs) {
			/*
			 * This looks like an signal frame, so restart
			 * the stack trace with the values in it.
			 */
			if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_32(&uregs[PT_LNK], &lr) ||
			    read_user_stack_32(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}
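Unlike the removed 32-bit-only copy, this read_user_stack_32() also serves compat tasks on 64-bit kernels, so the slow page-table fallback sits behind IS_ENABLED(CONFIG_PPC64). A sketch of why that compiles away on true 32-bit builds (assumption: usual GCC/Clang dead-code elimination):

/* IS_ENABLED(CONFIG_PPC64) is the integer constant 0 on PPC32, so the
 * whole branch is dropped and the 64-bit-only read_user_stack_slow()
 * is never referenced by the 32-bit object file */
if (IS_ENABLED(CONFIG_PPC64) && rc)
	rc = read_user_stack_slow(ptr, ret, 4);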
arch/powerpc/perf/callchain_64.c (new file)
@@ -0,0 +1,174 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/pte-walk.h>

#include "callchain.h"

/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
int read_user_stack_slow(void __user *ptr, void *buf, int nb)
{
	int ret = -EFAULT;
	pgd_t *pgdir;
	pte_t *ptep, pte;
	unsigned int shift;
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	unsigned long pfn, flags;
	void *kaddr;

	pgdir = current->mm->pgd;
	if (!pgdir)
		return -EFAULT;

	local_irq_save(flags);
	ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
	if (!ptep)
		goto err_out;
	if (!shift)
		shift = PAGE_SHIFT;

	/* align address to page boundary */
	offset = addr & ((1UL << shift) - 1);

	pte = READ_ONCE(*ptep);
	if (!pte_present(pte) || !pte_user(pte))
		goto err_out;
	pfn = pte_pfn(pte);
	if (!page_is_ram(pfn))
		goto err_out;

	/* no highmem to worry about here */
	kaddr = pfn_to_kaddr(pfn);
	memcpy(buf, kaddr + offset, nb);
	ret = 0;
err_out:
	local_irq_restore(flags);
	return ret;
}

static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
	    ((unsigned long)ptr & 7))
		return -EFAULT;

	if (!probe_user_read(ret, ptr, sizeof(*ret)))
		return 0;

	return read_user_stack_slow(ptr, ret, 8);
}

/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];
	struct ucontext	uc;
	unsigned long	unused[2];
	unsigned int	tramp[6];
	struct siginfo	*pinfo;
	void		*puc;
	struct siginfo	info;
	char		abigap[288];
};

static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
	if (nip == fp + offsetof(struct signal_frame_64, tramp))
		return 1;
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
		return 1;
	return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
	struct signal_frame_64 __user *sf;
	unsigned long pinfo, puc;

	sf = (struct signal_frame_64 __user *) sp;
	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
		return 0;
	return pinfo == (unsigned long) &sf->info &&
	       puc == (unsigned long) &sf->uc;
}

void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		if (invalid_user_sp(sp) || read_user_stack_64(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: the next_sp - sp >= signal frame size check
		 * is true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like an signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}
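The "next_sp - sp >= signal frame size" comment above leans on unsigned wraparound: when the walk crosses from an alternate signal stack back down to the normal stack, next_sp < sp and the subtraction wraps to a huge value, so the signal-frame probe still runs. A worked example with toy 32-bit values:

unsigned int sp      = 0xf0000000u;	/* frame on the alternate stack */
unsigned int next_sp = 0x70000000u;	/* next frame on the main stack */
/* next_sp - sp wraps: 0x70000000 - 0xf0000000 == 0x80000000, far
 * larger than sizeof(struct signal_frame_32), so the check passes */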
@@ -44,6 +44,16 @@ static DEFINE_PER_CPU(u64 *, trace_imc_mem);
 static struct imc_pmu_ref *trace_imc_refc;
 static int trace_imc_mem_size;
 
+/*
+ * Global data structure used to avoid races between thread,
+ * core and trace-imc
+ */
+static struct imc_pmu_ref imc_global_refc = {
+	.lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+	.id = 0,
+	.refc = 0,
+};
+
 static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
 {
 	return container_of(event->pmu, struct imc_pmu, pmu);
@@ -698,6 +708,16 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
 			return -EINVAL;
 
 		ref->refc = 0;
+		/*
+		 * Reduce the global reference count, if this is the
+		 * last cpu in this core and core-imc event running
+		 * in this cpu.
+		 */
+		mutex_lock(&imc_global_refc.lock);
+		if (imc_global_refc.id == IMC_DOMAIN_CORE)
+			imc_global_refc.refc--;
+
+		mutex_unlock(&imc_global_refc.lock);
 	}
 	return 0;
 }
@@ -710,6 +730,23 @@ static int core_imc_pmu_cpumask_init(void)
 				 ppc_core_imc_cpu_offline);
 }
 
+static void reset_global_refc(struct perf_event *event)
+{
+	mutex_lock(&imc_global_refc.lock);
+	imc_global_refc.refc--;
+
+	/*
+	 * If no other thread is running any
+	 * event for this domain(thread/core/trace),
+	 * set the global id to zero.
+	 */
+	if (imc_global_refc.refc <= 0) {
+		imc_global_refc.refc = 0;
+		imc_global_refc.id = 0;
+	}
+	mutex_unlock(&imc_global_refc.lock);
+}
+
 static void core_imc_counters_release(struct perf_event *event)
 {
 	int rc, core_id;
@ -759,6 +796,8 @@ static void core_imc_counters_release(struct perf_event *event)
|
||||
ref->refc = 0;
|
||||
}
|
||||
mutex_unlock(&ref->lock);
|
||||
|
||||
reset_global_refc(event);
|
||||
}
|
||||
|
||||
static int core_imc_event_init(struct perf_event *event)
|
||||
@ -819,6 +858,29 @@ static int core_imc_event_init(struct perf_event *event)
|
||||
++ref->refc;
|
||||
mutex_unlock(&ref->lock);
|
||||
|
||||
/*
|
||||
* Since the system can run either in accumulation or trace-mode
|
||||
* of IMC at a time, core-imc events are allowed only if no other
|
||||
* trace/thread imc events are enabled/monitored.
|
||||
*
|
||||
* Take the global lock, and check the refc.id
|
||||
* to know whether any other trace/thread imc
|
||||
* events are running.
|
||||
*/
|
||||
mutex_lock(&imc_global_refc.lock);
|
||||
if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
|
||||
/*
|
||||
* No other trace/thread imc events are running in
|
||||
* the system, so set the refc.id to core-imc.
|
||||
*/
|
||||
imc_global_refc.id = IMC_DOMAIN_CORE;
|
||||
imc_global_refc.refc++;
|
||||
} else {
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
|
||||
event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
|
||||
event->destroy = core_imc_counters_release;
|
||||
return 0;
|
||||
@ -877,7 +939,23 @@ static int ppc_thread_imc_cpu_online(unsigned int cpu)
|
||||
|
||||
static int ppc_thread_imc_cpu_offline(unsigned int cpu)
|
||||
{
|
||||
mtspr(SPRN_LDBAR, 0);
|
||||
/*
|
||||
* Set the bit 0 of LDBAR to zero.
|
||||
*
|
||||
* If bit 0 of LDBAR is unset, it will stop posting
|
||||
* the counter data to memory.
|
||||
* For thread-imc, bit 0 of LDBAR will be set to 1 in the
|
||||
* event_add function. So reset this bit here, to stop the updates
|
||||
* to memory in the cpu_offline path.
|
||||
*/
|
||||
mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
|
||||
|
||||
/* Reduce the refc if thread-imc event running on this cpu */
|
||||
mutex_lock(&imc_global_refc.lock);
|
||||
if (imc_global_refc.id == IMC_DOMAIN_THREAD)
|
||||
imc_global_refc.refc--;
|
||||
mutex_unlock(&imc_global_refc.lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
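The comments in this hunk say "bit 0 of LDBAR" while the C expression clears (1UL << 63); that is not a contradiction. Power documentation numbers bits from the most-significant end, so IBM bit 0 of a 64-bit SPR is bit 63 in the LSB-0 convention the shift uses. A hedged illustration with a hypothetical helper:

	#include <stdint.h>

	/* IBM bit numbering on a 64-bit register: "bit 0" is the MSB. */
	static inline uint64_t ldbar_clear_ibm_bit0(uint64_t ldbar)
	{
		return ldbar & ~(1ULL << 63);	/* same mask as the mtspr above */
	}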
@ -916,7 +994,22 @@ static int thread_imc_event_init(struct perf_event *event)
	if (!target)
		return -EINVAL;

	mutex_lock(&imc_global_refc.lock);
	/*
	 * Check if any other trace/core imc events are running in the
	 * system, if not set the global id to thread-imc.
	 */
	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) {
		imc_global_refc.id = IMC_DOMAIN_THREAD;
		imc_global_refc.refc++;
	} else {
		mutex_unlock(&imc_global_refc.lock);
		return -EBUSY;
	}
	mutex_unlock(&imc_global_refc.lock);

	event->pmu->task_ctx_nr = perf_sw_context;
	event->destroy = reset_global_refc;
	return 0;
}

@ -1063,10 +1156,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
	int core_id;
	struct imc_pmu_ref *ref;

	mtspr(SPRN_LDBAR, 0);

	core_id = smp_processor_id() / threads_per_core;
	ref = &core_imc_refc[core_id];
	if (!ref) {
		pr_debug("imc: Failed to get event reference count\n");
		return;
	}

	mutex_lock(&ref->lock);
	ref->refc--;
@ -1082,6 +1177,10 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);

	/* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));

	/*
	 * Take a snapshot and calculate the delta and update
	 * the event counter values.
@ -1133,7 +1232,18 @@ static int ppc_trace_imc_cpu_online(unsigned int cpu)

static int ppc_trace_imc_cpu_offline(unsigned int cpu)
{
	mtspr(SPRN_LDBAR, 0);
	/*
	 * No need to set bit 0 of LDBAR to zero, as
	 * it is set to zero for imc trace-mode
	 *
	 * Reduce the refc if any trace-imc event running
	 * on this cpu.
	 */
	mutex_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == IMC_DOMAIN_TRACE)
		imc_global_refc.refc--;
	mutex_unlock(&imc_global_refc.lock);

	return 0;
}

@ -1226,15 +1336,14 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
	local_mem = get_trace_imc_event_base_addr();
	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;

	if (core_imc_refc)
		ref = &core_imc_refc[core_id];
	/* trace-imc reference count */
	if (trace_imc_refc)
		ref = &trace_imc_refc[core_id];
	if (!ref) {
		/* If core-imc is not enabled, use trace-imc reference count */
		if (trace_imc_refc)
			ref = &trace_imc_refc[core_id];
		if (!ref)
			return -EINVAL;
		pr_debug("imc: Failed to get the event reference count\n");
		return -EINVAL;
	}

	mtspr(SPRN_LDBAR, ldbar_value);
	mutex_lock(&ref->lock);
	if (ref->refc == 0) {
@ -1242,13 +1351,11 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
				get_hard_smp_processor_id(smp_processor_id()))) {
			mutex_unlock(&ref->lock);
			pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
			mtspr(SPRN_LDBAR, 0);
			return -EINVAL;
		}
	}
	++ref->refc;
	mutex_unlock(&ref->lock);

	return 0;
}

@ -1274,16 +1381,13 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
	int core_id = smp_processor_id() / threads_per_core;
	struct imc_pmu_ref *ref = NULL;

	if (core_imc_refc)
		ref = &core_imc_refc[core_id];
	if (trace_imc_refc)
		ref = &trace_imc_refc[core_id];
	if (!ref) {
		/* If core-imc is not enabled, use trace-imc reference count */
		if (trace_imc_refc)
			ref = &trace_imc_refc[core_id];
		if (!ref)
			return;
		pr_debug("imc: Failed to get event reference count\n");
		return;
	}
	mtspr(SPRN_LDBAR, 0);

	mutex_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
@ -1297,6 +1401,7 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
		ref->refc = 0;
	}
	mutex_unlock(&ref->lock);

	trace_imc_event_stop(event, flags);
}

@ -1314,10 +1419,30 @@ static int trace_imc_event_init(struct perf_event *event)
	if (event->attr.sample_period == 0)
		return -ENOENT;

	/*
	 * Take the global lock, and make sure
	 * no other thread is running any core/thread imc
	 * events
	 */
	mutex_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
		/*
		 * No core/thread imc events are running in the
		 * system, so set the refc.id to trace-imc.
		 */
		imc_global_refc.id = IMC_DOMAIN_TRACE;
		imc_global_refc.refc++;
	} else {
		mutex_unlock(&imc_global_refc.lock);
		return -EBUSY;
	}
	mutex_unlock(&imc_global_refc.lock);

	event->hw.idx = -1;
	target = event->hw.target;

	event->pmu->task_ctx_nr = perf_hw_context;
	event->destroy = reset_global_refc;
	return 0;
}

@ -1429,10 +1554,10 @@ static void cleanup_all_core_imc_memory(void)
static void thread_imc_ldbar_disable(void *dummy)
{
	/*
	 * By Zeroing LDBAR, we disable thread-imc
	 * updates.
	 * By setting 0th bit of LDBAR to zero, we disable thread-imc
	 * updates to memory.
	 */
	mtspr(SPRN_LDBAR, 0);
	mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
}

void thread_imc_disable(void)
@ -268,14 +268,7 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
			domain = IMC_DOMAIN_THREAD;
			break;
		case IMC_TYPE_TRACE:
			/*
			 * FIXME. Using trace_imc events to monitor application
			 * or KVM thread performance can cause a checkstop
			 * (system crash).
			 * Disable it for now.
			 */
			pr_info_once("IMC: disabling trace_imc PMU\n");
			domain = -1;
			domain = IMC_DOMAIN_TRACE;
			break;
		default:
			pr_warn("IMC Unknown Device type \n");
@ -613,10 +613,8 @@ static int update_flash_db(void)
	/* Read in header and db from flash. */

	header = kmalloc(buf_len, GFP_KERNEL);
	if (!header) {
		pr_debug("%s: kmalloc failed\n", __func__);
	if (!header)
		return -ENOMEM;
	}

	count = os_area_flash_read(header, buf_len, 0);
	if (count < 0) {
@ -945,6 +945,15 @@ static phys_addr_t ddw_memory_hotplug_max(void)
	phys_addr_t max_addr = memory_hotplug_max();
	struct device_node *memory;

	/*
	 * The "ibm,pmemory" can appear anywhere in the address space.
	 * Assuming it is still backed by page structs, set the upper limit
	 * for the huge DMA window as MAX_PHYSMEM_BITS.
	 */
	if (of_find_node_by_type(NULL, "ibm,pmemory"))
		return (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS) ?
			(phys_addr_t) -1 : (1ULL << MAX_PHYSMEM_BITS);

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int n_mem_addr_cells, n_mem_size_cells, len;
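The ternary above guards against an undefined shift: if MAX_PHYSMEM_BITS is at least as wide as phys_addr_t, 1ULL << MAX_PHYSMEM_BITS would be meaningless, so the code returns the all-ones address instead. A hedged stand-alone illustration (demo_upper_limit() is hypothetical):

	static phys_addr_t demo_upper_limit(void)
	{
		/* shifting by >= the type width is undefined in C,
		 * so fall back to "whole address space" in that case */
		if (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS)
			return (phys_addr_t)-1;
		return (phys_addr_t)(1ULL << MAX_PHYSMEM_BITS);
	}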
@ -686,6 +686,17 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
#endif

out:
	/*
	 * Enable translation as we will be accessing per-cpu variables
	 * in save_mce_event() which may fall outside RMO region, also
	 * leave it enabled because subsequently we will be queuing work
	 * to workqueues where again per-cpu variables accessed, besides
	 * fwnmi_release_errinfo() crashes when called in realmode on
	 * pseries.
	 * Note: All the realmode handling like flushing SLB entries for
	 * SLB multihit is done by now.
	 */
	mtmsr(mfmsr() | MSR_IR | MSR_DR);
	save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
			&mce_err, regs->nip, eaddr, paddr);

@ -20,7 +20,6 @@ config RISCV
	select CLONE_BACKWARDS
	select COMMON_CLK
	select GENERIC_CLOCKEVENTS
	select GENERIC_CPU_DEVICES
	select GENERIC_IRQ_SHOW
	select GENERIC_PCI_IOMAP
	select GENERIC_SCHED_CLOCK
@ -29,6 +28,7 @@ config RISCV
	select GENERIC_SMP_IDLE_THREAD
	select GENERIC_ATOMIC64 if !64BIT
	select GENERIC_IOREMAP
	select GENERIC_PTDUMP if MMU
	select HAVE_ARCH_AUDITSYSCALL
	select HAVE_ARCH_SECCOMP_FILTER
	select HAVE_ASM_MODVERSIONS
@ -58,6 +58,9 @@ config RISCV
	select HAVE_EBPF_JIT
	select EDAC_SUPPORT
	select ARCH_HAS_GIGANTIC_PAGE
	select ARCH_HAS_SET_DIRECT_MAP
	select ARCH_HAS_SET_MEMORY
	select ARCH_HAS_STRICT_KERNEL_RWX
	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
	select SPARSEMEM_STATIC if 32BIT
	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
@ -129,6 +132,9 @@ config ARCH_SELECT_MEMORY_MODEL
config ARCH_WANT_GENERAL_HUGETLB
	def_bool y

config ARCH_SUPPORTS_DEBUG_PAGEALLOC
	def_bool y

config SYS_SUPPORTS_HUGETLBFS
	def_bool y

@ -247,6 +253,17 @@ config NR_CPUS
	depends on SMP
	default "8"

config HOTPLUG_CPU
	bool "Support for hot-pluggable CPUs"
	depends on SMP
	select GENERIC_IRQ_MIGRATION
	help

	  Say Y here to experiment with turning CPUs off and on. CPUs
	  can be controlled through /sys/devices/system/cpu.

	  Say N if you want to disable CPU hotplug.

choice
	prompt "CPU Tuning"
	default TUNE_GENERIC
@ -307,6 +324,13 @@ config SECCOMP
	  and the task is only allowed to execute a few safe syscalls
	  defined by each seccomp mode.

config RISCV_SBI_V01
	bool "SBI v0.1 support"
	default y
	depends on RISCV_SBI
	help
	  This config allows kernel to use SBI v0.1 APIs. This will be
	  deprecated in future once legacy M-mode software are no longer in use.
endmenu

menu "Boot options"
@ -20,4 +20,14 @@ config SOC_VIRT
	help
	  This enables support for QEMU Virt Machine.

config SOC_KENDRYTE
	bool "Kendryte K210 SoC"
	depends on !MMU
	select BUILTIN_DTB
	select SERIAL_SIFIVE if TTY
	select SERIAL_SIFIVE_CONSOLE if TTY
	select SIFIVE_PLIC
	help
	  This enables support for Kendryte K210 SoC platform hardware.

endmenu
@ -85,12 +85,12 @@ PHONY += vdso_install
vdso_install:
	$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@

ifeq ($(CONFIG_RISCV_M_MODE),y)
KBUILD_IMAGE := $(boot)/loader
ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_KENDRYTE),yy)
KBUILD_IMAGE := $(boot)/loader.bin
else
KBUILD_IMAGE := $(boot)/Image.gz
endif
BOOT_TARGETS := Image Image.gz loader
BOOT_TARGETS := Image Image.gz loader loader.bin

all: $(notdir $(KBUILD_IMAGE))

@ -41,6 +41,9 @@ $(obj)/Image.lzma: $(obj)/Image FORCE
$(obj)/Image.lzo: $(obj)/Image FORCE
	$(call if_changed,lzo)

$(obj)/loader.bin: $(obj)/loader FORCE
	$(call if_changed,objcopy)

install:
	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
	$(obj)/Image System.map "$(INSTALL_PATH)"
@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
subdir-y += sifive
subdir-y += kendryte

2
arch/riscv/boot/dts/kendryte/Makefile
Normal file
@ -0,0 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_SOC_KENDRYTE) += k210.dtb
23
arch/riscv/boot/dts/kendryte/k210.dts
Normal file
@ -0,0 +1,23 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */

/dts-v1/;

#include "k210.dtsi"

/ {
	model = "Kendryte K210 generic";
	compatible = "kendryte,k210";

	chosen {
		bootargs = "earlycon console=ttySIF0";
		stdout-path = "serial0";
	};
};

&uarths0 {
	status = "okay";
};
123
arch/riscv/boot/dts/kendryte/k210.dtsi
Normal file
@ -0,0 +1,123 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Sean Anderson <seanga2@gmail.com>
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */
#include <dt-bindings/clock/k210-clk.h>

/ {
	/*
	 * Although the K210 is a 64-bit CPU, the address bus is only 32-bits
	 * wide, and the upper half of all addresses is ignored.
	 */
	#address-cells = <1>;
	#size-cells = <1>;
	compatible = "kendryte,k210";

	aliases {
		serial0 = &uarths0;
	};

	/*
	 * The K210 has an sv39 MMU following the privilege specification v1.9.
	 * Since this is a non-ratified draft specification, the kernel does not
	 * support it and the K210 support is enabled only for the !MMU case.
	 * Be consistent with this by setting the CPUs MMU type to "none".
	 */
	cpus {
		#address-cells = <1>;
		#size-cells = <0>;
		timebase-frequency = <7800000>;
		cpu0: cpu@0 {
			device_type = "cpu";
			reg = <0>;
			compatible = "kendryte,k210", "sifive,rocket0", "riscv";
			riscv,isa = "rv64imafdc";
			mmu-type = "none";
			i-cache-size = <0x8000>;
			i-cache-block-size = <64>;
			d-cache-size = <0x8000>;
			d-cache-block-size = <64>;
			clocks = <&sysctl K210_CLK_CPU>;
			clock-frequency = <390000000>;
			cpu0_intc: interrupt-controller {
				#interrupt-cells = <1>;
				interrupt-controller;
				compatible = "riscv,cpu-intc";
			};
		};
		cpu1: cpu@1 {
			device_type = "cpu";
			reg = <1>;
			compatible = "kendryte,k210", "sifive,rocket0", "riscv";
			riscv,isa = "rv64imafdc";
			mmu-type = "none";
			i-cache-size = <0x8000>;
			i-cache-block-size = <64>;
			d-cache-size = <0x8000>;
			d-cache-block-size = <64>;
			clocks = <&sysctl K210_CLK_CPU>;
			clock-frequency = <390000000>;
			cpu1_intc: interrupt-controller {
				#interrupt-cells = <1>;
				interrupt-controller;
				compatible = "riscv,cpu-intc";
			};
		};
	};

	sram: memory@80000000 {
		device_type = "memory";
		reg = <0x80000000 0x400000>,
		      <0x80400000 0x200000>,
		      <0x80600000 0x200000>;
		reg-names = "sram0", "sram1", "aisram";
	};

	clocks {
		in0: oscillator {
			compatible = "fixed-clock";
			#clock-cells = <0>;
			clock-frequency = <26000000>;
		};
	};

	soc {
		#address-cells = <1>;
		#size-cells = <1>;
		compatible = "kendryte,k210-soc", "simple-bus";
		ranges;
		interrupt-parent = <&plic0>;

		sysctl: sysctl@50440000 {
			compatible = "kendryte,k210-sysctl", "simple-mfd";
			reg = <0x50440000 0x1000>;
			#clock-cells = <1>;
		};

		clint0: interrupt-controller@2000000 {
			compatible = "riscv,clint0";
			reg = <0x2000000 0xC000>;
			interrupts-extended = <&cpu0_intc 3>, <&cpu1_intc 3>;
			clocks = <&sysctl K210_CLK_ACLK>;
		};

		plic0: interrupt-controller@c000000 {
			#interrupt-cells = <1>;
			interrupt-controller;
			compatible = "kendryte,k210-plic0", "riscv,plic0";
			reg = <0xC000000 0x4000000>;
			interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 0xffffffff>,
					      <&cpu1_intc 11>, <&cpu1_intc 0xffffffff>;
			riscv,ndev = <65>;
			riscv,max-priority = <7>;
		};

		uarths0: serial@38000000 {
			compatible = "kendryte,k210-uarths", "sifive,uart0";
			reg = <0x38000000 0x1000>;
			interrupts = <33>;
			clocks = <&sysctl K210_CLK_CPU>;
		};
	};
};
@ -128,3 +128,4 @@ CONFIG_DEBUG_BLOCK_EXT_DEVT=y
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_MEMTEST=y
# CONFIG_SYSFS_SYSCALL is not set
68
arch/riscv/configs/nommu_k210_defconfig
Normal file
@ -0,0 +1,68 @@
# CONFIG_CPU_ISOLATION is not set
CONFIG_LOG_BUF_SHIFT=15
CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
CONFIG_BLK_DEV_INITRD=y
CONFIG_INITRAMFS_SOURCE=""
CONFIG_INITRAMFS_FORCE=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
# CONFIG_BOOT_CONFIG is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_SYSFS_SYSCALL is not set
# CONFIG_FHANDLE is not set
# CONFIG_BASE_FULL is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
# CONFIG_TIMERFD is not set
# CONFIG_EVENTFD is not set
# CONFIG_AIO is not set
# CONFIG_IO_URING is not set
# CONFIG_ADVISE_SYSCALLS is not set
# CONFIG_MEMBARRIER is not set
# CONFIG_KALLSYMS is not set
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLOB=y
# CONFIG_SLAB_MERGE_DEFAULT is not set
# CONFIG_MMU is not set
CONFIG_SOC_KENDRYTE=y
CONFIG_MAXPHYSMEM_2GB=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0"
CONFIG_CMDLINE_FORCE=y
CONFIG_USE_BUILTIN_DTB=y
CONFIG_BUILTIN_DTB_SOURCE="kendryte/k210"
# CONFIG_BLOCK is not set
CONFIG_BINFMT_FLAT=y
# CONFIG_COREDUMP is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FW_LOADER is not set
# CONFIG_ALLOW_DEV_COREDUMP is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_LDISC_AUTOLOAD is not set
# CONFIG_DEVMEM is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_LSM="[]"
CONFIG_PRINTK_TIME=y
# CONFIG_DEBUG_MISC is not set
# CONFIG_SCHED_DEBUG is not set
# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
@ -124,3 +124,4 @@ CONFIG_DEBUG_BLOCK_EXT_DEVT=y
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_MEMTEST=y
# CONFIG_SYSFS_SYSCALL is not set
@ -19,6 +19,14 @@
#define __BUG_INSN_32	_UL(0x00100073) /* ebreak */
#define __BUG_INSN_16	_UL(0x9002) /* c.ebreak */

#define GET_INSN_LENGTH(insn)						\
({									\
	unsigned long __len;						\
	__len = ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ?	\
		4UL : 2UL;						\
	__len;								\
})

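GET_INSN_LENGTH relies on the RISC-V encoding rule that the two low opcode bits are 0b11 exactly for 32-bit instructions; anything else is a 16-bit compressed instruction. A hedged usage sketch (count_insns() is hypothetical and assumes the __INSN_LENGTH_MASK / __INSN_LENGTH_32 definitions already present in this header):

	/* Walk a buffer of mixed 16/32-bit RISC-V instructions. */
	static unsigned long count_insns(const u16 *buf, unsigned long bytes)
	{
		unsigned long off = 0, n = 0;

		while (off < bytes) {
			/* the low 16 bits are enough to decode the length */
			off += GET_INSN_LENGTH(buf[off / 2]);
			n++;
		}
		return n;
	}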
typedef u32 bug_insn_t;

#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS

@ -85,7 +85,7 @@ static inline void flush_dcache_page(struct page *page)
 * so instead we just flush the whole thing.
 */
#define flush_icache_range(start, end) flush_icache_all()
#define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()
#define flush_icache_user_range(vma, pg, addr, len) flush_icache_mm(vma->vm_mm, 0)

#ifndef CONFIG_SMP

46
arch/riscv/include/asm/cpu_ops.h
Normal file
@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 * Based on arch/arm64/include/asm/cpu_ops.h
 */
#ifndef __ASM_CPU_OPS_H
#define __ASM_CPU_OPS_H

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/threads.h>

/**
 * struct cpu_operations - Callback operations for hotplugging CPUs.
 *
 * @name:		Name of the boot protocol.
 * @cpu_prepare:	Early one-time preparation step for a cpu. If there
 *			is a mechanism for doing so, tests whether it is
 *			possible to boot the given HART.
 * @cpu_start:		Boots a cpu into the kernel.
 * @cpu_disable:	Prepares a cpu to die. May fail for some
 *			mechanism-specific reason, which will cause the hot
 *			unplug to be aborted. Called from the cpu to be killed.
 * @cpu_stop:		Makes a cpu leave the kernel. Must not fail. Called from
 *			the cpu being stopped.
 * @cpu_is_stopped:	Ensures a cpu has left the kernel. Called from another
 *			cpu.
 */
struct cpu_operations {
	const char	*name;
	int		(*cpu_prepare)(unsigned int cpu);
	int		(*cpu_start)(unsigned int cpu,
				     struct task_struct *tidle);
#ifdef CONFIG_HOTPLUG_CPU
	int		(*cpu_disable)(unsigned int cpu);
	void		(*cpu_stop)(void);
	int		(*cpu_is_stopped)(unsigned int cpu);
#endif
};

extern const struct cpu_operations *cpu_ops[NR_CPUS];
void __init cpu_set_ops(int cpu);
void cpu_update_secondary_bootdata(unsigned int cpuid,
				   struct task_struct *tidle);

#endif /* ifndef __ASM_CPU_OPS_H */
@ -17,6 +17,8 @@

struct task_struct;

register struct task_struct *riscv_current_is_tp __asm__("tp");

/*
 * This only works because "struct thread_info" is at offset 0 from "struct
 * task_struct". This constraint seems to be necessary on other architectures
@ -26,8 +28,7 @@ struct task_struct;
 */
static __always_inline struct task_struct *get_current(void)
{
	register struct task_struct *tp __asm__("tp");
	return tp;
	return riscv_current_is_tp;
}

#define current get_current()

@ -27,6 +27,8 @@ enum fixed_addresses {
	FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
	FIX_PTE,
	FIX_PMD,
	FIX_TEXT_POKE1,
	FIX_TEXT_POKE0,
	FIX_EARLYCON_MEM_BASE,
	__end_of_fixed_addresses
};

@ -13,7 +13,7 @@
#define KASAN_SHADOW_SCALE_SHIFT	3

#define KASAN_SHADOW_SIZE	(UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_SHADOW_START	0xffffffc000000000 /* 2^64 - 2^38 */
#define KASAN_SHADOW_START	KERN_VIRT_START /* 2^64 - 2^38 */
#define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)

#define KASAN_SHADOW_OFFSET	(KASAN_SHADOW_END - (1ULL << \
12
arch/riscv/include/asm/patch.h
Normal file
@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 SiFive
 */

#ifndef _ASM_RISCV_PATCH_H
#define _ASM_RISCV_PATCH_H

int riscv_patch_text_nosync(void *addr, const void *insns, size_t len);
int riscv_patch_text(void *addr, u32 insn);

#endif /* _ASM_RISCV_PATCH_H */
@ -448,6 +448,16 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
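A worked example of the KERN_VIRT_START definition above, assuming the common Sv39 configuration (CONFIG_VA_BITS=39 and the RV64 TASK_SIZE of 0x4000000000 quoted in the same comment block):

	KERN_VIRT_START = -(1 << 39) + 0x4000000000
	                = 0xffffff8000000000 + 0x4000000000
	                = 0xffffffc000000000

which equals 2^64 - 2^38, matching the "2^64 - 2^38" annotation that kasan.h now reuses via KERN_VIRT_START instead of the hard-coded 0xffffffc000000000.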
11
arch/riscv/include/asm/ptdump.h
Normal file
@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2019 SiFive
 */

#ifndef _ASM_RISCV_PTDUMP_H
#define _ASM_RISCV_PTDUMP_H

void ptdump_check_wx(void);

#endif /* _ASM_RISCV_PTDUMP_H */
@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#ifndef _ASM_RISCV_SBI_H
@ -9,96 +10,148 @@
#include <linux/types.h>

#ifdef CONFIG_RISCV_SBI
#define SBI_SET_TIMER 0
#define SBI_CONSOLE_PUTCHAR 1
#define SBI_CONSOLE_GETCHAR 2
#define SBI_CLEAR_IPI 3
#define SBI_SEND_IPI 4
#define SBI_REMOTE_FENCE_I 5
#define SBI_REMOTE_SFENCE_VMA 6
#define SBI_REMOTE_SFENCE_VMA_ASID 7
#define SBI_SHUTDOWN 8

#define SBI_CALL(which, arg0, arg1, arg2, arg3) ({		\
	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);	\
	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);	\
	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);	\
	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);	\
	register uintptr_t a7 asm ("a7") = (uintptr_t)(which);	\
	asm volatile ("ecall"					\
		      : "+r" (a0)				\
		      : "r" (a1), "r" (a2), "r" (a3), "r" (a7)	\
		      : "memory");				\
	a0;							\
})

/* Lazy implementations until SBI is finalized */
#define SBI_CALL_0(which) SBI_CALL(which, 0, 0, 0, 0)
#define SBI_CALL_1(which, arg0) SBI_CALL(which, arg0, 0, 0, 0)
#define SBI_CALL_2(which, arg0, arg1) SBI_CALL(which, arg0, arg1, 0, 0)
#define SBI_CALL_3(which, arg0, arg1, arg2) \
		SBI_CALL(which, arg0, arg1, arg2, 0)
#define SBI_CALL_4(which, arg0, arg1, arg2, arg3) \
		SBI_CALL(which, arg0, arg1, arg2, arg3)

static inline void sbi_console_putchar(int ch)
{
	SBI_CALL_1(SBI_CONSOLE_PUTCHAR, ch);
}

static inline int sbi_console_getchar(void)
{
	return SBI_CALL_0(SBI_CONSOLE_GETCHAR);
}

static inline void sbi_set_timer(uint64_t stime_value)
{
#if __riscv_xlen == 32
	SBI_CALL_2(SBI_SET_TIMER, stime_value, stime_value >> 32);
#else
	SBI_CALL_1(SBI_SET_TIMER, stime_value);
enum sbi_ext_id {
#ifdef CONFIG_RISCV_SBI_V01
	SBI_EXT_0_1_SET_TIMER = 0x0,
	SBI_EXT_0_1_CONSOLE_PUTCHAR = 0x1,
	SBI_EXT_0_1_CONSOLE_GETCHAR = 0x2,
	SBI_EXT_0_1_CLEAR_IPI = 0x3,
	SBI_EXT_0_1_SEND_IPI = 0x4,
	SBI_EXT_0_1_REMOTE_FENCE_I = 0x5,
	SBI_EXT_0_1_REMOTE_SFENCE_VMA = 0x6,
	SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID = 0x7,
	SBI_EXT_0_1_SHUTDOWN = 0x8,
#endif
	SBI_EXT_BASE = 0x10,
	SBI_EXT_TIME = 0x54494D45,
	SBI_EXT_IPI = 0x735049,
	SBI_EXT_RFENCE = 0x52464E43,
	SBI_EXT_HSM = 0x48534D,
};

enum sbi_ext_base_fid {
	SBI_EXT_BASE_GET_SPEC_VERSION = 0,
	SBI_EXT_BASE_GET_IMP_ID,
	SBI_EXT_BASE_GET_IMP_VERSION,
	SBI_EXT_BASE_PROBE_EXT,
	SBI_EXT_BASE_GET_MVENDORID,
	SBI_EXT_BASE_GET_MARCHID,
	SBI_EXT_BASE_GET_MIMPID,
};

enum sbi_ext_time_fid {
	SBI_EXT_TIME_SET_TIMER = 0,
};

enum sbi_ext_ipi_fid {
	SBI_EXT_IPI_SEND_IPI = 0,
};

enum sbi_ext_rfence_fid {
	SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
	SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
	SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
	SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
};

enum sbi_ext_hsm_fid {
	SBI_EXT_HSM_HART_START = 0,
	SBI_EXT_HSM_HART_STOP,
	SBI_EXT_HSM_HART_STATUS,
};

enum sbi_hsm_hart_status {
	SBI_HSM_HART_STATUS_STARTED = 0,
	SBI_HSM_HART_STATUS_STOPPED,
	SBI_HSM_HART_STATUS_START_PENDING,
	SBI_HSM_HART_STATUS_STOP_PENDING,
};

#define SBI_SPEC_VERSION_DEFAULT	0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT	24
#define SBI_SPEC_VERSION_MAJOR_MASK	0x7f
#define SBI_SPEC_VERSION_MINOR_MASK	0xffffff

/* SBI return error codes */
#define SBI_SUCCESS		0
#define SBI_ERR_FAILURE		-1
#define SBI_ERR_NOT_SUPPORTED	-2
#define SBI_ERR_INVALID_PARAM	-3
#define SBI_ERR_DENIED		-4
#define SBI_ERR_INVALID_ADDRESS	-5

extern unsigned long sbi_spec_version;
struct sbiret {
	long error;
	long value;
};

int sbi_init(void);
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
			unsigned long arg1, unsigned long arg2,
			unsigned long arg3, unsigned long arg4,
			unsigned long arg5);

void sbi_console_putchar(int ch);
int sbi_console_getchar(void);
void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
void sbi_clear_ipi(void);
void sbi_send_ipi(const unsigned long *hart_mask);
void sbi_remote_fence_i(const unsigned long *hart_mask);
void sbi_remote_sfence_vma(const unsigned long *hart_mask,
			   unsigned long start,
			   unsigned long size);

void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid);
int sbi_remote_hfence_gvma(const unsigned long *hart_mask,
			   unsigned long start,
			   unsigned long size);
int sbi_remote_hfence_gvma_vmid(const unsigned long *hart_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid);
int sbi_remote_hfence_vvma(const unsigned long *hart_mask,
			   unsigned long start,
			   unsigned long size);
int sbi_remote_hfence_vvma_asid(const unsigned long *hart_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid);
int sbi_probe_extension(int ext);

/* Check if current SBI specification version is 0.1 or not */
static inline int sbi_spec_is_0_1(void)
{
	return (sbi_spec_version == SBI_SPEC_VERSION_DEFAULT) ? 1 : 0;
}

static inline void sbi_shutdown(void)
/* Get the major version of SBI */
static inline unsigned long sbi_major_version(void)
{
	SBI_CALL_0(SBI_SHUTDOWN);
	return (sbi_spec_version >> SBI_SPEC_VERSION_MAJOR_SHIFT) &
		SBI_SPEC_VERSION_MAJOR_MASK;
}

static inline void sbi_clear_ipi(void)
/* Get the minor version of SBI */
static inline unsigned long sbi_minor_version(void)
{
	SBI_CALL_0(SBI_CLEAR_IPI);
	return sbi_spec_version & SBI_SPEC_VERSION_MINOR_MASK;
}

static inline void sbi_send_ipi(const unsigned long *hart_mask)
{
	SBI_CALL_1(SBI_SEND_IPI, hart_mask);
}

static inline void sbi_remote_fence_i(const unsigned long *hart_mask)
{
	SBI_CALL_1(SBI_REMOTE_FENCE_I, hart_mask);
}

static inline void sbi_remote_sfence_vma(const unsigned long *hart_mask,
					 unsigned long start,
					 unsigned long size)
{
	SBI_CALL_3(SBI_REMOTE_SFENCE_VMA, hart_mask, start, size);
}

static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
					      unsigned long start,
					      unsigned long size,
					      unsigned long asid)
{
	SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
}
int sbi_err_map_linux_errno(int err);
#else /* CONFIG_RISCV_SBI */
/* stubs for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */
void sbi_set_timer(uint64_t stime_value);
void sbi_clear_ipi(void);
void sbi_send_ipi(const unsigned long *hart_mask);
void sbi_remote_fence_i(const unsigned long *hart_mask);
void sbi_init(void);
#endif /* CONFIG_RISCV_SBI */
#endif /* _ASM_RISCV_SBI_H */
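The new calling convention returns both an error and a value in struct sbiret rather than a bare a0. A hedged sketch of how a caller is expected to consume it (probe_and_report() is a hypothetical user; the real sbi_probe_extension() in sbi.c follows the same shape):

	static int probe_and_report(int ext)
	{
		struct sbiret ret;

		ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT,
				ext, 0, 0, 0, 0, 0);
		if (ret.error)
			return sbi_err_map_linux_errno(ret.error);
		return ret.value;	/* nonzero value => extension available */
	}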
48
arch/riscv/include/asm/set_memory.h
Normal file
@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 SiFive
 */

#ifndef _ASM_RISCV_SET_MEMORY_H
#define _ASM_RISCV_SET_MEMORY_H

#ifndef __ASSEMBLY__
/*
 * Functions to change memory attributes.
 */
#ifdef CONFIG_MMU
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
void set_kernel_text_ro(void);
void set_kernel_text_rw(void);
#else
static inline void set_kernel_text_ro(void) { }
static inline void set_kernel_text_rw(void) { }
#endif

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_ARCH_HAS_STRICT_KERNEL_RWX
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
#define SECTION_ALIGN (1 << 22)
#endif
#else /* !CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
#define SECTION_ALIGN L1_CACHE_BYTES
#endif /* CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */

#endif /* _ASM_RISCV_SET_MEMORY_H */
@ -43,6 +43,13 @@ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
 */
#define raw_smp_processor_id() (current_thread_info()->cpu)

#if defined CONFIG_HOTPLUG_CPU
int __cpu_disable(void);
void __cpu_die(unsigned int cpu);
void cpu_stop(void);
#else
#endif /* CONFIG_HOTPLUG_CPU */

#else

static inline void show_ipi_stats(struct seq_file *p, int prec)
@ -61,5 +68,22 @@ static inline unsigned long cpuid_to_hartid_map(int cpu)
	return boot_cpu_hartid;
}

static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
					      struct cpumask *out)
{
	cpumask_clear(out);
	cpumask_set_cpu(boot_cpu_hartid, out);
}

#endif /* CONFIG_SMP */

#if defined(CONFIG_HOTPLUG_CPU) && (CONFIG_SMP)
bool cpu_has_hotplug(unsigned int cpu);
#else
static inline bool cpu_has_hotplug(unsigned int cpu)
{
	return false;
}
#endif

#endif /* _ASM_RISCV_SMP_H */
23
arch/riscv/include/asm/soc.h
Normal file
@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */

#ifndef _ASM_RISCV_SOC_H
#define _ASM_RISCV_SOC_H

#include <linux/of.h>
#include <linux/linkage.h>
#include <linux/types.h>

#define SOC_EARLY_INIT_DECLARE(name, compat, fn)		\
	static const struct of_device_id __soc_early_init__##name \
		__used __section(__soc_early_init_table)	\
		 = { .compatible = compat, .data = fn  }

void soc_early_init(void);

extern unsigned long __soc_early_init_table_start;
extern unsigned long __soc_early_init_table_end;

#endif
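A hedged usage sketch of the hook declared above: soc_early_init() (called from head.S later in this diff) matches the booting machine's top-level "compatible" string against entries placed in the __soc_early_init_table section and invokes the associated callback before start_kernel(). The names and body below are hypothetical:

	/* Hypothetical early-init hook for a SoC that needs fixups before
	 * memory setup; the callback receives the flattened device tree. */
	static void __init demo_soc_early_init(const void *fdt)
	{
		/* hypothetical: early SoC register pokes would go here */
	}
	SOC_EARLY_INIT_DECLARE(demo_soc, "kendryte,k210", demo_soc_early_init);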
@ -4,12 +4,14 @@
#

ifdef CONFIG_FTRACE
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_ftrace.o	= -pg
CFLAGS_REMOVE_patch.o	= -pg
endif

extra-y += head.o
extra-y += vmlinux.lds

obj-y	+= soc.o
obj-y	+= cpu.o
obj-y	+= cpufeature.o
obj-y	+= entry.o
@ -26,12 +28,15 @@ obj-y	+= traps.o
obj-y	+= riscv_ksyms.o
obj-y	+= stacktrace.o
obj-y	+= cacheinfo.o
obj-y	+= patch.o
obj-$(CONFIG_MMU) += vdso.o vdso/

obj-$(CONFIG_RISCV_M_MODE)	+= clint.o
obj-$(CONFIG_RISCV_M_MODE)	+= clint.o traps_misaligned.o
obj-$(CONFIG_FPU)		+= fpu.o
obj-$(CONFIG_SMP)		+= smpboot.o
obj-$(CONFIG_SMP)		+= smp.o
obj-$(CONFIG_SMP)		+= cpu_ops.o
obj-$(CONFIG_SMP)		+= cpu_ops_spinwait.o
obj-$(CONFIG_MODULES)		+= module.o
obj-$(CONFIG_MODULE_SECTIONS)	+= module-sections.o

@ -42,5 +47,9 @@ obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o
obj-$(CONFIG_PERF_EVENTS)	+= perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS)	+= perf_regs.o
obj-$(CONFIG_RISCV_SBI)		+= sbi.o
ifeq ($(CONFIG_RISCV_SBI), y)
obj-$(CONFIG_SMP) += cpu_ops_sbi.o
endif
obj-$(CONFIG_HOTPLUG_CPU)	+= cpu-hotplug.o

clean:
87
arch/riscv/kernel/cpu-hotplug.c
Normal file
@ -0,0 +1,87 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/cpu.h>
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/cpu_ops.h>
#include <asm/sbi.h>

void cpu_stop(void);
void arch_cpu_idle_dead(void)
{
	cpu_stop();
}

bool cpu_has_hotplug(unsigned int cpu)
{
	if (cpu_ops[cpu]->cpu_stop)
		return true;

	return false;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	int ret = 0;
	unsigned int cpu = smp_processor_id();

	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop)
		return -EOPNOTSUPP;

	if (cpu_ops[cpu]->cpu_disable)
		ret = cpu_ops[cpu]->cpu_disable(cpu);

	if (ret)
		return ret;

	remove_cpu_topology(cpu);
	set_cpu_online(cpu, false);
	irq_migrate_all_off_this_cpu();

	return ret;
}

/*
 * Called on the thread which is asking for a CPU to be shutdown.
 */
void __cpu_die(unsigned int cpu)
{
	int ret = 0;

	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU %u: didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: off\n", cpu);

	/* Verify from the firmware if the cpu is really stopped*/
	if (cpu_ops[cpu]->cpu_is_stopped)
		ret = cpu_ops[cpu]->cpu_is_stopped(cpu);
	if (ret)
		pr_warn("CPU%d may not have stopped: %d\n", cpu, ret);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 */
void cpu_stop(void)
{
	idle_task_exit();

	(void)cpu_report_death();

	cpu_ops[smp_processor_id()]->cpu_stop();
	/* It should never reach here */
	BUG();
}
46
arch/riscv/kernel/cpu_ops.c
Normal file
@ -0,0 +1,46 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <asm/cpu_ops.h>
#include <asm/sbi.h>
#include <asm/smp.h>

const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;

void *__cpu_up_stack_pointer[NR_CPUS];
void *__cpu_up_task_pointer[NR_CPUS];

extern const struct cpu_operations cpu_ops_sbi;
extern const struct cpu_operations cpu_ops_spinwait;

void cpu_update_secondary_bootdata(unsigned int cpuid,
				   struct task_struct *tidle)
{
	int hartid = cpuid_to_hartid_map(cpuid);

	/* Make sure tidle is updated */
	smp_mb();
	WRITE_ONCE(__cpu_up_stack_pointer[hartid],
		   task_stack_page(tidle) + THREAD_SIZE);
	WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
}

void __init cpu_set_ops(int cpuid)
{
#if IS_ENABLED(CONFIG_RISCV_SBI)
	if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
		if (!cpuid)
			pr_info("SBI v0.2 HSM extension detected\n");
		cpu_ops[cpuid] = &cpu_ops_sbi;
	} else
#endif
		cpu_ops[cpuid] = &cpu_ops_spinwait;
}
115
arch/riscv/kernel/cpu_ops_sbi.c
Normal file
@ -0,0 +1,115 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HSM extension and cpu_ops implementation.
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/cpu_ops.h>
#include <asm/sbi.h>
#include <asm/smp.h>

extern char secondary_start_sbi[];
const struct cpu_operations cpu_ops_sbi;

static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
			      unsigned long priv)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
			hartid, saddr, priv, 0, 0, 0);
	if (ret.error)
		return sbi_err_map_linux_errno(ret.error);
	else
		return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int sbi_hsm_hart_stop(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STOP, 0, 0, 0, 0, 0, 0);

	if (ret.error)
		return sbi_err_map_linux_errno(ret.error);
	else
		return 0;
}

static int sbi_hsm_hart_get_status(unsigned long hartid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STATUS,
			hartid, 0, 0, 0, 0, 0);
	if (ret.error)
		return sbi_err_map_linux_errno(ret.error);
	else
		return ret.value;
}
#endif

static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
	int rc;
	unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
	int hartid = cpuid_to_hartid_map(cpuid);

	cpu_update_secondary_bootdata(cpuid, tidle);
	rc = sbi_hsm_hart_start(hartid, boot_addr, 0);

	return rc;
}

static int sbi_cpu_prepare(unsigned int cpuid)
{
	if (!cpu_ops_sbi.cpu_start) {
		pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
		return -ENODEV;
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int sbi_cpu_disable(unsigned int cpuid)
{
	if (!cpu_ops_sbi.cpu_stop)
		return -EOPNOTSUPP;
	return 0;
}

static void sbi_cpu_stop(void)
{
	int ret;

	ret = sbi_hsm_hart_stop();
	pr_crit("Unable to stop the cpu %u (%d)\n", smp_processor_id(), ret);
}

static int sbi_cpu_is_stopped(unsigned int cpuid)
{
	int rc;
	int hartid = cpuid_to_hartid_map(cpuid);

	rc = sbi_hsm_hart_get_status(hartid);

	if (rc == SBI_HSM_HART_STATUS_STOPPED)
		return 0;
	return rc;
}
#endif

const struct cpu_operations cpu_ops_sbi = {
	.name		= "sbi",
	.cpu_prepare	= sbi_cpu_prepare,
	.cpu_start	= sbi_cpu_start,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= sbi_cpu_disable,
	.cpu_stop	= sbi_cpu_stop,
	.cpu_is_stopped	= sbi_cpu_is_stopped,
#endif
};
43
arch/riscv/kernel/cpu_ops_spinwait.c
Normal file
@ -0,0 +1,43 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/string.h>
#include <asm/cpu_ops.h>
#include <asm/sbi.h>
#include <asm/smp.h>

const struct cpu_operations cpu_ops_spinwait;

static int spinwait_cpu_prepare(unsigned int cpuid)
{
	if (!cpu_ops_spinwait.cpu_start) {
		pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
		return -ENODEV;
	}
	return 0;
}

static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
	/*
	 * In this protocol, all cpus boot on their own accord. _start
	 * selects the first cpu to boot the kernel and causes the remainder
	 * of the cpus to spin in a loop waiting for their stack pointer to be
	 * setup by that main cpu. Writing to bootdata
	 * (i.e __cpu_up_stack_pointer) signals to the spinning cpus that they
	 * can continue the boot process.
	 */
	cpu_update_secondary_bootdata(cpuid, tidle);

	return 0;
}

const struct cpu_operations cpu_ops_spinwait = {
	.name		= "spinwait",
	.cpu_prepare	= spinwait_cpu_prepare,
	.cpu_start	= spinwait_cpu_start,
};
@ -13,17 +13,11 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

	.text
	.altmacro

/*
 * Prepares to enter a system call or exception by saving all registers to the
 * stack.
 */
	.macro SAVE_ALL
	LOCAL _restore_kernel_tpsp
	LOCAL _save_context
#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	/*
	 * If coming from userspace, preserve the user thread pointer and load
	 * the kernel thread pointer. If we came from the kernel, the scratch
@ -90,77 +84,6 @@ _save_context:
	REG_S s3, PT_BADADDR(sp)
	REG_S s4, PT_CAUSE(sp)
	REG_S s5, PT_TP(sp)
	.endm

/*
 * Prepares to return from a system call or exception by restoring all
 * registers from the stack.
 */
	.macro RESTORE_ALL
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)
	.endm

#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
#endif

ENTRY(handle_exception)
	SAVE_ALL

	/*
	 * Set the scratch register to 0, so that if a recursive exception
@ -291,7 +214,63 @@ resume_userspace:
	csrw CSR_SCRATCH, tp

restore_all:
	RESTORE_ALL
	REG_L a0, PT_STATUS(sp)
	/*
	 * The current load reservation is effectively part of the processor's
	 * state, in the sense that load reservations cannot be shared between
	 * different hart contexts. We can't actually save and restore a load
	 * reservation, so instead here we clear any existing reservation --
	 * it's always legal for implementations to clear load reservations at
	 * any point (as long as the forward progress guarantee is kept, but
	 * we'll ignore that here).
	 *
	 * Dangling load reservations can be the result of taking a trap in the
	 * middle of an LR/SC sequence, but can also be the result of a taken
	 * forward branch around an SC -- which is how we implement CAS. As a
	 * result we need to clear reservations between the last CAS and the
	 * jump back to the new context. While it is unlikely the store
	 * completes, implementations are allowed to expand reservations to be
	 * arbitrarily large.
	 */
	REG_L a2, PT_EPC(sp)
	REG_SC x0, a2, PT_EPC(sp)

	csrw CSR_STATUS, a0
	csrw CSR_EPC, a2

	REG_L x1, PT_RA(sp)
	REG_L x3, PT_GP(sp)
	REG_L x4, PT_TP(sp)
	REG_L x5, PT_T0(sp)
	REG_L x6, PT_T1(sp)
	REG_L x7, PT_T2(sp)
	REG_L x8, PT_S0(sp)
	REG_L x9, PT_S1(sp)
	REG_L x10, PT_A0(sp)
	REG_L x11, PT_A1(sp)
	REG_L x12, PT_A2(sp)
	REG_L x13, PT_A3(sp)
	REG_L x14, PT_A4(sp)
	REG_L x15, PT_A5(sp)
	REG_L x16, PT_A6(sp)
	REG_L x17, PT_A7(sp)
	REG_L x18, PT_S2(sp)
	REG_L x19, PT_S3(sp)
	REG_L x20, PT_S4(sp)
	REG_L x21, PT_S5(sp)
	REG_L x22, PT_S6(sp)
	REG_L x23, PT_S7(sp)
	REG_L x24, PT_S8(sp)
	REG_L x25, PT_S9(sp)
	REG_L x26, PT_S10(sp)
	REG_L x27, PT_S11(sp)
	REG_L x28, PT_T3(sp)
	REG_L x29, PT_T4(sp)
	REG_L x30, PT_T5(sp)
	REG_L x31, PT_T6(sp)

	REG_L x2, PT_SP(sp)

#ifdef CONFIG_RISCV_M_MODE
	mret
#else
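The "taken forward branch around an SC" case the comment above describes is the standard LR/SC compare-and-swap idiom. A hedged C sketch (not code from this file; cas_sketch() is hypothetical and uses the RV64 "+A" address constraint the kernel's cmpxchg code also relies on):

	static inline unsigned long cas_sketch(unsigned long *p,
					       unsigned long old,
					       unsigned long newval)
	{
		unsigned long prev, rc;

		__asm__ __volatile__ (
			"0:	lr.d	%0, %2\n"
			"	bne	%0, %3, 1f\n"	/* taken branch skips the sc,
							 * leaving the reservation dangling */
			"	sc.d	%1, %4, %2\n"
			"	bnez	%1, 0b\n"
			"1:\n"
			: "=&r" (prev), "=&r" (rc), "+A" (*p)
			: "r" (old), "r" (newval)
			: "memory");
		return prev;
	}

This is why restore_all clears any reservation with the dummy REG_SC before returning to a new context.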
@ -8,6 +8,7 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static int ftrace_check_current_call(unsigned long hook_pos,
@ -46,20 +47,14 @@ static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
{
	unsigned int call[2];
	unsigned int nops[2] = {NOP4, NOP4};
	int ret = 0;

	make_call(hook_pos, target, call);

	/* replace the auipc-jalr pair at once */
	ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
				 MCOUNT_INSN_SIZE);
	/* return must be -EPERM on write error */
	if (ret)
	/* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
	if (riscv_patch_text_nosync
	    ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
		return -EPERM;

	smp_mb();
	flush_icache_range((void *)hook_pos, (void *)hook_pos + MCOUNT_INSN_SIZE);

	return 0;
}

@@ -14,7 +14,7 @@
#include <asm/hwcap.h>
#include <asm/image.h>

__INIT
__HEAD
ENTRY(_start)
	/*
	 * Image header expected by Linux boot-loaders. The image header data
@@ -45,8 +45,111 @@ ENTRY(_start)
	.ascii RISCV_IMAGE_MAGIC2
	.word 0

.global _start_kernel
_start_kernel:
.align 2
#ifdef CONFIG_MMU
relocate:
	/* Relocate return address */
	li a1, PAGE_OFFSET
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	li a1, SATP_MODE
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
	.global secondary_start_sbi
secondary_start_sbi:
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero

	/* Load the global pointer */
	.option push
	.option norelax
	la gp, __global_pointer$
	.option pop

	/*
	 * Disable FPU to detect illegal usage of
	 * floating point in kernel space
	 */
	li t0, SR_FS
	csrc CSR_STATUS, t0

	/* Set trap vector to spin forever to help debug */
	la a3, .Lsecondary_park
	csrw CSR_TVEC, a3

	slli a3, a0, LGREG
	la a4, __cpu_up_stack_pointer
	la a5, __cpu_up_task_pointer
	add a4, a3, a4
	add a5, a3, a5
	REG_L sp, (a4)
	REG_L tp, (a5)

	.global secondary_start_common
secondary_start_common:

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	call relocate
#endif
	tail smp_callin
#endif /* CONFIG_SMP */

.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

END(_start)

	__INIT
ENTRY(_start_kernel)
	/* Mask all interrupts */
	csrw CSR_IE, zero
	csrw CSR_IP, zero
@@ -131,62 +234,10 @@ clear_bss_done:
	call kasan_early_init
#endif
	/* Start the kernel */
	call soc_early_init
	call parse_dtb
	tail start_kernel

#ifdef CONFIG_MMU
relocate:
	/* Relocate return address */
	li a1, PAGE_OFFSET
	la a2, _start
	sub a1, a1, a2
	add ra, ra, a1

	/* Point stvec to virtual address of instruction after satp write */
	la a2, 1f
	add a2, a2, a1
	csrw CSR_TVEC, a2

	/* Compute satp for kernel page tables, but don't load it yet */
	srl a2, a0, PAGE_SHIFT
	li a1, SATP_MODE
	or a2, a2, a1

	/*
	 * Load trampoline page directory, which will cause us to trap to
	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
	 * full fence here because setup_vm() just wrote these PTEs and we need
	 * to ensure the new translations are in use.
	 */
	la a0, trampoline_pg_dir
	srl a0, a0, PAGE_SHIFT
	or a0, a0, a1
	sfence.vma
	csrw CSR_SATP, a0
.align 2
1:
	/* Set trap vector to spin forever to help debug */
	la a0, .Lsecondary_park
	csrw CSR_TVEC, a0

	/* Reload the global pointer */
.option push
.option norelax
	la gp, __global_pointer$
.option pop

	/*
	 * Switch to kernel page tables. A full fence is necessary in order to
	 * avoid using the trampoline translations, which are only correct for
	 * the first superpage. Fetching the fence is guaranteed to work
	 * because that first superpage is translated the same way.
	 */
	csrw CSR_SATP, a2
	sfence.vma

	ret
#endif /* CONFIG_MMU */

.Lsecondary_start:
#ifdef CONFIG_SMP
	/* Set trap vector to spin forever to help debug */
@@ -211,16 +262,10 @@ relocate:
	beqz tp, .Lwait_for_cpu_up
	fence

#ifdef CONFIG_MMU
	/* Enable virtual memory and relocate to virtual address */
	la a0, swapper_pg_dir
	call relocate
	tail secondary_start_common
#endif

	tail smp_callin
#endif

END(_start)
END(_start_kernel)

#ifdef CONFIG_RISCV_M_MODE
ENTRY(reset_regs)
@@ -301,13 +346,6 @@ ENTRY(reset_regs)
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */

.section ".text", "ax",@progbits
.align 2
.Lsecondary_park:
	/* We lack SMP support or have too many harts, so park this hart */
	wfi
	j .Lsecondary_park

__PAGE_ALIGNED_BSS
	/* Empty zero page */
	.balign PAGE_SIZE

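For clarity, the value written to CSR_SATP in the relocate code above is just the physical page number of the root page table OR'd with the translation-mode bits. An equivalent C sketch, with RV64/Sv39 values assumed purely for illustration:

	#include <stdint.h>

	#define PAGE_SHIFT	12
	#define SATP_MODE_SV39	(8ULL << 60)	/* satp MODE field: Sv39 */

	/* Compose a satp value from the root page table's physical address. */
	static inline uint64_t make_satp(uint64_t root_pt_pa)
	{
		return (root_pt_pa >> PAGE_SHIFT) | SATP_MODE_SV39;
	}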
arch/riscv/kernel/patch.c (new file, 120 lines)
@@ -0,0 +1,120 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>

struct riscv_insn_patch {
	void *addr;
	u32 insn;
	atomic_t cpu_count;
};

#ifdef CONFIG_MMU
static DEFINE_RAW_SPINLOCK(patch_lock);

static void __kprobes *patch_map(void *addr, int fixmap)
{
	uintptr_t uintaddr = (uintptr_t) addr;
	struct page *page;

	if (core_kernel_text(uintaddr))
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);

	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
					 (uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
{
	void *waddr = addr;
	bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);

	if (across_pages)
		patch_map(addr + len, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, insn, len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
#else
static int __kprobes riscv_insn_write(void *addr, const void *insn, size_t len)
{
	return probe_kernel_write(addr, insn, len);
}
#endif /* CONFIG_MMU */

int __kprobes riscv_patch_text_nosync(void *addr, const void *insns, size_t len)
{
	u32 *tp = addr;
	int ret;

	ret = riscv_insn_write(tp, insns, len);

	if (!ret)
		flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);

	return ret;
}

static int __kprobes riscv_patch_text_cb(void *data)
{
	struct riscv_insn_patch *patch = data;
	int ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == 1) {
		ret =
		    riscv_patch_text_nosync(patch->addr, &patch->insn,
					    GET_INSN_LENGTH(patch->insn));
		atomic_inc(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();
		smp_mb();
	}

	return ret;
}

int __kprobes riscv_patch_text(void *addr, u32 insn)
{
	struct riscv_insn_patch patch = {
		.addr = addr,
		.insn = insn,
		.cpu_count = ATOMIC_INIT(0),
	};

	return stop_machine_cpuslocked(riscv_patch_text_cb,
				       &patch, cpu_online_mask);
}
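A hypothetical caller of the interface above: riscv_patch_text() runs the write under stop_machine(), so no other hart can execute the half-patched instruction. 0x00000013 is the canonical RISC-V nop encoding (addi x0, x0, 0).

	/* Illustration only: replace one 32-bit instruction with a nop. */
	static int patch_to_nop(void *insn_addr)
	{
		return riscv_patch_text(insn_addr, 0x00000013);
	}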
@@ -22,6 +22,8 @@
#include <asm/switch_to.h>
#include <asm/thread_info.h>

unsigned long gp_in_global __asm__("gp");

extern asmlinkage void ret_from_fork(void);
extern asmlinkage void ret_from_kernel_thread(void);

@@ -107,9 +109,8 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
	/* p->thread holds context to be restored by __switch_to() */
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* Kernel thread */
		const register unsigned long gp __asm__ ("gp");
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gp = gp;
		childregs->gp = gp_in_global;
		/* Supervisor/Machine, irqs on: */
		childregs->status = SR_PP | SR_PIE;

@@ -1,17 +1,588 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * SBI initialization and all extension implementation.
 *
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/pm.h>
#include <asm/sbi.h>
#include <asm/smp.h>

/* default SBI version is 0.1 */
unsigned long sbi_spec_version = SBI_SPEC_VERSION_DEFAULT;
EXPORT_SYMBOL(sbi_spec_version);

static void (*__sbi_set_timer)(uint64_t stime);
static int (*__sbi_send_ipi)(const unsigned long *hart_mask);
static int (*__sbi_rfence)(int fid, const unsigned long *hart_mask,
			   unsigned long start, unsigned long size,
			   unsigned long arg4, unsigned long arg5);

struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
			unsigned long arg1, unsigned long arg2,
			unsigned long arg3, unsigned long arg4,
			unsigned long arg5)
{
	struct sbiret ret;

	register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
	register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
	register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
	register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
	register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
	register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
	register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
	register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
	asm volatile ("ecall"
		      : "+r" (a0), "+r" (a1)
		      : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
		      : "memory");
	ret.error = a0;
	ret.value = a1;

	return ret;
}
EXPORT_SYMBOL(sbi_ecall);

int sbi_err_map_linux_errno(int err)
{
	switch (err) {
	case SBI_SUCCESS:
		return 0;
	case SBI_ERR_DENIED:
		return -EPERM;
	case SBI_ERR_INVALID_PARAM:
		return -EINVAL;
	case SBI_ERR_INVALID_ADDRESS:
		return -EFAULT;
	case SBI_ERR_NOT_SUPPORTED:
	case SBI_ERR_FAILURE:
	default:
		return -ENOTSUPP;
	}
}
EXPORT_SYMBOL(sbi_err_map_linux_errno);

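Everything below is built on the sbi_ecall() wrapper above. As a quick illustration of a direct use, probing the machine architecture ID through the v0.2 Base extension looks roughly like this (constants as defined in <asm/sbi.h>; sketch only):

	static unsigned long example_get_marchid(void)
	{
		struct sbiret ret;

		/* Base extension, GET_MARCHID function, no arguments. */
		ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_GET_MARCHID,
				0, 0, 0, 0, 0, 0);
		return ret.error ? 0 : ret.value;
	}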
#ifdef CONFIG_RISCV_SBI_V01
/**
 * sbi_console_putchar() - Writes given character to the console device.
 * @ch: The data to be written to the console.
 *
 * Return: None
 */
void sbi_console_putchar(int ch)
{
	sbi_ecall(SBI_EXT_0_1_CONSOLE_PUTCHAR, 0, ch, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_console_putchar);

/**
 * sbi_console_getchar() - Reads a byte from console device.
 *
 * Returns the value read from console.
 */
int sbi_console_getchar(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_0_1_CONSOLE_GETCHAR, 0, 0, 0, 0, 0, 0, 0);

	return ret.error;
}
EXPORT_SYMBOL(sbi_console_getchar);

/**
 * sbi_shutdown() - Remove all the harts from executing supervisor code.
 *
 * Return: None
 */
void sbi_shutdown(void)
{
	sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_shutdown);

/**
 * sbi_clear_ipi() - Clear any pending IPIs for the calling hart.
 *
 * Return: None
 */
void sbi_clear_ipi(void)
{
	sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_clear_ipi);

|
||||
* sbi_set_timer_v01() - Program the timer for next timer event.
|
||||
* @stime_value: The value after which next timer event should fire.
|
||||
*
|
||||
* Return: None
|
||||
*/
|
||||
static void __sbi_set_timer_v01(uint64_t stime_value)
|
||||
{
|
||||
#if __riscv_xlen == 32
|
||||
sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value,
|
||||
stime_value >> 32, 0, 0, 0, 0);
|
||||
#else
|
||||
sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value, 0, 0, 0, 0, 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int __sbi_send_ipi_v01(const unsigned long *hart_mask)
|
||||
{
|
||||
sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)hart_mask,
|
||||
0, 0, 0, 0, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
|
||||
unsigned long start, unsigned long size,
|
||||
unsigned long arg4, unsigned long arg5)
|
||||
{
|
||||
int result = 0;
|
||||
|
||||
/* v0.2 function IDs are equivalent to v0.1 extension IDs */
|
||||
switch (fid) {
|
||||
case SBI_EXT_RFENCE_REMOTE_FENCE_I:
|
||||
sbi_ecall(SBI_EXT_0_1_REMOTE_FENCE_I, 0,
|
||||
(unsigned long)hart_mask, 0, 0, 0, 0, 0);
|
||||
break;
|
||||
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
|
||||
sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA, 0,
|
||||
(unsigned long)hart_mask, start, size,
|
||||
0, 0, 0);
|
||||
break;
|
||||
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
|
||||
sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID, 0,
|
||||
(unsigned long)hart_mask, start, size,
|
||||
arg4, 0, 0);
|
||||
break;
|
||||
default:
|
||||
pr_err("SBI call [%d]not supported in SBI v0.1\n", fid);
|
||||
result = -EINVAL;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
#else
|
||||
static void __sbi_set_timer_v01(uint64_t stime_value)
|
||||
{
|
||||
pr_warn("Timer extension is not available in SBI v%lu.%lu\n",
|
||||
sbi_major_version(), sbi_minor_version());
|
||||
}
|
||||
|
||||
static int __sbi_send_ipi_v01(const unsigned long *hart_mask)
|
||||
{
|
||||
pr_warn("IPI extension is not available in SBI v%lu.%lu\n",
|
||||
sbi_major_version(), sbi_minor_version());
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
|
||||
unsigned long start, unsigned long size,
|
||||
unsigned long arg4, unsigned long arg5)
|
||||
{
|
||||
pr_warn("remote fence extension is not available in SBI v%lu.%lu\n",
|
||||
sbi_major_version(), sbi_minor_version());
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_RISCV_SBI_V01 */
|
||||
|
||||
static void __sbi_set_timer_v02(uint64_t stime_value)
|
||||
{
|
||||
#if __riscv_xlen == 32
|
||||
sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value,
|
||||
stime_value >> 32, 0, 0, 0, 0);
|
||||
#else
|
||||
sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value, 0,
|
||||
0, 0, 0, 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int __sbi_send_ipi_v02(const unsigned long *hart_mask)
|
||||
{
|
||||
unsigned long hartid, hmask_val, hbase;
|
||||
struct cpumask tmask;
|
||||
struct sbiret ret = {0};
|
||||
int result;
|
||||
|
||||
if (!hart_mask || !(*hart_mask)) {
|
||||
riscv_cpuid_to_hartid_mask(cpu_online_mask, &tmask);
|
||||
hart_mask = cpumask_bits(&tmask);
|
||||
}
|
||||
|
||||
hmask_val = 0;
|
||||
hbase = 0;
|
||||
for_each_set_bit(hartid, hart_mask, NR_CPUS) {
|
||||
if (hmask_val && ((hbase + BITS_PER_LONG) <= hartid)) {
|
||||
ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
|
||||
hmask_val, hbase, 0, 0, 0, 0);
|
||||
if (ret.error)
|
||||
goto ecall_failed;
|
||||
hmask_val = 0;
|
||||
hbase = 0;
|
||||
}
|
||||
if (!hmask_val)
|
||||
hbase = hartid;
|
||||
hmask_val |= 1UL << (hartid - hbase);
|
||||
}
|
||||
|
||||
if (hmask_val) {
|
||||
ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
|
||||
hmask_val, hbase, 0, 0, 0, 0);
|
||||
if (ret.error)
|
||||
goto ecall_failed;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
ecall_failed:
|
||||
result = sbi_err_map_linux_errno(ret.error);
|
||||
pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
|
||||
__func__, hbase, hmask_val, result);
|
||||
return result;
|
||||
}
|
||||
|
||||
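The hbase/hmask_val dance in __sbi_send_ipi_v02() above batches harts into windows of BITS_PER_LONG, because the v0.2 calls take a (mask, base) pair. A standalone user-space sketch of the same windowing, printing the calls it would make (illustration only; the bitmap values are arbitrary):

	#include <stdio.h>

	#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
	#define NHARTS		(2 * BITS_PER_LONG)

	static int hart_set(const unsigned long *map, int h)
	{
		return (map[h / BITS_PER_LONG] >> (h % BITS_PER_LONG)) & 1;
	}

	int main(void)
	{
		unsigned long map[2] = { 0x5, 0x3 };	/* e.g. harts 0,2 and 64,65 on 64-bit */
		unsigned long mask = 0, base = 0;

		for (int h = 0; h < NHARTS; h++) {
			if (!hart_set(map, h))
				continue;
			if (mask && h >= (int)(base + BITS_PER_LONG)) {
				/* hart no longer fits the window: flush it */
				printf("sbi call: base=%lu mask=%#lx\n", base, mask);
				mask = 0;
			}
			if (!mask)
				base = h;
			mask |= 1UL << (h - base);
		}
		if (mask)
			printf("sbi call: base=%lu mask=%#lx\n", base, mask);
		return 0;
	}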
static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask_val,
				 unsigned long hbase, unsigned long start,
				 unsigned long size, unsigned long arg4,
				 unsigned long arg5)
{
	struct sbiret ret = {0};
	int ext = SBI_EXT_RFENCE;
	int result = 0;

	switch (fid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		ret = sbi_ecall(ext, fid, hmask_val, hbase, 0, 0, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
				size, arg4, 0);
		break;

	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
		ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
		ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
				size, arg4, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
		ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
				size, 0, 0);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
		ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
				size, arg4, 0);
		break;
	default:
		pr_err("unknown function ID [%lu] for SBI extension [%d]\n",
		       fid, ext);
		result = -EINVAL;
	}

	if (ret.error) {
		result = sbi_err_map_linux_errno(ret.error);
		pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
		       __func__, hbase, hmask_val, result);
	}

	return result;
}

static int __sbi_rfence_v02(int fid, const unsigned long *hart_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	unsigned long hmask_val, hartid, hbase;
	struct cpumask tmask;
	int result;

	if (!hart_mask || !(*hart_mask)) {
		riscv_cpuid_to_hartid_mask(cpu_online_mask, &tmask);
		hart_mask = cpumask_bits(&tmask);
	}

	hmask_val = 0;
	hbase = 0;
	for_each_set_bit(hartid, hart_mask, NR_CPUS) {
		if (hmask_val && ((hbase + BITS_PER_LONG) <= hartid)) {
			result = __sbi_rfence_v02_call(fid, hmask_val, hbase,
						       start, size, arg4, arg5);
			if (result)
				return result;
			hmask_val = 0;
			hbase = 0;
		}
		if (!hmask_val)
			hbase = hartid;
		hmask_val |= 1UL << (hartid - hbase);
	}

	if (hmask_val) {
		result = __sbi_rfence_v02_call(fid, hmask_val, hbase,
					       start, size, arg4, arg5);
		if (result)
			return result;
	}

	return 0;
}

/**
 * sbi_set_timer() - Program the timer for next timer event.
 * @stime_value: The value after which next timer event should fire.
 *
 * Return: None
 */
void sbi_set_timer(uint64_t stime_value)
{
	__sbi_set_timer(stime_value);
}
EXPORT_SYMBOL(sbi_set_timer);

/**
 * sbi_send_ipi() - Send an IPI to any hart.
 * @hart_mask: A cpu mask containing all the target harts.
 *
 * Return: None
 */
void sbi_send_ipi(const unsigned long *hart_mask)
{
	__sbi_send_ipi(hart_mask);
}
EXPORT_SYMBOL(sbi_send_ipi);

/**
 * sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts.
 * @hart_mask: A cpu mask containing all the target harts.
 *
 * Return: None
 */
void sbi_remote_fence_i(const unsigned long *hart_mask)
{
	__sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I,
		     hart_mask, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_fence_i);

/**
 * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
 *			     harts for the specified virtual address range.
 * @hart_mask: A cpu mask containing all the target harts.
 * @start: Start of the virtual address
 * @size: Total size of the virtual address range.
 *
 * Return: None
 */
void sbi_remote_sfence_vma(const unsigned long *hart_mask,
			   unsigned long start,
			   unsigned long size)
{
	__sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
		     hart_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma);

/**
 * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
 * remote harts for a virtual address range belonging to a specific ASID.
 *
 * @hart_mask: A cpu mask containing all the target harts.
 * @start: Start of the virtual address
 * @size: Total size of the virtual address range.
 * @asid: The value of address space identifier (ASID).
 *
 * Return: None
 */
void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid)
{
	__sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
		     hart_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);

/**
 * sbi_remote_hfence_gvma() - Execute HFENCE.GVMA instructions on given remote
 *			      harts for the specified guest physical address range.
 * @hart_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address
 * @size: Total size of the guest physical address range.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int sbi_remote_hfence_gvma(const unsigned long *hart_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
			    hart_mask, start, size, 0, 0);
}
EXPORT_SYMBOL_GPL(sbi_remote_hfence_gvma);

/**
 * sbi_remote_hfence_gvma_vmid() - Execute HFENCE.GVMA instructions on given
 * remote harts for a guest physical address range belonging to a specific VMID.
 *
 * @hart_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address
 * @size: Total size of the guest physical address range.
 * @vmid: The value of guest ID (VMID).
 *
 * Return: 0 on success or a negative error code on failure.
 */
int sbi_remote_hfence_gvma_vmid(const unsigned long *hart_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
			    hart_mask, start, size, vmid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_gvma_vmid);

/**
 * sbi_remote_hfence_vvma() - Execute HFENCE.VVMA instructions on given remote
 *			      harts for the current guest virtual address range.
 * @hart_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address
 * @size: Total size of the current guest virtual address range.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int sbi_remote_hfence_vvma(const unsigned long *hart_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
			    hart_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma);

/**
 * sbi_remote_hfence_vvma_asid() - Execute HFENCE.VVMA instructions on given
 * remote harts for current guest virtual address range belonging to a specific
 * ASID.
 *
 * @hart_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address
 * @size: Total size of the current guest virtual address range.
 * @asid: The value of address space identifier (ASID).
 *
 * Return: 0 on success or a negative error code on failure.
 */
int sbi_remote_hfence_vvma_asid(const unsigned long *hart_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
			    hart_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid);

/**
 * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
 * @extid: The extension ID to be probed.
 *
 * Return: Extension specific nonzero value if yes, -ENOTSUPP otherwise.
 */
int sbi_probe_extension(int extid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
			0, 0, 0, 0, 0);
	if (!ret.error && ret.value)
		return ret.value;

	return -ENOTSUPP;
}
EXPORT_SYMBOL(sbi_probe_extension);

static long __sbi_base_ecall(int fid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
	if (!ret.error)
		return ret.value;
	else
		return sbi_err_map_linux_errno(ret.error);
}

static inline long sbi_get_spec_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
}

static inline long sbi_get_firmware_id(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_ID);
}

static inline long sbi_get_firmware_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}

static void sbi_power_off(void)
{
	sbi_shutdown();
}

static int __init sbi_init(void)
int __init sbi_init(void)
{
	int ret;

	pm_power_off = sbi_power_off;
	ret = sbi_get_spec_version();
	if (ret > 0)
		sbi_spec_version = ret;

	pr_info("SBI specification v%lu.%lu detected\n",
		sbi_major_version(), sbi_minor_version());

	if (!sbi_spec_is_0_1()) {
		pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
			sbi_get_firmware_id(), sbi_get_firmware_version());
		if (sbi_probe_extension(SBI_EXT_TIME) > 0) {
			__sbi_set_timer = __sbi_set_timer_v02;
			pr_info("SBI v0.2 TIME extension detected\n");
		} else {
			__sbi_set_timer = __sbi_set_timer_v01;
		}
		if (sbi_probe_extension(SBI_EXT_IPI) > 0) {
			__sbi_send_ipi = __sbi_send_ipi_v02;
			pr_info("SBI v0.2 IPI extension detected\n");
		} else {
			__sbi_send_ipi = __sbi_send_ipi_v01;
		}
		if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) {
			__sbi_rfence = __sbi_rfence_v02;
			pr_info("SBI v0.2 RFENCE extension detected\n");
		} else {
			__sbi_rfence = __sbi_rfence_v01;
		}
	} else {
		__sbi_set_timer = __sbi_set_timer_v01;
		__sbi_send_ipi = __sbi_send_ipi_v01;
		__sbi_rfence = __sbi_rfence_v01;
	}

	return 0;
}
early_initcall(sbi_init);

@@ -16,12 +16,14 @@
#include <linux/of_platform.h>
#include <linux/sched/task.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>

#include <asm/clint.h>
#include <asm/cpu_ops.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
#include <asm/kasan.h>
@@ -39,9 +41,14 @@ struct screen_info screen_info = {
};
#endif

/* The lucky hart to first increment this variable will boot the other cores */
atomic_t hart_lottery;
/*
 * The lucky hart to first increment this variable will boot the other cores.
 * This is used before the kernel initializes the BSS so it can't be in the
 * BSS.
 */
atomic_t hart_lottery __section(.sdata);
unsigned long boot_cpu_hartid;
static DEFINE_PER_CPU(struct cpu, cpu_devices);

void __init parse_dtb(void)
{
@@ -79,9 +86,28 @@ void __init setup_arch(char **cmdline_p)
	kasan_init();
#endif

#if IS_ENABLED(CONFIG_RISCV_SBI)
	sbi_init();
#endif

#ifdef CONFIG_SMP
	setup_smp();
#endif

	riscv_fill_hwcap();
}

static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_devices, i);

		cpu->hotpluggable = cpu_has_hotplug(i);
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

|
@ -25,6 +25,7 @@
|
||||
#include <linux/sched/task_stack.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <asm/clint.h>
|
||||
#include <asm/cpu_ops.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/tlbflush.h>
|
||||
@ -34,8 +35,6 @@
|
||||
|
||||
#include "head.h"
|
||||
|
||||
void *__cpu_up_stack_pointer[NR_CPUS];
|
||||
void *__cpu_up_task_pointer[NR_CPUS];
|
||||
static DECLARE_COMPLETION(cpu_running);
|
||||
|
||||
void __init smp_prepare_boot_cpu(void)
|
||||
@ -46,6 +45,7 @@ void __init smp_prepare_boot_cpu(void)
|
||||
void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
int cpuid;
|
||||
int ret;
|
||||
|
||||
/* This covers non-smp usecase mandated by "nosmp" option */
|
||||
if (max_cpus == 0)
|
||||
@ -54,6 +54,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
for_each_possible_cpu(cpuid) {
|
||||
if (cpuid == smp_processor_id())
|
||||
continue;
|
||||
if (cpu_ops[cpuid]->cpu_prepare) {
|
||||
ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
|
||||
if (ret)
|
||||
continue;
|
||||
}
|
||||
set_cpu_present(cpuid, true);
|
||||
}
|
||||
}
|
||||
@ -65,6 +70,8 @@ void __init setup_smp(void)
|
||||
bool found_boot_cpu = false;
|
||||
int cpuid = 1;
|
||||
|
||||
cpu_set_ops(0);
|
||||
|
||||
for_each_of_cpu_node(dn) {
|
||||
hart = riscv_of_processor_hartid(dn);
|
||||
if (hart < 0)
|
||||
@ -92,36 +99,38 @@ void __init setup_smp(void)
|
||||
cpuid, nr_cpu_ids);
|
||||
|
||||
for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
|
||||
if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
|
||||
if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
|
||||
cpu_set_ops(cpuid);
|
||||
set_cpu_possible(cpuid, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int start_secondary_cpu(int cpu, struct task_struct *tidle)
|
||||
{
|
||||
if (cpu_ops[cpu]->cpu_start)
|
||||
return cpu_ops[cpu]->cpu_start(cpu, tidle);
|
||||
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
|
||||
{
|
||||
int ret = 0;
|
||||
int hartid = cpuid_to_hartid_map(cpu);
|
||||
tidle->thread_info.cpu = cpu;
|
||||
|
||||
/*
|
||||
* On RISC-V systems, all harts boot on their own accord. Our _start
|
||||
* selects the first hart to boot the kernel and causes the remainder
|
||||
* of the harts to spin in a loop waiting for their stack pointer to be
|
||||
* setup by that main hart. Writing __cpu_up_stack_pointer signals to
|
||||
* the spinning harts that they can continue the boot process.
|
||||
*/
|
||||
smp_mb();
|
||||
WRITE_ONCE(__cpu_up_stack_pointer[hartid],
|
||||
task_stack_page(tidle) + THREAD_SIZE);
|
||||
WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
|
||||
|
||||
lockdep_assert_held(&cpu_running);
|
||||
wait_for_completion_timeout(&cpu_running,
|
||||
ret = start_secondary_cpu(cpu, tidle);
|
||||
if (!ret) {
|
||||
lockdep_assert_held(&cpu_running);
|
||||
wait_for_completion_timeout(&cpu_running,
|
||||
msecs_to_jiffies(1000));
|
||||
|
||||
if (!cpu_online(cpu)) {
|
||||
pr_crit("CPU%u: failed to come online\n", cpu);
|
||||
ret = -EIO;
|
||||
if (!cpu_online(cpu)) {
|
||||
pr_crit("CPU%u: failed to come online\n", cpu);
|
||||
ret = -EIO;
|
||||
}
|
||||
} else {
|
||||
pr_crit("CPU%u: failed to start\n", cpu);
|
||||
}
|
||||
|
||||
return ret;
|
||||
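The spinning side of the handshake described in the comment above lives in head.S as assembly; rendered as C, each secondary hart effectively does the following before entering the kernel proper (a sketch only, not the actual code):

	/* Sketch of .Lwait_for_cpu_up: poll until the boot hart publishes
	 * our stack and task pointers, then continue booting. */
	while (!READ_ONCE(__cpu_up_stack_pointer[hartid]) ||
	       !READ_ONCE(__cpu_up_task_pointer[hartid]))
		cpu_relax();	/* pair with the boot hart's smp_mb() */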
@@ -134,7 +143,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
/*
 * C entry point for a secondary processor.
 */
asmlinkage __visible void __init smp_callin(void)
asmlinkage __visible void smp_callin(void)
{
	struct mm_struct *mm = &init_mm;

arch/riscv/kernel/soc.c (new file, 28 lines)
@@ -0,0 +1,28 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */
#include <linux/init.h>
#include <linux/libfdt.h>
#include <asm/pgtable.h>
#include <asm/soc.h>

/*
 * This is called extremely early, before parse_dtb(), to allow initializing
 * SoC hardware before memory or any device driver initialization.
 */
void __init soc_early_init(void)
{
	void (*early_fn)(const void *fdt);
	const struct of_device_id *s;
	const void *fdt = dtb_early_va;

	for (s = (void *)&__soc_early_init_table_start;
	     (void *)s < (void *)&__soc_early_init_table_end; s++) {
		if (!fdt_node_check_compatible(fdt, 0, s->compatible)) {
			early_fn = s->data;
			early_fn(fdt);
			return;
		}
	}
}
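Entries in the table walked above are registered via the SOC_EARLY_INIT_DECLARE() helper from <asm/soc.h>. A hypothetical board hook would look roughly like this (the board name and compatible string are made up, and the exact macro signature is an assumption from memory):

	/* Hypothetical SoC hook: runs before memory setup, may touch only
	 * the FDT and fixed hardware registers. */
	static void __init myboard_early_init(const void *fdt)
	{
		/* e.g. ungate an always-on clock the early console needs */
	}
	SOC_EARLY_INIT_DECLARE(myboard, "vendor,myboard", myboard_early_init);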
@@ -19,6 +19,8 @@ struct stackframe {
	unsigned long ra;
};

register unsigned long sp_in_global __asm__("sp");

void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(unsigned long, void *), void *arg)
{
@@ -29,7 +31,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp __asm__ ("sp");
		const register unsigned long current_sp = sp_in_global;
		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
@@ -73,8 +75,7 @@ static void notrace walk_stackframe(struct task_struct *task,
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp __asm__ ("sp");
		sp = current_sp;
		sp = sp_in_global;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */

@@ -97,12 +97,33 @@ DO_ERROR_INFO(do_trap_insn_fault,
	SIGSEGV, SEGV_ACCERR, "instruction access fault");
DO_ERROR_INFO(do_trap_insn_illegal,
	SIGILL, ILL_ILLOPC, "illegal instruction");
DO_ERROR_INFO(do_trap_load_misaligned,
	SIGBUS, BUS_ADRALN, "load address misaligned");
DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");
#ifndef CONFIG_RISCV_M_MODE
DO_ERROR_INFO(do_trap_load_misaligned,
	SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
DO_ERROR_INFO(do_trap_store_misaligned,
	SIGBUS, BUS_ADRALN, "store (or AMO) address misaligned");
	SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
#else
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);

asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
{
	if (!handle_misaligned_load(regs))
		return;
	do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
		      "Oops - load address misaligned");
}

asmlinkage void do_trap_store_misaligned(struct pt_regs *regs)
{
	if (!handle_misaligned_store(regs))
		return;
	do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
		      "Oops - store (or AMO) address misaligned");
}
#endif
DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_u,
@@ -118,7 +139,8 @@ static inline unsigned long get_break_insn_length(unsigned long pc)

	if (probe_kernel_address((bug_insn_t *)pc, insn))
		return 0;
	return (((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? 4UL : 2UL);

	return GET_INSN_LENGTH(insn);
}

asmlinkage __visible void do_trap_break(struct pt_regs *regs)
@@ -147,7 +169,7 @@ int is_valid_bugaddr(unsigned long pc)
}
#endif /* CONFIG_GENERIC_BUG */

void __init trap_init(void)
void trap_init(void)
{
	/*
	 * Set sup0 scratch register to 0, indicating to exception vector
arch/riscv/kernel/traps_misaligned.c (new file, 370 lines)
@@ -0,0 +1,370 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/irq.h>

#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f

#define INSN_MATCH_FLW		0x2007
#define INSN_MASK_FLW		0x707f
#define INSN_MATCH_FLD		0x3007
#define INSN_MASK_FLD		0x707f
#define INSN_MATCH_FLQ		0x4007
#define INSN_MASK_FLQ		0x707f
#define INSN_MATCH_FSW		0x2027
#define INSN_MASK_FSW		0x707f
#define INSN_MATCH_FSD		0x3027
#define INSN_MASK_FSD		0x707f
#define INSN_MATCH_FSQ		0x4027
#define INSN_MASK_FSQ		0x707f

#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003

#define INSN_MATCH_C_FLD	0x2000
#define INSN_MASK_C_FLD		0xe003
#define INSN_MATCH_C_FLW	0x6000
#define INSN_MASK_C_FLW		0xe003
#define INSN_MATCH_C_FSD	0xa000
#define INSN_MASK_C_FSD		0xe003
#define INSN_MATCH_C_FSW	0xe000
#define INSN_MASK_C_FSW		0xe003
#define INSN_MATCH_C_FLDSP	0x2002
#define INSN_MASK_C_FLDSP	0xe003
#define INSN_MATCH_C_FSDSP	0xa002
#define INSN_MASK_C_FSDSP	0xe003
#define INSN_MATCH_C_FLWSP	0x6002
#define INSN_MASK_C_FLWSP	0xe003
#define INSN_MATCH_C_FSWSP	0xe002
#define INSN_MASK_C_FSWSP	0xe003

#define INSN_LEN(insn)		((((insn) & 0x3) < 0x3) ? 2 : 4)

#if defined(CONFIG_64BIT)
#define LOG_REGBYTES		3
#define XLEN			64
#else
#define LOG_REGBYTES		2
#define XLEN			32
#endif
#define REGBYTES		(1 << LOG_REGBYTES)
#define XLEN_MINUS_16		((XLEN) - 16)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2

#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)

#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	(ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))

#define GET_RM(insn)		(((insn) >> 12) & 7)

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))
#define MASK_FUNCT3		0x7000

#define GET_PRECISION(insn)	(((insn) >> 25) & 3)
#define GET_RM(insn)		(((insn) >> 12) & 7)
#define PRECISION_S		0
#define PRECISION_D		1

#define STR(x)			XSTR(x)
#define XSTR(x)			#x

#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn)	\
static inline type load_##type(const type *addr)	\
{							\
	type val;					\
	asm (#insn " %0, %1"				\
	     : "=&r" (val) : "m" (*addr));		\
	return val;					\
}

#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn)	\
static inline void store_##type(type *addr, type val)	\
{							\
	asm volatile (#insn " %0, %1\n"			\
		      : : "r" (val), "m" (*addr));	\
}

DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw)
#if defined(CONFIG_64BIT)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld)
#else
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw)

static inline u64 load_u64(const u64 *addr)
{
	return load_u32((u32 *)addr)
		+ ((u64)load_u32((u32 *)addr + 1) << 32);
}

static inline void store_u64(u64 *addr, u64 val)
{
	store_u32((u32 *)addr, val);
	store_u32((u32 *)addr + 1, val >> 32);
}
#endif

static inline ulong get_insn(ulong mepc)
{
	register ulong __mepc asm ("a2") = mepc;
	ulong val, rvc_mask = 3, tmp;

	asm ("and %[tmp], %[addr], 2\n"
		"bnez %[tmp], 1f\n"
#if defined(CONFIG_64BIT)
		STR(LWU) " %[insn], (%[addr])\n"
#else
		STR(LW) " %[insn], (%[addr])\n"
#endif
		"and %[tmp], %[insn], %[rvc_mask]\n"
		"beq %[tmp], %[rvc_mask], 2f\n"
		"sll %[insn], %[insn], %[xlen_minus_16]\n"
		"srl %[insn], %[insn], %[xlen_minus_16]\n"
		"j 2f\n"
		"1:\n"
		"lhu %[insn], (%[addr])\n"
		"and %[tmp], %[insn], %[rvc_mask]\n"
		"bne %[tmp], %[rvc_mask], 2f\n"
		"lhu %[tmp], 2(%[addr])\n"
		"sll %[tmp], %[tmp], 16\n"
		"add %[insn], %[insn], %[tmp]\n"
		"2:"
	: [insn] "=&r" (val), [tmp] "=&r" (tmp)
	: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
	  [xlen_minus_16] "i" (XLEN_MINUS_16));

	return val;
}

union reg_data {
	u8 data_bytes[8];
	ulong data_ulong;
	u64 data_u64;
};

int handle_misaligned_load(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn = get_insn(epc);
	unsigned long addr = csr_read(mtval);
	int i, fp = 0, shift = 0, len = 0;

	regs->epc = 0;

	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_FLD) == INSN_MATCH_FLD) {
		fp = 1;
		len = 8;
	} else if ((insn & INSN_MASK_FLW) == INSN_MATCH_FLW) {
		fp = 1;
		len = 4;
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(unsigned long) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(unsigned long) - len);
	} else if ((insn & INSN_MASK_C_FLD) == INSN_MATCH_C_FLD) {
		fp = 1;
		len = 8;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_FLDSP) == INSN_MATCH_C_FLDSP) {
		fp = 1;
		len = 8;
#if defined(CONFIG_32BIT)
	} else if ((insn & INSN_MASK_C_FLW) == INSN_MATCH_C_FLW) {
		fp = 1;
		len = 4;
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_FLWSP) == INSN_MATCH_C_FLWSP) {
		fp = 1;
		len = 4;
#endif
	} else {
		regs->epc = epc;
		return -1;
	}

	val.data_u64 = 0;
	for (i = 0; i < len; i++)
		val.data_bytes[i] = load_u8((void *)(addr + i));

	if (fp)
		return -1;
	SET_RD(insn, regs, val.data_ulong << shift >> shift);

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}

int handle_misaligned_store(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn = get_insn(epc);
	unsigned long addr = csr_read(mtval);
	int i, len = 0;

	regs->epc = 0;

	val.data_ulong = GET_RS2(insn, regs);

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#if defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		val.data_ulong = GET_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		val.data_ulong = GET_RS2C(insn, regs);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		val.data_ulong = GET_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		val.data_ulong = GET_RS2C(insn, regs);
	} else {
		regs->epc = epc;
		return -1;
	}

	for (i = 0; i < len; i++)
		store_u8((void *)(addr + i), val.data_bytes[i]);

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}
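As a sanity check on the mask/match scheme above: `lw a0, 0(a1)` assembles to 0x0005a503, and masking with INSN_MASK_LW recovers INSN_MATCH_LW; the low two bits being 0b11 also marks it as a 32-bit instruction for INSN_LEN(). A tiny illustrative self-test, assuming the macros above are in scope:

	static void __maybe_unused insn_decode_selftest(void)
	{
		unsigned long insn = 0x0005a503;	/* lw a0, 0(a1) */

		BUILD_BUG_ON((0x0005a503 & INSN_MASK_LW) != INSN_MATCH_LW);
		WARN_ON(INSN_LEN(insn) != 4);		/* bits [1:0] == 0b11 */
	}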
@@ -9,7 +9,9 @@
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/set_memory.h>

#include <linux/sizes.h>
OUTPUT_ARCH(riscv)
ENTRY(_start)

@@ -20,10 +22,18 @@ SECTIONS
	/* Beginning of code and text segment */
	. = LOAD_OFFSET;
	_start = .;
	__init_begin = .;
	HEAD_TEXT_SECTION
	. = ALIGN(PAGE_SIZE);

	__init_begin = .;
	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)
	. = ALIGN(8);
	__soc_early_init_table : {
		__soc_early_init_table_start = .;
		KEEP(*(__soc_early_init_table))
		__soc_early_init_table_end = .;
	}
	/* we have to discard exit text and such at runtime, not link time */
	.exit.text :
	{
@@ -36,6 +46,7 @@ SECTIONS
	PERCPU_SECTION(L1_CACHE_BYTES)
	__init_end = .;

	. = ALIGN(SECTION_ALIGN);
	.text : {
		_text = .;
		_stext = .;
@@ -53,24 +64,26 @@ SECTIONS

	/* Start of data section */
	_sdata = .;
	RO_DATA(L1_CACHE_BYTES)
	RO_DATA(SECTION_ALIGN)
	.srodata : {
		*(.srodata*)
	}

	EXCEPTION_TABLE(0x10)

	. = ALIGN(SECTION_ALIGN);
	_data = .;

	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	.sdata : {
		__global_pointer$ = . + 0x800;
		*(.sdata*)
		/* End of data section */
		_edata = .;
		*(.sbss*)
	}

	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)

	EXCEPTION_TABLE(0x10)

	.rel.dyn : {
		*(.rel.dyn*)
	}

@@ -3,14 +3,12 @@
#include <asm/asm.h>
#include <asm/csr.h>

	.altmacro
	.macro fixup op reg addr lbl
	LOCAL _epc
_epc:
100:
	\op \reg, \addr
	.section __ex_table,"a"
	.balign RISCV_SZPTR
	RISCV_PTR _epc, \lbl
	RISCV_PTR 100b, \lbl
	.previous
	.endm

@@ -7,7 +7,7 @@ endif

obj-y += init.o
obj-y += extable.o
obj-$(CONFIG_MMU) += fault.o
obj-$(CONFIG_MMU) += fault.o pageattr.o
obj-y += cacheflush.o
obj-y += context.o

@@ -15,6 +15,7 @@ ifeq ($(CONFIG_MMU),y)
obj-$(CONFIG_SMP) += tlbflush.o
endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
obj-$(CONFIG_KASAN) += kasan_init.o

ifdef CONFIG_KASAN
@@ -4,14 +4,12 @@

int pud_huge(pud_t pud)
{
	return pud_present(pud) &&
		(pud_val(pud) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
	return pud_leaf(pud);
}

int pmd_huge(pmd_t pmd)
{
	return pmd_present(pmd) &&
		(pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
	return pmd_leaf(pmd);
}

static __init int setup_hugepagesz(char *opt)

@@ -12,6 +12,7 @@
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
@@ -477,6 +478,17 @@ static void __init setup_vm_final(void)
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();
}

void free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;

	/* Make the region non-executable. */
	set_memory_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
	free_initmem_default(POISON_FREE_INITMEM);
}

#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
@@ -488,6 +500,38 @@ static inline void setup_vm_final(void)
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_STRICT_KERNEL_RWX
void set_kernel_text_rw(void)
{
	unsigned long text_start = (unsigned long)_text;
	unsigned long text_end = (unsigned long)_etext;

	set_memory_rw(text_start, (text_end - text_start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long text_start = (unsigned long)_text;
	unsigned long text_end = (unsigned long)_etext;

	set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long text_start = (unsigned long)_text;
	unsigned long text_end = (unsigned long)_etext;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

	set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
}
#endif

void __init paging_init(void)
{
	setup_vm_final();

arch/riscv/mm/pageattr.c (new file, 187 lines)
@@ -0,0 +1,187 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>

struct pageattr_masks {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
	struct pageattr_masks *masks = walk->private;
	unsigned long new_val = val;

	new_val &= ~(pgprot_val(masks->clear_mask));
	new_val |= (pgprot_val(masks->set_mask));

	return new_val;
}

static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pgd_t val = READ_ONCE(*pgd);

	if (pgd_leaf(val)) {
		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
		set_pgd(pgd, val);
	}

	return 0;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	p4d_t val = READ_ONCE(*p4d);

	if (p4d_leaf(val)) {
		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
		set_p4d(p4d, val);
	}

	return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pud_t val = READ_ONCE(*pud);

	if (pud_leaf(val)) {
		val = __pud(set_pageattr_masks(pud_val(val), walk));
		set_pud(pud, val);
	}

	return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pmd_t val = READ_ONCE(*pmd);

	if (pmd_leaf(val)) {
		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
		set_pmd(pmd, val);
	}

	return 0;
}

static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_t val = READ_ONCE(*pte);

	val = __pte(set_pageattr_masks(pte_val(val), walk));
	set_pte(pte, val);

	return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
			     int depth, struct mm_walk *walk)
{
	/* Nothing to do here */
	return 0;
}

static const struct mm_walk_ops pageattr_ops = {
	.pgd_entry = pageattr_pgd_entry,
	.p4d_entry = pageattr_p4d_entry,
	.pud_entry = pageattr_pud_entry,
	.pmd_entry = pageattr_pmd_entry,
	.pte_entry = pageattr_pte_entry,
	.pte_hole = pageattr_pte_hole,
};

static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
			pgprot_t clear_mask)
{
	int ret;
	unsigned long start = addr;
	unsigned long end = start + PAGE_SIZE * numpages;
	struct pageattr_masks masks = {
		.set_mask = set_mask,
		.clear_mask = clear_mask
	};

	if (!numpages)
		return 0;

	down_read(&init_mm.mmap_sem);
	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				    &masks);
	up_read(&init_mm.mmap_sem);

	flush_tlb_kernel_range(start, end);

	return ret;
}

int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
			    __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
			    __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

int set_direct_map_invalid_noflush(struct page *page)
|
||||
{
|
||||
unsigned long start = (unsigned long)page_address(page);
|
||||
unsigned long end = start + PAGE_SIZE;
|
||||
struct pageattr_masks masks = {
|
||||
.set_mask = __pgprot(0),
|
||||
.clear_mask = __pgprot(_PAGE_PRESENT)
|
||||
};
|
||||
|
||||
return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
|
||||
}
|
||||
|
||||
int set_direct_map_default_noflush(struct page *page)
|
||||
{
|
||||
unsigned long start = (unsigned long)page_address(page);
|
||||
unsigned long end = start + PAGE_SIZE;
|
||||
struct pageattr_masks masks = {
|
||||
.set_mask = PAGE_KERNEL,
|
||||
.clear_mask = __pgprot(0)
|
||||
};
|
||||
|
||||
return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
|
||||
}
|
||||
|
||||
void __kernel_map_pages(struct page *page, int numpages, int enable)
|
||||
{
|
||||
if (!debug_pagealloc_enabled())
|
||||
return;
|
||||
|
||||
if (enable)
|
||||
__set_memory((unsigned long)page_address(page), numpages,
|
||||
__pgprot(_PAGE_PRESENT), __pgprot(0));
|
||||
else
|
||||
__set_memory((unsigned long)page_address(page), numpages,
|
||||
__pgprot(0), __pgprot(_PAGE_PRESENT));
|
||||
}
|
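
Each exported helper above is just a (set_mask, clear_mask) pair handed to __set_memory(): set_memory_ro() sets _PAGE_READ and clears _PAGE_WRITE, set_memory_nx() only clears _PAGE_EXEC, and so on. A hypothetical caller sealing a page-aligned buffer (my_buf and seal_my_buf are invented names, not part of this patch):

	/* Hypothetical user of the new API; my_buf is an assumed example object. */
	static char my_buf[PAGE_SIZE] __aligned(PAGE_SIZE);

	static void seal_my_buf(void)
	{
		unsigned long addr = (unsigned long)my_buf;

		set_memory_ro(addr, 1);	/* set _PAGE_READ, clear _PAGE_WRITE */
		set_memory_nx(addr, 1);	/* clear _PAGE_EXEC */
	}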

arch/riscv/mm/ptdump.c (new file, 317 lines)
@ -0,0 +1,317 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ptdump.h>

#include <asm/ptdump.h>
#include <asm/pgtable.h>
#include <asm/kasan.h>

#define pt_dump_seq_printf(m, fmt, args...)	\
({						\
	if (m)					\
		seq_printf(m, fmt, ##args);	\
})

#define pt_dump_seq_puts(m, fmt)	\
({					\
	if (m)				\
		seq_printf(m, fmt);	\
})

/*
 * The page dumper groups page table entries of the same type into a single
 * description. It uses pg_state to track the range information while
 * iterating over the pte entries. When the continuity is broken it then
 * dumps out a description of the range.
 */
struct pg_state {
	struct ptdump_state ptdump;
	struct seq_file *seq;
	const struct addr_marker *marker;
	unsigned long start_address;
	unsigned long start_pa;
	unsigned long last_pa;
	int level;
	u64 current_prot;
	bool check_wx;
	unsigned long wx_pages;
};

/* Address marker */
struct addr_marker {
	unsigned long start_address;
	const char *name;
};

static struct addr_marker address_markers[] = {
#ifdef CONFIG_KASAN
	{KASAN_SHADOW_START,	"Kasan shadow start"},
	{KASAN_SHADOW_END,	"Kasan shadow end"},
#endif
	{FIXADDR_START,		"Fixmap start"},
	{FIXADDR_TOP,		"Fixmap end"},
	{PCI_IO_START,		"PCI I/O start"},
	{PCI_IO_END,		"PCI I/O end"},
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	{VMEMMAP_START,		"vmemmap start"},
	{VMEMMAP_END,		"vmemmap end"},
#endif
	{VMALLOC_START,		"vmalloc() area"},
	{VMALLOC_END,		"vmalloc() end"},
	{PAGE_OFFSET,		"Linear mapping"},
	{-1, NULL},
};

/* Page Table Entry */
struct prot_bits {
	u64 mask;
	u64 val;
	const char *set;
	const char *clear;
};

static const struct prot_bits pte_bits[] = {
	{
		.mask = _PAGE_SOFT,
		.val = _PAGE_SOFT,
		.set = "RSW",
		.clear = "   ",
	}, {
		.mask = _PAGE_DIRTY,
		.val = _PAGE_DIRTY,
		.set = "D",
		.clear = ".",
	}, {
		.mask = _PAGE_ACCESSED,
		.val = _PAGE_ACCESSED,
		.set = "A",
		.clear = ".",
	}, {
		.mask = _PAGE_GLOBAL,
		.val = _PAGE_GLOBAL,
		.set = "G",
		.clear = ".",
	}, {
		.mask = _PAGE_USER,
		.val = _PAGE_USER,
		.set = "U",
		.clear = ".",
	}, {
		.mask = _PAGE_EXEC,
		.val = _PAGE_EXEC,
		.set = "X",
		.clear = ".",
	}, {
		.mask = _PAGE_WRITE,
		.val = _PAGE_WRITE,
		.set = "W",
		.clear = ".",
	}, {
		.mask = _PAGE_READ,
		.val = _PAGE_READ,
		.set = "R",
		.clear = ".",
	}, {
		.mask = _PAGE_PRESENT,
		.val = _PAGE_PRESENT,
		.set = "V",
		.clear = ".",
	}
};

/* Page Level */
struct pg_level {
	const char *name;
	u64 mask;
};

static struct pg_level pg_level[] = {
	{ /* pgd */
		.name = "PGD",
	}, { /* p4d */
		.name = (CONFIG_PGTABLE_LEVELS > 4) ? "P4D" : "PGD",
	}, { /* pud */
		.name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
	}, { /* pmd */
		.name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
	}, { /* pte */
		.name = "PTE",
	},
};

static void dump_prot(struct pg_state *st)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pte_bits); i++) {
		const char *s;

		if ((st->current_prot & pte_bits[i].mask) == pte_bits[i].val)
			s = pte_bits[i].set;
		else
			s = pte_bits[i].clear;

		if (s)
			pt_dump_seq_printf(st->seq, " %s", s);
	}
}

#ifdef CONFIG_64BIT
#define ADDR_FORMAT	"0x%016lx"
#else
#define ADDR_FORMAT	"0x%08lx"
#endif

static void dump_addr(struct pg_state *st, unsigned long addr)
{
	static const char units[] = "KMGTPE";
	const char *unit = units;
	unsigned long delta;

	pt_dump_seq_printf(st->seq, ADDR_FORMAT "-" ADDR_FORMAT "   ",
			   st->start_address, addr);

	pt_dump_seq_printf(st->seq, " " ADDR_FORMAT " ", st->start_pa);
	delta = (addr - st->start_address) >> 10;

	while (!(delta & 1023) && unit[1]) {
		delta >>= 10;
		unit++;
	}

	pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
			   pg_level[st->level].name);
}

static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
	if (!st->check_wx)
		return;

	if ((st->current_prot & (_PAGE_WRITE | _PAGE_EXEC)) !=
	    (_PAGE_WRITE | _PAGE_EXEC))
		return;

	WARN_ONCE(1, "riscv/mm: Found insecure W+X mapping at address %p/%pS\n",
		  (void *)st->start_address, (void *)st->start_address);

	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}

static void note_page(struct ptdump_state *pt_st, unsigned long addr,
		      int level, unsigned long val)
{
	struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
	u64 pa = PFN_PHYS(pte_pfn(__pte(val)));
	u64 prot = 0;

	if (level >= 0)
		prot = val & pg_level[level].mask;

	if (st->level == -1) {
		st->level = level;
		st->current_prot = prot;
		st->start_address = addr;
		st->start_pa = pa;
		st->last_pa = pa;
		pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
	} else if (prot != st->current_prot ||
		   level != st->level || addr >= st->marker[1].start_address) {
		if (st->current_prot) {
			note_prot_wx(st, addr);
			dump_addr(st, addr);
			dump_prot(st);
			pt_dump_seq_puts(st->seq, "\n");
		}

		while (addr >= st->marker[1].start_address) {
			st->marker++;
			pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
					   st->marker->name);
		}

		st->start_address = addr;
		st->start_pa = pa;
		st->last_pa = pa;
		st->current_prot = prot;
		st->level = level;
	} else {
		st->last_pa = pa;
	}
}

static void ptdump_walk(struct seq_file *s)
{
	struct pg_state st = {
		.seq = s,
		.marker = address_markers,
		.level = -1,
		.ptdump = {
			.note_page = note_page,
			.range = (struct ptdump_range[]) {
				{KERN_VIRT_START, ULONG_MAX},
				{0, 0}
			}
		}
	};

	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
}

void ptdump_check_wx(void)
{
	struct pg_state st = {
		.seq = NULL,
		.marker = (struct addr_marker[]) {
			{0, NULL},
			{-1, NULL},
		},
		.level = -1,
		.check_wx = true,
		.ptdump = {
			.note_page = note_page,
			.range = (struct ptdump_range[]) {
				{KERN_VIRT_START, ULONG_MAX},
				{0, 0}
			}
		}
	};

	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);

	if (st.wx_pages)
		pr_warn("Checked W+X mappings: failed, %lu W+X pages found\n",
			st.wx_pages);
	else
		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}

static int ptdump_show(struct seq_file *m, void *v)
{
	ptdump_walk(m);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ptdump);

static int ptdump_init(void)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(pg_level); i++)
		for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
			pg_level[i].mask |= pte_bits[j].mask;

	debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
			    &ptdump_fops);

	return 0;
}

device_initcall(ptdump_init);
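
The only non-obvious piece of dump_addr() is the unit-scaling loop: it keeps dividing the mapping size by 1024 while the result stays an exact multiple, walking the K/M/G/T/P/E unit string. Extracted into a stand-alone program for illustration:

	#include <stdio.h>

	/* The unit-scaling loop from dump_addr(), run on a 2 MiB mapping. */
	int main(void)
	{
		static const char units[] = "KMGTPE";
		const char *unit = units;
		unsigned long delta = (2UL << 20) >> 10;	/* size in KiB: 2048 */

		while (!(delta & 1023) && unit[1]) {	/* exact multiple of 1024? */
			delta >>= 10;
			unit++;
		}
		printf("%9lu%c\n", delta, *unit);	/* prints "        2M" */
		return 0;
	}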

@ -75,14 +75,12 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,

 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
 		/*
-		 * If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a
-		 * displacement in the interval [0, MIN_KIMG_ALIGN) that
-		 * doesn't violate this kernel's de-facto alignment
+		 * Produce a displacement in the interval [0, MIN_KIMG_ALIGN)
+		 * that doesn't violate this kernel's de-facto alignment
 		 * constraints.
 		 */
 		u32 mask = (MIN_KIMG_ALIGN - 1) & ~(EFI_KIMG_ALIGN - 1);
-		u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
-			     (phys_seed >> 32) & mask : TEXT_OFFSET;
+		u32 offset = (phys_seed >> 32) & mask;

 		/*
 		 * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not

@ -31,7 +31,7 @@ void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops)
 {
 	BUG_ON(!ops);
 	BUG_ON(!ops->dev);
-	ps3_sys_manager_ops = ops ? *ops : ps3_sys_manager_ops;
+	ps3_sys_manager_ops = *ops;
 }
 EXPORT_SYMBOL_GPL(ps3_sys_manager_register_ops);

@ -22,5 +22,6 @@ source "drivers/soc/ux500/Kconfig"
 source "drivers/soc/versatile/Kconfig"
 source "drivers/soc/xilinx/Kconfig"
 source "drivers/soc/zte/Kconfig"
+source "drivers/soc/kendryte/Kconfig"

 endmenu
|
@ -28,3 +28,4 @@ obj-$(CONFIG_ARCH_U8500) += ux500/
|
||||
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
|
||||
obj-y += xilinx/
|
||||
obj-$(CONFIG_ARCH_ZX) += zte/
|
||||
obj-$(CONFIG_SOC_KENDRYTE) += kendryte/
|
||||
|

drivers/soc/kendryte/Kconfig (new file, 14 lines)
@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0

if SOC_KENDRYTE

config K210_SYSCTL
	bool "Kendryte K210 system controller"
	default y
	depends on RISCV
	help
	  Enables controlling the K210's various clocks and allows
	  general-purpose use of the extra 2 MB of SRAM normally
	  reserved for the AI engine.

endif

drivers/soc/kendryte/Makefile (new file, 3 lines)
@ -0,0 +1,3 @@
# SPDX-License-Identifier: GPL-2.0

obj-$(CONFIG_K210_SYSCTL)	+= k210-sysctl.o

drivers/soc/kendryte/k210-sysctl.c (new file, 248 lines)
@ -0,0 +1,248 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2019 Christoph Hellwig.
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 */
#include <linux/types.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/bitfield.h>
#include <asm/soc.h>

#define K210_SYSCTL_CLK0_FREQ		26000000UL

/* Registers base address */
#define K210_SYSCTL_SYSCTL_BASE_ADDR	0x50440000ULL

/* Registers */
#define K210_SYSCTL_PLL0		0x08
#define K210_SYSCTL_PLL1		0x0c
/* clkr: 4 bits, clkf1: 6 bits, clkod: 4 bits, bwadj: 4 bits */
#define PLL_RESET			(1 << 20)
#define PLL_PWR				(1 << 21)
#define PLL_INTFB			(1 << 22)
#define PLL_BYPASS			(1 << 23)
#define PLL_TEST			(1 << 24)
#define PLL_OUT_EN			(1 << 25)
#define PLL_TEST_EN			(1 << 26)
#define K210_SYSCTL_PLL_LOCK		0x18
#define PLL0_LOCK1			(1 << 0)
#define PLL0_LOCK2			(1 << 1)
#define PLL0_SLIP_CLEAR			(1 << 2)
#define PLL0_TEST_CLK_OUT		(1 << 3)
#define PLL1_LOCK1			(1 << 8)
#define PLL1_LOCK2			(1 << 9)
#define PLL1_SLIP_CLEAR			(1 << 10)
#define PLL1_TEST_CLK_OUT		(1 << 11)
#define PLL2_LOCK1			(1 << 16)
#define PLL2_LOCK2			(1 << 17)
#define PLL2_SLIP_CLEAR			(1 << 18)
#define PLL2_TEST_CLK_OUT		(1 << 19)
#define K210_SYSCTL_CLKSEL0		0x20
#define CLKSEL_ACLK			(1 << 0)
#define K210_SYSCTL_CLKEN_CENT		0x28
#define CLKEN_CPU			(1 << 0)
#define CLKEN_SRAM0			(1 << 1)
#define CLKEN_SRAM1			(1 << 2)
#define CLKEN_APB0			(1 << 3)
#define CLKEN_APB1			(1 << 4)
#define CLKEN_APB2			(1 << 5)
#define K210_SYSCTL_CLKEN_PERI		0x2c
#define CLKEN_ROM			(1 << 0)
#define CLKEN_DMA			(1 << 1)
#define CLKEN_AI			(1 << 2)
#define CLKEN_DVP			(1 << 3)
#define CLKEN_FFT			(1 << 4)
#define CLKEN_GPIO			(1 << 5)
#define CLKEN_SPI0			(1 << 6)
#define CLKEN_SPI1			(1 << 7)
#define CLKEN_SPI2			(1 << 8)
#define CLKEN_SPI3			(1 << 9)
#define CLKEN_I2S0			(1 << 10)
#define CLKEN_I2S1			(1 << 11)
#define CLKEN_I2S2			(1 << 12)
#define CLKEN_I2C0			(1 << 13)
#define CLKEN_I2C1			(1 << 14)
#define CLKEN_I2C2			(1 << 15)
#define CLKEN_UART1			(1 << 16)
#define CLKEN_UART2			(1 << 17)
#define CLKEN_UART3			(1 << 18)
#define CLKEN_AES			(1 << 19)
#define CLKEN_FPIO			(1 << 20)
#define CLKEN_TIMER0			(1 << 21)
#define CLKEN_TIMER1			(1 << 22)
#define CLKEN_TIMER2			(1 << 23)
#define CLKEN_WDT0			(1 << 24)
#define CLKEN_WDT1			(1 << 25)
#define CLKEN_SHA			(1 << 26)
#define CLKEN_OTP			(1 << 27)
#define CLKEN_RTC			(1 << 29)

struct k210_sysctl {
	void __iomem		*regs;
	struct clk_hw		hw;
};

static void k210_set_bits(u32 val, void __iomem *reg)
{
	writel(readl(reg) | val, reg);
}

static void k210_clear_bits(u32 val, void __iomem *reg)
{
	writel(readl(reg) & ~val, reg);
}

static void k210_pll1_enable(void __iomem *regs)
{
	u32 val;

	val = readl(regs + K210_SYSCTL_PLL1);
	val &= ~GENMASK(19, 0);				/* clkr1 = 0 */
	val |= FIELD_PREP(GENMASK(9, 4), 0x3B);		/* clkf1 = 59 */
	val |= FIELD_PREP(GENMASK(13, 10), 0x3);	/* clkod1 = 3 */
	val |= FIELD_PREP(GENMASK(19, 14), 0x3B);	/* bwadj1 = 59 */
	writel(val, regs + K210_SYSCTL_PLL1);

	k210_clear_bits(PLL_BYPASS, regs + K210_SYSCTL_PLL1);
	k210_set_bits(PLL_PWR, regs + K210_SYSCTL_PLL1);

	/*
	 * Reset the PLL. The magic NOPs come from the Kendryte reference SDK.
	 */
	k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
	k210_set_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
	nop();
	nop();
	k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);

	for (;;) {
		val = readl(regs + K210_SYSCTL_PLL_LOCK);
		if (val & PLL1_LOCK2)
			break;
		writel(val | PLL1_SLIP_CLEAR, regs + K210_SYSCTL_PLL_LOCK);
	}

	k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL1);
}

static unsigned long k210_sysctl_clk_recalc_rate(struct clk_hw *hw,
						 unsigned long parent_rate)
{
	struct k210_sysctl *s = container_of(hw, struct k210_sysctl, hw);
	u32 clksel0, pll0;
	u64 pll0_freq, clkr0, clkf0, clkod0;

	/*
	 * If the clock selector is not set, use the base frequency.
	 * Otherwise, use PLL0 frequency with a frequency divisor.
	 */
	clksel0 = readl(s->regs + K210_SYSCTL_CLKSEL0);
	if (!(clksel0 & CLKSEL_ACLK))
		return K210_SYSCTL_CLK0_FREQ;

	/*
	 * Get PLL0 frequency:
	 * freq = base frequency * clkf0 / (clkr0 * clkod0)
	 */
	pll0 = readl(s->regs + K210_SYSCTL_PLL0);
	clkr0 = 1 + FIELD_GET(GENMASK(3, 0), pll0);
	clkf0 = 1 + FIELD_GET(GENMASK(9, 4), pll0);
	clkod0 = 1 + FIELD_GET(GENMASK(13, 10), pll0);
	pll0_freq = clkf0 * K210_SYSCTL_CLK0_FREQ / (clkr0 * clkod0);

	/* Get the frequency divisor from the clock selector */
	return pll0_freq / (2ULL << FIELD_GET(0x00000006, clksel0));
}

static const struct clk_ops k210_sysctl_clk_ops = {
	.recalc_rate	= k210_sysctl_clk_recalc_rate,
};

static const struct clk_init_data k210_clk_init_data = {
	.name	= "k210-sysctl-pll1",
	.ops	= &k210_sysctl_clk_ops,
};

static int k210_sysctl_probe(struct platform_device *pdev)
{
	struct k210_sysctl *s;
	int error;

	pr_info("Kendryte K210 SoC sysctl\n");

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->regs = devm_ioremap_resource(&pdev->dev,
			platform_get_resource(pdev, IORESOURCE_MEM, 0));
	if (IS_ERR(s->regs))
		return PTR_ERR(s->regs);

	s->hw.init = &k210_clk_init_data;
	error = devm_clk_hw_register(&pdev->dev, &s->hw);
	if (error) {
		dev_err(&pdev->dev, "failed to register clk");
		return error;
	}

	error = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
					    &s->hw);
	if (error) {
		dev_err(&pdev->dev, "adding clk provider failed\n");
		return error;
	}

	return 0;
}

static const struct of_device_id k210_sysctl_of_match[] = {
	{ .compatible = "kendryte,k210-sysctl", },
	{}
};

static struct platform_driver k210_sysctl_driver = {
	.driver	= {
		.name		= "k210-sysctl",
		.of_match_table	= k210_sysctl_of_match,
	},
	.probe	= k210_sysctl_probe,
};

static int __init k210_sysctl_init(void)
{
	return platform_driver_register(&k210_sysctl_driver);
}
core_initcall(k210_sysctl_init);

/*
 * This needs to be called very early during initialization, given that
 * PLL1 needs to be enabled to be able to use all SRAM.
 */
static void __init k210_soc_early_init(const void *fdt)
{
	void __iomem *regs;

	regs = ioremap(K210_SYSCTL_SYSCTL_BASE_ADDR, 0x1000);
	if (!regs)
		panic("K210 sysctl ioremap");

	/* Enable PLL1 to make the KPU SRAM usable */
	k210_pll1_enable(regs);

	k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL0);

	k210_set_bits(CLKEN_CPU | CLKEN_SRAM0 | CLKEN_SRAM1,
		      regs + K210_SYSCTL_CLKEN_CENT);
	k210_set_bits(CLKEN_ROM | CLKEN_TIMER0 | CLKEN_RTC,
		      regs + K210_SYSCTL_CLKEN_PERI);

	k210_set_bits(CLKSEL_ACLK, regs + K210_SYSCTL_CLKSEL0);

	iounmap(regs);
}
SOC_EARLY_INIT_DECLARE(generic_k210, "kendryte,k210", k210_soc_early_init);
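
To make the PLL arithmetic concrete: the register fields store each divider minus one, and k210_pll1_enable() above programs clkr1 = 0, clkf1 = 59, clkod1 = 3, so the formula used by k210_sysctl_clk_recalc_rate() gives 26 MHz * 60 / (1 * 4) = 390 MHz. A stand-alone check of that arithmetic:

	#include <stdio.h>

	#define K210_SYSCTL_CLK0_FREQ	26000000UL

	/* Same formula as k210_sysctl_clk_recalc_rate(); the "+ 1" mirrors
	 * the minus-one register encoding of the divider fields. */
	static unsigned long pll_rate(unsigned long clkr, unsigned long clkf,
				      unsigned long clkod)
	{
		return (clkf + 1) * K210_SYSCTL_CLK0_FREQ /
		       ((clkr + 1) * (clkod + 1));
	}

	int main(void)
	{
		/* Field values written by k210_pll1_enable() */
		printf("%lu\n", pll_rate(0, 59, 3));	/* prints 390000000 */
		return 0;
	}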

@ -331,7 +331,8 @@ COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned i
 }
 #endif

-#if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT)
+#if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT) || \
+	defined(__ARCH_WANT_SYS_LLSEEK)
 SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
 		unsigned long, offset_low, loff_t __user *, result,
 		unsigned int, whence)
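
llseek exists because 32-bit ABIs cannot pass a 64-bit file offset in a single register, so userspace hands the kernel two 32-bit halves and the syscall glues them back together before seeking. A minimal sketch of that recombination (not the kernel's literal code):

	#include <stdio.h>
	#include <stdint.h>

	/* How a 64-bit offset is rebuilt from llseek's two 32-bit arguments. */
	static int64_t combine_offset(uint32_t offset_high, uint32_t offset_low)
	{
		return ((int64_t)offset_high << 32) | offset_low;
	}

	int main(void)
	{
		/* 4 GiB + 512 bytes: representable only with both halves */
		printf("%lld\n", (long long)combine_offset(0x1, 0x200));
		return 0;	/* prints 4294967808 */
	}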

include/dt-bindings/clock/k210-clk.h (new file, 20 lines)
@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2019-20 Sean Anderson <seanga2@gmail.com>
 * Copyright (c) 2020 Western Digital Corporation or its affiliates.
 */
#ifndef K210_CLK_H
#define K210_CLK_H

/*
 * Arbitrary identifiers for clocks.
 * The structure is: in0 -> pll0 -> aclk -> cpu
 *
 * Since we use the hardware defaults for now, set all these to the same clock.
 */
#define K210_CLK_PLL0	0
#define K210_CLK_PLL1	0
#define K210_CLK_ACLK	0
#define K210_CLK_CPU	0

#endif /* K210_CLK_H */

@ -16,6 +16,10 @@ config GCC_VERSION
 	default $(shell,$(srctree)/scripts/gcc-version.sh $(CC)) if CC_IS_GCC
 	default 0

+config LD_VERSION
+	int
+	default $(shell,$(LD) --version | $(srctree)/scripts/ld-version.sh)
+
 config CC_IS_CLANG
 	def_bool $(success,$(CC) --version | head -n 1 | grep -q clang)
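
LD_VERSION lets Kconfig compare linker versions numerically. Assuming scripts/ld-version.sh keeps its usual encoding of major*100000000 + minor*1000000 + patchlevel*10000, binutils 2.33.1 becomes 233010000. A quick sanity check of that assumed encoding:

	#include <stdio.h>

	/* Assumed ld-version.sh encoding: major*1e8 + minor*1e6 + patch*1e4. */
	static long ld_version_code(long major, long minor, long patch)
	{
		return major * 100000000L + minor * 1000000L + patch * 10000L;
	}

	int main(void)
	{
		printf("%ld\n", ld_version_code(2, 33, 1));	/* prints 233010000 */
		return 0;
	}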

@ -1515,7 +1515,7 @@ struct module_sect_attr {
 struct module_sect_attrs {
 	struct attribute_group grp;
 	unsigned int nsections;
-	struct module_sect_attr attrs[0];
+	struct module_sect_attr attrs[];
 };

 static ssize_t module_sect_show(struct module_attribute *mattr,

@ -1608,7 +1608,7 @@ static void remove_sect_attrs(struct module *mod)
 struct module_notes_attrs {
 	struct kobject *dir;
 	unsigned int notes;
-	struct bin_attribute attrs[0];
+	struct bin_attribute attrs[];
 };

 static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
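
attrs[0] is the old GNU zero-length-array idiom for a trailing variable-sized region; attrs[] is the standard C99 flexible array member, which the compiler (and helpers like struct_size()) can reason about. The allocation pattern stays the same, roughly as below (a sketch of the usual kernel idiom, not module.c's literal call site):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* Sketch: allocating a struct ending in a flexible array member.
	 * struct_size() computes sizeof(*sect_attrs) plus nsect trailing
	 * elements, with integer-overflow checking. */
	static struct module_sect_attrs *alloc_sect_attrs(unsigned int nsect)
	{
		struct module_sect_attrs *sect_attrs;

		sect_attrs = kzalloc(struct_size(sect_attrs, attrs, nsect),
				     GFP_KERNEL);
		if (sect_attrs)
			sect_attrs->nsections = nsect;
		return sect_attrs;
	}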

@ -41,6 +41,11 @@ for dev in `ls -1 /sys/bus/pci/devices/ | grep '\.0$'` ; do
 		continue;
 	fi

+	if [ "ahci" = "$(basename $(realpath /sys/bus/pci/devices/$dev/driver))" ] ; then
+		echo "$dev, Skipped: ahci doesn't support recovery"
+		continue
+	fi
+
 	# Don't inject errors into an already-frozen PE. This happens with
 	# PEs that contain multiple PCI devices (e.g. multi-function cards)
 	# and injecting new errors during the recovery process will probably

@ -25,6 +25,7 @@ $(OUTPUT)/tm-unavailable: CFLAGS += -O0 -pthread -m64 -Wno-error=uninitialized -
 $(OUTPUT)/tm-trap: CFLAGS += -O0 -pthread -m64
 $(OUTPUT)/tm-signal-context-force-tm: CFLAGS += -pthread -m64
 $(OUTPUT)/tm-signal-pagefault: CFLAGS += -pthread -m64
+$(OUTPUT)/tm-poison: CFLAGS += -m64

 SIGNAL_CONTEXT_CHK_TESTS := $(patsubst %,$(OUTPUT)/%,$(SIGNAL_CONTEXT_CHK_TESTS))
 $(SIGNAL_CONTEXT_CHK_TESTS): tm-signal.S