s390 updates for 5.15 merge window
Merge tag 's390-5.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Heiko Carstens:

 - Improve ftrace code patching so that stop_machine is not required
   anymore. This requires a small common code patch acked by Steven
   Rostedt:
   https://lore.kernel.org/linux-s390/20210730220741.4da6fdf6@oasis.local.home/

 - Enable KCSAN for s390. This comes with a small common code change to
   fix a compile warning. Acked by Marco Elver:
   https://lore.kernel.org/r/20210729142811.1309391-1-hca@linux.ibm.com

 - Add KFENCE support for s390. This also comes with a minimal x86 patch
   from Marco Elver, who said this can also be carried via the s390 tree:
   https://lore.kernel.org/linux-s390/YQJdarx6XSUQ1tFZ@elver.google.com/

 - More changes to prepare the decompressor for relocation.

 - Enable DAT also for the CPU restart path.

 - Final set of register asm removal patches; leaving only three
   locations where this is needed and sane.

 - Add NNPA, Vector-Packed-Decimal-Enhancement Facility 2, and PCI MIO
   support to the hwcaps flags.

 - Clean up the hwcaps implementation.

 - Add new instructions to the in-kernel disassembler.

 - Various QDIO cleanups.

 - Add SCLP debug feature.

 - Various other cleanups and improvements all over the place.
* tag 's390-5.15-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (105 commits)
  s390: remove SCHED_CORE from defconfigs
  s390/smp: do not use nodat_stack for secondary CPU start
  s390/smp: enable DAT before CPU restart callback is called
  s390: update defconfigs
  s390/ap: fix state machine hang after failure to enable irq
  KVM: s390: generate kvm hypercall functions
  s390/sclp: add tracing of SCLP interactions
  s390/debug: add early tracing support
  s390/debug: fix debug area life cycle
  s390/debug: keep debug data on resize
  s390/diag: make restart_part2 a local label
  s390/mm,pageattr: fix walk_pte_level() early exit
  s390: fix typo in linker script
  s390: remove do_signal() prototype and do_notify_resume() function
  s390/crypto: fix all kernel-doc warnings in vfio_ap_ops.c
  s390/pci: improve DMA translation init and exit
  s390/pci: simplify CLP List PCI handling
  s390/pci: handle FH state mismatch only on disable
  s390/pci: fix misleading rc in clp_set_pci_fn()
  s390/boot: factor out offset_vmlinux_info() function
  ...
commit c7a5238ef6

@ -138,6 +138,8 @@ config S390
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN
        select HAVE_ARCH_KASAN_VMALLOC
        select HAVE_ARCH_KCSAN
        select HAVE_ARCH_KFENCE
        select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_SOFT_DIRTY

@ -142,7 +142,8 @@ all: bzImage
KBUILD_IMAGE := $(boot)/bzImage

install:
        $(Q)$(MAKE) $(build)=$(boot) $@
        sh -x $(srctree)/$(boot)/install.sh $(KERNELRELEASE) $(KBUILD_IMAGE) \
        System.map "$(INSTALL_PATH)"

bzImage: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

@ -7,6 +7,7 @@ KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
KCSAN_SANITIZE := n

KBUILD_AFLAGS := $(KBUILD_AFLAGS_DECOMPRESSOR)
KBUILD_CFLAGS := $(KBUILD_CFLAGS_DECOMPRESSOR)
@ -36,7 +37,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char

obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += version.o pgm_check_info.o ctype.o text_dma.o
obj-y += version.o pgm_check_info.o ctype.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
@ -69,7 +70,3 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE

$(obj)/startup.a: $(OBJECTS) FORCE
        $(call if_changed,ar)

install:
        sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
        System.map "$(INSTALL_PATH)"

@ -2,14 +2,9 @@
#ifndef BOOT_BOOT_H
#define BOOT_BOOT_H

#include <asm/extable.h>
#include <linux/types.h>

#define BOOT_STACK_OFFSET 0x8000

#ifndef __ASSEMBLY__

#include <linux/compiler.h>

void startup_kernel(void);
unsigned long detect_memory(void);
bool is_ipl_block_dump(void);
@ -18,17 +13,22 @@ void setup_boot_command_line(void);
void parse_boot_command_line(void);
void verify_facilities(void);
void print_missing_facilities(void);
void sclp_early_setup_buffer(void);
void print_pgm_check_info(void);
unsigned long get_random_base(unsigned long safe_addr);
void __printf(1, 2) decompressor_printk(const char *fmt, ...);

/* Symbols defined by linker scripts */
extern const char kernel_version[];
extern unsigned long memory_limit;
extern unsigned long vmalloc_size;
extern int vmalloc_size_set;
extern int kaslr_enabled;
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
extern char _decompressor_syms_start[], _decompressor_syms_end[];
extern char _stack_start[], _stack_end[];

unsigned long read_ipl_report(unsigned long safe_offset);

#endif /* __ASSEMBLY__ */
#endif /* BOOT_BOOT_H */

@ -9,6 +9,7 @@ KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
KCSAN_SANITIZE := n

obj-y := $(if $(CONFIG_KERNEL_UNCOMPRESSED),,decompressor.o) info.o
obj-$(CONFIG_KERNEL_ZSTD) += clz_ctz.o

@ -23,11 +23,6 @@
#define memmove memmove
#define memzero(s, n) memset((s), 0, (n))

/* Symbols defined by linker scripts */
extern char _end[];
extern unsigned char _compressed_start[];
extern unsigned char _compressed_end[];

#ifdef CONFIG_KERNEL_BZIP2
#define BOOT_HEAP_SIZE 0x400000
#elif CONFIG_KERNEL_ZSTD

@ -26,7 +26,12 @@ struct vmlinux_info {
        unsigned long rela_dyn_end;
};

/* Symbols defined by linker scripts */
extern char _end[];
extern unsigned char _compressed_start[];
extern unsigned char _compressed_end[];
extern char _vmlinux_info[];

#define vmlinux (*(struct vmlinux_info *)_vmlinux_info)

#endif /* BOOT_COMPRESSED_DECOMPRESSOR_H */

@ -1,6 +1,9 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#include <asm-generic/vmlinux.lds.h>
|
||||
#include <asm/vmlinux.lds.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/sclp.h>
|
||||
|
||||
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
|
||||
OUTPUT_ARCH(s390:64-bit)
|
||||
@ -34,27 +37,6 @@ SECTIONS
|
||||
*(.data.*)
|
||||
_edata = . ;
|
||||
}
|
||||
/*
|
||||
* .dma section for code, data, ex_table that need to stay below 2 GB,
|
||||
* even when the kernel is relocated above 2 GB.
|
||||
*/
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
_sdma = .;
|
||||
.dma.text : {
|
||||
_stext_dma = .;
|
||||
*(.dma.text)
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
_etext_dma = .;
|
||||
}
|
||||
. = ALIGN(16);
|
||||
.dma.ex_table : {
|
||||
_start_dma_ex_table = .;
|
||||
KEEP(*(.dma.ex_table))
|
||||
_stop_dma_ex_table = .;
|
||||
}
|
||||
.dma.data : { *(.dma.data) }
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
_edma = .;
|
||||
|
||||
BOOT_DATA
|
||||
BOOT_DATA_PRESERVED
|
||||
@ -69,6 +51,17 @@ SECTIONS
|
||||
*(.bss)
|
||||
*(.bss.*)
|
||||
*(COMMON)
|
||||
/*
|
||||
* Stacks for the decompressor
|
||||
*/
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
_dump_info_stack_start = .;
|
||||
. += PAGE_SIZE;
|
||||
_dump_info_stack_end = .;
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
_stack_start = .;
|
||||
. += BOOT_STACK_SIZE;
|
||||
_stack_end = .;
|
||||
_ebss = .;
|
||||
}
|
||||
|
||||
|
@ -25,13 +25,15 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include "boot.h"
|
||||
#include <asm/sclp.h>
|
||||
|
||||
#define ARCH_OFFSET 4
|
||||
|
||||
#define EP_OFFSET 0x10008
|
||||
#define EP_STRING "S390EP"
|
||||
|
||||
__HEAD
|
||||
|
||||
#define IPL_BS 0x730
|
||||
@ -275,11 +277,11 @@ iplstart:
|
||||
.Lcpuid:.fill 8,1,0
|
||||
|
||||
#
|
||||
# startup-code at 0x10000, running in absolute addressing mode
|
||||
# normal startup-code, running in absolute addressing mode
|
||||
# this is called either by the ipl loader or directly by PSW restart
|
||||
# or linload or SALIPL
|
||||
#
|
||||
.org 0x10000
|
||||
.org STARTUP_NORMAL_OFFSET
|
||||
SYM_CODE_START(startup)
|
||||
j startup_normal
|
||||
.org EP_OFFSET
|
||||
@ -292,9 +294,9 @@ SYM_CODE_START(startup)
|
||||
.ascii EP_STRING
|
||||
.byte 0x00,0x01
|
||||
#
|
||||
# kdump startup-code at 0x10010, running in 64 bit absolute addressing mode
|
||||
# kdump startup-code, running in 64 bit absolute addressing mode
|
||||
#
|
||||
.org 0x10010
|
||||
.org STARTUP_KDUMP_OFFSET
|
||||
j startup_kdump
|
||||
SYM_CODE_END(startup)
|
||||
SYM_CODE_START_LOCAL(startup_normal)
|
||||
@ -315,18 +317,16 @@ SYM_CODE_START_LOCAL(startup_normal)
|
||||
xc 0x300(256),0x300
|
||||
xc 0xe00(256),0xe00
|
||||
xc 0xf00(256),0xf00
|
||||
lctlg %c0,%c15,.Lctl-.LPG0(%r13) # load control registers
|
||||
stcke __LC_BOOT_CLOCK
|
||||
mvc __LC_LAST_UPDATE_CLOCK(8),__LC_BOOT_CLOCK+1
|
||||
spt 6f-.LPG0(%r13)
|
||||
mvc __LC_LAST_UPDATE_TIMER(8),6f-.LPG0(%r13)
|
||||
l %r15,.Lstack-.LPG0(%r13)
|
||||
larl %r15,_stack_end-STACK_FRAME_OVERHEAD
|
||||
brasl %r14,sclp_early_setup_buffer
|
||||
brasl %r14,verify_facilities
|
||||
brasl %r14,startup_kernel
|
||||
SYM_CODE_END(startup_normal)
|
||||
|
||||
.Lstack:
|
||||
.long BOOT_STACK_OFFSET + BOOT_STACK_SIZE - STACK_FRAME_OVERHEAD
|
||||
.align 8
|
||||
6: .long 0x7fffffff,0xffffffff
|
||||
.Lext_new_psw:
|
||||
@ -335,35 +335,6 @@ SYM_CODE_END(startup_normal)
|
||||
.quad 0x0000000180000000,startup_pgm_check_handler
|
||||
.Lio_new_psw:
|
||||
.quad 0x0002000180000000,0x1f0 # disabled wait
|
||||
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
|
||||
.quad 0 # cr1: primary space segment table
|
||||
.quad .Lduct # cr2: dispatchable unit control table
|
||||
.quad 0 # cr3: instruction authorization
|
||||
.quad 0xffff # cr4: instruction authorization
|
||||
.quad .Lduct # cr5: primary-aste origin
|
||||
.quad 0 # cr6: I/O interrupts
|
||||
.quad 0 # cr7: secondary space segment table
|
||||
.quad 0x0000000000008000 # cr8: access registers translation
|
||||
.quad 0 # cr9: tracing off
|
||||
.quad 0 # cr10: tracing off
|
||||
.quad 0 # cr11: tracing off
|
||||
.quad 0 # cr12: tracing off
|
||||
.quad 0 # cr13: home space segment table
|
||||
.quad 0xc0000000 # cr14: machine check handling off
|
||||
.quad .Llinkage_stack # cr15: linkage stack operations
|
||||
|
||||
.section .dma.data,"aw",@progbits
|
||||
.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
|
||||
.long 0,0,0,0,0,0,0,0
|
||||
.Llinkage_stack:
|
||||
.long 0,0,0x89000000,0,0,0,0x8a000000,0
|
||||
.align 64
|
||||
.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
|
||||
.align 128
|
||||
.Lduald:.rept 8
|
||||
.long 0x80000000,0,0,0 # invalid access-list entries
|
||||
.endr
|
||||
.previous
|
||||
|
||||
#include "head_kdump.S"
|
||||
|
||||
@ -386,15 +357,13 @@ SYM_CODE_START_LOCAL(startup_pgm_check_handler)
|
||||
oi __LC_RETURN_PSW+1,0x2 # set wait state bit
|
||||
larl %r9,.Lold_psw_disabled_wait
|
||||
stg %r9,__LC_PGM_NEW_PSW+8
|
||||
l %r15,.Ldump_info_stack-.Lold_psw_disabled_wait(%r9)
|
||||
larl %r15,_dump_info_stack_end-STACK_FRAME_OVERHEAD
|
||||
brasl %r14,print_pgm_check_info
|
||||
.Lold_psw_disabled_wait:
|
||||
la %r8,4095
|
||||
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r8)
|
||||
lpswe __LC_RETURN_PSW # disabled wait
|
||||
SYM_CODE_END(startup_pgm_check_handler)
|
||||
.Ldump_info_stack:
|
||||
.long 0x5000 + PAGE_SIZE - STACK_FRAME_OVERHEAD
|
||||
|
||||
#
|
||||
# params at 10400 (setup.h)
|
||||
@ -415,7 +384,4 @@ SYM_DATA_START(parmarea)
|
||||
.org PARMAREA+__PARMAREA_SIZE
|
||||
SYM_DATA_END(parmarea)
|
||||
|
||||
.org EARLY_SCCB_OFFSET
|
||||
.fill 4096
|
||||
|
||||
.org HEAD_END
|
||||
|
@ -54,9 +54,9 @@ static unsigned long find_bootdata_space(struct ipl_rb_components *comps,
|
||||
* not overlap with any component or any certificate.
|
||||
*/
|
||||
repeat:
|
||||
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
|
||||
intersects(INITRD_START, INITRD_SIZE, safe_addr, size))
|
||||
safe_addr = INITRD_START + INITRD_SIZE;
|
||||
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
|
||||
intersects(initrd_data.start, initrd_data.size, safe_addr, size))
|
||||
safe_addr = initrd_data.start + initrd_data.size;
|
||||
for_each_rb_entry(comp, comps)
|
||||
if (intersects(safe_addr, size, comp->addr, comp->len)) {
|
||||
safe_addr = comp->addr + comp->len;
|
||||
|
@ -186,9 +186,9 @@ unsigned long get_random_base(unsigned long safe_addr)
|
||||
*/
|
||||
memory_limit -= kasan_estimate_memory_needs(memory_limit);
|
||||
|
||||
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
|
||||
if (safe_addr < INITRD_START + INITRD_SIZE)
|
||||
safe_addr = INITRD_START + INITRD_SIZE;
|
||||
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size) {
|
||||
if (safe_addr < initrd_data.start + initrd_data.size)
|
||||
safe_addr = initrd_data.start + initrd_data.size;
|
||||
}
|
||||
safe_addr = ALIGN(safe_addr, THREAD_SIZE);
|
||||
|
||||
|
@ -1,6 +1,8 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/sclp.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/mem_detect.h>
|
||||
@ -24,9 +26,9 @@ static void *mem_detect_alloc_extended(void)
|
||||
{
|
||||
unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));
|
||||
|
||||
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
|
||||
INITRD_START < offset + ENTRIES_EXTENDED_MAX)
|
||||
offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));
|
||||
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size &&
|
||||
initrd_data.start < offset + ENTRIES_EXTENDED_MAX)
|
||||
offset = ALIGN(initrd_data.start + initrd_data.size, sizeof(u64));
|
||||
|
||||
return (void *)offset;
|
||||
}
|
||||
|
@ -29,7 +29,6 @@ static char *symstart(char *p)
|
||||
return p + 1;
|
||||
}
|
||||
|
||||
extern char _decompressor_syms_start[], _decompressor_syms_end[];
|
||||
static noinline char *findsym(unsigned long ip, unsigned short *off, unsigned short *len)
|
||||
{
|
||||
/* symbol entries are in a form "10000 c4 startup\0" */
|
||||
@ -126,8 +125,8 @@ void decompressor_printk(const char *fmt, ...)
|
||||
|
||||
static noinline void print_stacktrace(void)
|
||||
{
|
||||
struct stack_info boot_stack = { STACK_TYPE_TASK, BOOT_STACK_OFFSET,
|
||||
BOOT_STACK_OFFSET + BOOT_STACK_SIZE };
|
||||
struct stack_info boot_stack = { STACK_TYPE_TASK, (unsigned long)_stack_start,
|
||||
(unsigned long)_stack_end };
|
||||
unsigned long sp = S390_lowcore.gpregs_save_area[15];
|
||||
bool first = true;
|
||||
|
||||
|
@ -1,2 +1,11 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include "boot.h"
|
||||
#include "../../../drivers/s390/char/sclp_early_core.c"
|
||||
|
||||
/* SCLP early buffer must stay page-aligned and below 2GB */
|
||||
static char __sclp_early_sccb[EXT_SCCB_READ_SCP] __aligned(PAGE_SIZE);
|
||||
|
||||
void sclp_early_setup_buffer(void)
|
||||
{
|
||||
sclp_early_set_buffer(&__sclp_early_sccb);
|
||||
}
|
||||
|
@ -12,9 +12,8 @@
|
||||
#include <asm/uv.h>
|
||||
#include "compressed/decompressor.h"
|
||||
#include "boot.h"
|
||||
#include "uv.h"
|
||||
|
||||
extern char __boot_data_start[], __boot_data_end[];
|
||||
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
|
||||
unsigned long __bootdata_preserved(__kaslr_offset);
|
||||
unsigned long __bootdata_preserved(VMALLOC_START);
|
||||
unsigned long __bootdata_preserved(VMALLOC_END);
|
||||
@ -24,44 +23,11 @@ unsigned long __bootdata_preserved(MODULES_VADDR);
|
||||
unsigned long __bootdata_preserved(MODULES_END);
|
||||
unsigned long __bootdata(ident_map_size);
|
||||
int __bootdata(is_full_image) = 1;
|
||||
struct initrd_data __bootdata(initrd_data);
|
||||
|
||||
u64 __bootdata_preserved(stfle_fac_list[16]);
|
||||
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
|
||||
|
||||
/*
|
||||
* Some code and data needs to stay below 2 GB, even when the kernel would be
|
||||
* relocated above 2 GB, because it has to use 31 bit addresses.
|
||||
* Such code and data is part of the .dma section, and its location is passed
|
||||
* over to the decompressed / relocated kernel via the .boot.preserved.data
|
||||
* section.
|
||||
*/
|
||||
extern char _sdma[], _edma[];
|
||||
extern char _stext_dma[], _etext_dma[];
|
||||
extern struct exception_table_entry _start_dma_ex_table[];
|
||||
extern struct exception_table_entry _stop_dma_ex_table[];
|
||||
unsigned long __bootdata_preserved(__sdma) = __pa(&_sdma);
|
||||
unsigned long __bootdata_preserved(__edma) = __pa(&_edma);
|
||||
unsigned long __bootdata_preserved(__stext_dma) = __pa(&_stext_dma);
|
||||
unsigned long __bootdata_preserved(__etext_dma) = __pa(&_etext_dma);
|
||||
struct exception_table_entry *
|
||||
__bootdata_preserved(__start_dma_ex_table) = _start_dma_ex_table;
|
||||
struct exception_table_entry *
|
||||
__bootdata_preserved(__stop_dma_ex_table) = _stop_dma_ex_table;
|
||||
|
||||
int _diag210_dma(struct diag210 *addr);
|
||||
int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode);
|
||||
int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode);
|
||||
void _diag0c_dma(struct hypfs_diag0c_entry *entry);
|
||||
void _diag308_reset_dma(void);
|
||||
struct diag_ops __bootdata_preserved(diag_dma_ops) = {
|
||||
.diag210 = _diag210_dma,
|
||||
.diag26c = _diag26c_dma,
|
||||
.diag14 = _diag14_dma,
|
||||
.diag0c = _diag0c_dma,
|
||||
.diag308_reset = _diag308_reset_dma
|
||||
};
|
||||
static struct diag210 _diag210_tmp_dma __section(".dma.data");
|
||||
struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma;
|
||||
struct oldmem_data __bootdata_preserved(oldmem_data);
|
||||
|
||||
void error(char *x)
|
||||
{
|
||||
@ -91,12 +57,12 @@ static void rescue_initrd(unsigned long addr)
|
||||
{
|
||||
if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
|
||||
return;
|
||||
if (!INITRD_START || !INITRD_SIZE)
|
||||
if (!initrd_data.start || !initrd_data.size)
|
||||
return;
|
||||
if (addr <= INITRD_START)
|
||||
if (addr <= initrd_data.start)
|
||||
return;
|
||||
memmove((void *)addr, (void *)INITRD_START, INITRD_SIZE);
|
||||
INITRD_START = addr;
|
||||
memmove((void *)addr, (void *)initrd_data.start, initrd_data.size);
|
||||
initrd_data.start = addr;
|
||||
}
|
||||
|
||||
static void copy_bootdata(void)
|
||||
@ -169,9 +135,9 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
|
||||
ident_map_size = min(ident_map_size, 1UL << MAX_PHYSMEM_BITS);
|
||||
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
if (OLDMEM_BASE) {
|
||||
if (oldmem_data.start) {
|
||||
kaslr_enabled = 0;
|
||||
ident_map_size = min(ident_map_size, OLDMEM_SIZE);
|
||||
ident_map_size = min(ident_map_size, oldmem_data.size);
|
||||
} else if (ipl_block_valid && is_ipl_block_dump()) {
|
||||
kaslr_enabled = 0;
|
||||
if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
|
||||
@ -282,12 +248,28 @@ static void setup_vmalloc_size(void)
|
||||
vmalloc_size = max(size, vmalloc_size);
|
||||
}
|
||||
|
||||
static void offset_vmlinux_info(unsigned long offset)
|
||||
{
|
||||
vmlinux.default_lma += offset;
|
||||
*(unsigned long *)(&vmlinux.entry) += offset;
|
||||
vmlinux.bootdata_off += offset;
|
||||
vmlinux.bootdata_preserved_off += offset;
|
||||
vmlinux.rela_dyn_start += offset;
|
||||
vmlinux.rela_dyn_end += offset;
|
||||
vmlinux.dynsym_start += offset;
|
||||
}
|
||||
|
||||
void startup_kernel(void)
|
||||
{
|
||||
unsigned long random_lma;
|
||||
unsigned long safe_addr;
|
||||
void *img;
|
||||
|
||||
initrd_data.start = parmarea.initrd_start;
|
||||
initrd_data.size = parmarea.initrd_size;
|
||||
oldmem_data.start = parmarea.oldmem_base;
|
||||
oldmem_data.size = parmarea.oldmem_size;
|
||||
|
||||
setup_lpp();
|
||||
store_ipl_parmblock();
|
||||
safe_addr = mem_safe_offset();
|
||||
@ -297,23 +279,17 @@ void startup_kernel(void)
|
||||
sclp_early_read_info();
|
||||
setup_boot_command_line();
|
||||
parse_boot_command_line();
|
||||
sanitize_prot_virt_host();
|
||||
setup_ident_map_size(detect_memory());
|
||||
setup_vmalloc_size();
|
||||
setup_kernel_memory_layout();
|
||||
|
||||
random_lma = __kaslr_offset = 0;
|
||||
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
|
||||
random_lma = get_random_base(safe_addr);
|
||||
if (random_lma) {
|
||||
__kaslr_offset = random_lma - vmlinux.default_lma;
|
||||
img = (void *)vmlinux.default_lma;
|
||||
vmlinux.default_lma += __kaslr_offset;
|
||||
vmlinux.entry += __kaslr_offset;
|
||||
vmlinux.bootdata_off += __kaslr_offset;
|
||||
vmlinux.bootdata_preserved_off += __kaslr_offset;
|
||||
vmlinux.rela_dyn_start += __kaslr_offset;
|
||||
vmlinux.rela_dyn_end += __kaslr_offset;
|
||||
vmlinux.dynsym_start += __kaslr_offset;
|
||||
offset_vmlinux_info(__kaslr_offset);
|
||||
}
|
||||
}
|
||||
|
||||
|
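The KASLR path above picks a random load address and then shifts every address recorded in the vmlinux_info block by the same amount via offset_vmlinux_info(). A worked sketch of that arithmetic, with made-up addresses (not taken from the diff):

#include <stdio.h>

int main(void)
{
        /* Made-up addresses, for illustration only. */
        unsigned long default_lma = 0x100000;   /* link-time load address */
        unsigned long random_lma  = 0x3d00000;  /* randomized base from get_random_base() */
        unsigned long kaslr_offset = random_lma - default_lma;

        /*
         * offset_vmlinux_info(kaslr_offset) adds this value to default_lma,
         * entry, bootdata_off, bootdata_preserved_off, rela_dyn_start/end
         * and dynsym_start, so the decompressed image is consistent with
         * the randomized location.
         */
        printf("__kaslr_offset = %#lx\n", kaslr_offset);  /* prints 0x3c00000 */
        return 0;
}
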
@ -1,8 +1,12 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <asm/uv.h>
|
||||
#include <asm/boot_data.h>
|
||||
#include <asm/facility.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
#include "boot.h"
|
||||
#include "uv.h"
|
||||
|
||||
/* will be used in arch/s390/kernel/uv.c */
|
||||
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
|
||||
int __bootdata_preserved(prot_virt_guest);
|
||||
@ -47,26 +51,34 @@ void uv_query_info(void)
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_KVM)
|
||||
static bool has_uv_sec_stor_limit(void)
|
||||
{
|
||||
/*
|
||||
* keep these conditions in line with setup_uv()
|
||||
*/
|
||||
if (!is_prot_virt_host())
|
||||
return false;
|
||||
|
||||
if (is_prot_virt_guest())
|
||||
return false;
|
||||
|
||||
if (!test_facility(158))
|
||||
return false;
|
||||
|
||||
return !!uv_info.max_sec_stor_addr;
|
||||
}
|
||||
|
||||
void adjust_to_uv_max(unsigned long *vmax)
|
||||
{
|
||||
if (has_uv_sec_stor_limit())
|
||||
if (is_prot_virt_host() && uv_info.max_sec_stor_addr)
|
||||
*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
|
||||
}
|
||||
|
||||
static int is_prot_virt_host_capable(void)
|
||||
{
|
||||
/* disable if no prot_virt=1 given on command-line */
|
||||
if (!is_prot_virt_host())
|
||||
return 0;
|
||||
/* disable if protected guest virtualization is enabled */
|
||||
if (is_prot_virt_guest())
|
||||
return 0;
|
||||
/* disable if no hardware support */
|
||||
if (!test_facility(158))
|
||||
return 0;
|
||||
/* disable if kdump */
|
||||
if (oldmem_data.start)
|
||||
return 0;
|
||||
/* disable if stand-alone dump */
|
||||
if (ipl_block_valid && is_ipl_block_dump())
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
void sanitize_prot_virt_host(void)
|
||||
{
|
||||
prot_virt_host = is_prot_virt_host_capable();
|
||||
}
|
||||
#endif
|
||||
|
arch/s390/boot/uv.h (new file, 19 lines)
@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BOOT_UV_H
#define BOOT_UV_H

#if IS_ENABLED(CONFIG_KVM)
void adjust_to_uv_max(unsigned long *vmax);
void sanitize_prot_virt_host(void);
#else
static inline void adjust_to_uv_max(unsigned long *vmax) {}
static inline void sanitize_prot_virt_host(void) {}
#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
void uv_query_info(void);
#else
static inline void uv_query_info(void) {}
#endif

#endif /* BOOT_UV_H */

@ -10,7 +10,6 @@ CONFIG_BPF_JIT=y
|
||||
CONFIG_BPF_JIT_ALWAYS_ON=y
|
||||
CONFIG_BPF_LSM=y
|
||||
CONFIG_PREEMPT=y
|
||||
CONFIG_SCHED_CORE=y
|
||||
CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_BSD_PROCESS_ACCT_V3=y
|
||||
CONFIG_TASKSTATS=y
|
||||
@ -75,7 +74,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
|
||||
CONFIG_MODVERSIONS=y
|
||||
CONFIG_MODULE_SRCVERSION_ALL=y
|
||||
CONFIG_MODULE_SIG_SHA256=y
|
||||
CONFIG_BLK_DEV_INTEGRITY=y
|
||||
CONFIG_BLK_DEV_THROTTLING=y
|
||||
CONFIG_BLK_WBT=y
|
||||
CONFIG_BLK_CGROUP_IOLATENCY=y
|
||||
@ -466,6 +464,7 @@ CONFIG_DM_FLAKEY=m
|
||||
CONFIG_DM_VERITY=m
|
||||
CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
|
||||
CONFIG_DM_SWITCH=m
|
||||
CONFIG_DM_INTEGRITY=m
|
||||
CONFIG_NETDEVICES=y
|
||||
CONFIG_BONDING=m
|
||||
CONFIG_DUMMY=m
|
||||
|
@ -8,7 +8,6 @@ CONFIG_BPF_SYSCALL=y
|
||||
CONFIG_BPF_JIT=y
|
||||
CONFIG_BPF_JIT_ALWAYS_ON=y
|
||||
CONFIG_BPF_LSM=y
|
||||
CONFIG_SCHED_CORE=y
|
||||
CONFIG_BSD_PROCESS_ACCT=y
|
||||
CONFIG_BSD_PROCESS_ACCT_V3=y
|
||||
CONFIG_TASKSTATS=y
|
||||
|
@ -21,7 +21,7 @@
|
||||
static void diag0c_fn(void *data)
|
||||
{
|
||||
diag_stat_inc(DIAG_STAT_X00C);
|
||||
diag_dma_ops.diag0c(((void **) data)[smp_processor_id()]);
|
||||
diag_amode31_ops.diag0c(((void **)data)[smp_processor_id()]);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -33,12 +33,12 @@ static void *diag0c_store(unsigned int *count)
|
||||
unsigned int cpu_count, cpu, i;
|
||||
void **cpu_vec;
|
||||
|
||||
get_online_cpus();
|
||||
cpus_read_lock();
|
||||
cpu_count = num_online_cpus();
|
||||
cpu_vec = kmalloc_array(num_possible_cpus(), sizeof(*cpu_vec),
|
||||
GFP_KERNEL);
|
||||
if (!cpu_vec)
|
||||
goto fail_put_online_cpus;
|
||||
goto fail_unlock_cpus;
|
||||
/* Note: Diag 0c needs 8 byte alignment and real storage */
|
||||
diag0c_data = kzalloc(struct_size(diag0c_data, entry, cpu_count),
|
||||
GFP_KERNEL | GFP_DMA);
|
||||
@ -54,13 +54,13 @@ static void *diag0c_store(unsigned int *count)
|
||||
on_each_cpu(diag0c_fn, cpu_vec, 1);
|
||||
*count = cpu_count;
|
||||
kfree(cpu_vec);
|
||||
put_online_cpus();
|
||||
cpus_read_unlock();
|
||||
return diag0c_data;
|
||||
|
||||
fail_kfree_cpu_vec:
|
||||
kfree(cpu_vec);
|
||||
fail_put_online_cpus:
|
||||
put_online_cpus();
|
||||
fail_unlock_cpus:
|
||||
cpus_read_unlock();
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
|
@ -5,7 +5,6 @@
|
||||
#ifndef _ASM_S390_CIO_H_
|
||||
#define _ASM_S390_CIO_H_
|
||||
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <asm/types.h>
|
||||
|
@ -173,17 +173,16 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
|
||||
*/
|
||||
static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
|
||||
{
|
||||
register unsigned long r0 asm("0") = 0; /* query function */
|
||||
register unsigned long r1 asm("1") = (unsigned long) mask;
|
||||
|
||||
asm volatile(
|
||||
" spm 0\n" /* pckmo doesn't change the cc */
|
||||
" lghi 0,0\n" /* query function */
|
||||
" lgr 1,%[mask]\n"
|
||||
" spm 0\n" /* pckmo doesn't change the cc */
|
||||
/* Parameter regs are ignored, but must be nonzero and unique */
|
||||
"0: .insn rrf,%[opc] << 16,2,4,6,0\n"
|
||||
" brc 1,0b\n" /* handle partial completion */
|
||||
: "=m" (*mask)
|
||||
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (opcode)
|
||||
: "cc");
|
||||
: [mask] "d" ((unsigned long)mask), [opc] "i" (opcode)
|
||||
: "cc", "0", "1");
|
||||
}
|
||||
|
||||
static __always_inline int __cpacf_check_opcode(unsigned int opcode)
|
||||
@ -249,20 +248,22 @@ static __always_inline int cpacf_query_func(unsigned int opcode, unsigned int fu
|
||||
static inline int cpacf_km(unsigned long func, void *param,
|
||||
u8 *dest, const u8 *src, long src_len)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) func;
|
||||
register unsigned long r1 asm("1") = (unsigned long) param;
|
||||
register unsigned long r2 asm("2") = (unsigned long) src;
|
||||
register unsigned long r3 asm("3") = (unsigned long) src_len;
|
||||
register unsigned long r4 asm("4") = (unsigned long) dest;
|
||||
union register_pair d, s;
|
||||
|
||||
d.even = (unsigned long)dest;
|
||||
s.even = (unsigned long)src;
|
||||
s.odd = (unsigned long)src_len;
|
||||
asm volatile(
|
||||
" lgr 0,%[fc]\n"
|
||||
" lgr 1,%[pba]\n"
|
||||
"0: .insn rre,%[opc] << 16,%[dst],%[src]\n"
|
||||
" brc 1,0b\n" /* handle partial completion */
|
||||
: [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
|
||||
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KM)
|
||||
: "cc", "memory");
|
||||
: [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
|
||||
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
|
||||
[opc] "i" (CPACF_KM)
|
||||
: "cc", "memory", "0", "1");
|
||||
|
||||
return src_len - r3;
|
||||
return src_len - s.odd;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -279,20 +280,22 @@ static inline int cpacf_km(unsigned long func, void *param,
|
||||
static inline int cpacf_kmc(unsigned long func, void *param,
|
||||
u8 *dest, const u8 *src, long src_len)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) func;
|
||||
register unsigned long r1 asm("1") = (unsigned long) param;
|
||||
register unsigned long r2 asm("2") = (unsigned long) src;
|
||||
register unsigned long r3 asm("3") = (unsigned long) src_len;
|
||||
register unsigned long r4 asm("4") = (unsigned long) dest;
|
||||
union register_pair d, s;
|
||||
|
||||
d.even = (unsigned long)dest;
|
||||
s.even = (unsigned long)src;
|
||||
s.odd = (unsigned long)src_len;
|
||||
asm volatile(
|
||||
" lgr 0,%[fc]\n"
|
||||
" lgr 1,%[pba]\n"
|
||||
"0: .insn rre,%[opc] << 16,%[dst],%[src]\n"
|
||||
" brc 1,0b\n" /* handle partial completion */
|
||||
: [src] "+a" (r2), [len] "+d" (r3), [dst] "+a" (r4)
|
||||
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMC)
|
||||
: "cc", "memory");
|
||||
: [src] "+&d" (s.pair), [dst] "+&d" (d.pair)
|
||||
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
|
||||
[opc] "i" (CPACF_KMC)
|
||||
: "cc", "memory", "0", "1");
|
||||
|
||||
return src_len - r3;
|
||||
return src_len - s.odd;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -306,17 +309,19 @@ static inline int cpacf_kmc(unsigned long func, void *param,
|
||||
static inline void cpacf_kimd(unsigned long func, void *param,
|
||||
const u8 *src, long src_len)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) func;
|
||||
register unsigned long r1 asm("1") = (unsigned long) param;
|
||||
register unsigned long r2 asm("2") = (unsigned long) src;
|
||||
register unsigned long r3 asm("3") = (unsigned long) src_len;
|
||||
union register_pair s;
|
||||
|
||||
s.even = (unsigned long)src;
|
||||
s.odd = (unsigned long)src_len;
|
||||
asm volatile(
|
||||
" lgr 0,%[fc]\n"
|
||||
" lgr 1,%[pba]\n"
|
||||
"0: .insn rre,%[opc] << 16,0,%[src]\n"
|
||||
" brc 1,0b\n" /* handle partial completion */
|
||||
: [src] "+a" (r2), [len] "+d" (r3)
|
||||
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KIMD)
|
||||
: "cc", "memory");
|
||||
: [src] "+&d" (s.pair)
|
||||
: [fc] "d" (func), [pba] "d" ((unsigned long)(param)),
|
||||
[opc] "i" (CPACF_KIMD)
|
||||
: "cc", "memory", "0", "1");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -329,17 +334,19 @@ static inline void cpacf_kimd(unsigned long func, void *param,
|
||||
static inline void cpacf_klmd(unsigned long func, void *param,
|
||||
const u8 *src, long src_len)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) func;
|
||||
register unsigned long r1 asm("1") = (unsigned long) param;
|
||||
register unsigned long r2 asm("2") = (unsigned long) src;
|
||||
register unsigned long r3 asm("3") = (unsigned long) src_len;
|
||||
union register_pair s;
|
||||
|
||||
s.even = (unsigned long)src;
|
||||
s.odd = (unsigned long)src_len;
|
||||
asm volatile(
|
||||
" lgr 0,%[fc]\n"
|
||||
" lgr 1,%[pba]\n"
|
||||
"0: .insn rre,%[opc] << 16,0,%[src]\n"
|
||||
" brc 1,0b\n" /* handle partial completion */
|
||||
: [src] "+a" (r2), [len] "+d" (r3)
|
||||
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KLMD)
|
||||
: "cc", "memory");
|
||||
: [src] "+&d" (s.pair)
|
||||
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
|
||||
[opc] "i" (CPACF_KLMD)
|
||||
: "cc", "memory", "0", "1");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -355,19 +362,21 @@ static inline void cpacf_klmd(unsigned long func, void *param,
|
||||
static inline int cpacf_kmac(unsigned long func, void *param,
|
||||
const u8 *src, long src_len)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) func;
|
||||
register unsigned long r1 asm("1") = (unsigned long) param;
|
||||
register unsigned long r2 asm("2") = (unsigned long) src;
|
||||
register unsigned long r3 asm("3") = (unsigned long) src_len;
|
||||
union register_pair s;
|
||||
|
||||
s.even = (unsigned long)src;
|
||||
s.odd = (unsigned long)src_len;
|
||||
asm volatile(
|
||||
" lgr 0,%[fc]\n"
|
||||
" lgr 1,%[pba]\n"
|
||||
"0: .insn rre,%[opc] << 16,0,%[src]\n"
|
||||
" brc 1,0b\n" /* handle partial completion */
|
||||
: [src] "+a" (r2), [len] "+d" (r3)
|
||||
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMAC)
|
||||
: "cc", "memory");
|
||||
: [src] "+&d" (s.pair)
|
||||
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
|
||||
[opc] "i" (CPACF_KMAC)
|
||||
: "cc", "memory", "0", "1");
|
||||
|
||||
return src_len - r3;
|
||||
return src_len - s.odd;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -385,22 +394,24 @@ static inline int cpacf_kmac(unsigned long func, void *param,
|
||||
static inline int cpacf_kmctr(unsigned long func, void *param, u8 *dest,
|
||||
const u8 *src, long src_len, u8 *counter)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) func;
|
||||
register unsigned long r1 asm("1") = (unsigned long) param;
|
||||
register unsigned long r2 asm("2") = (unsigned long) src;
|
||||
register unsigned long r3 asm("3") = (unsigned long) src_len;
|
||||
register unsigned long r4 asm("4") = (unsigned long) dest;
|
||||
register unsigned long r6 asm("6") = (unsigned long) counter;
|
||||
union register_pair d, s, c;
|
||||
|
||||
d.even = (unsigned long)dest;
|
||||
s.even = (unsigned long)src;
|
||||
s.odd = (unsigned long)src_len;
|
||||
c.even = (unsigned long)counter;
|
||||
asm volatile(
|
||||
" lgr 0,%[fc]\n"
|
||||
" lgr 1,%[pba]\n"
|
||||
"0: .insn rrf,%[opc] << 16,%[dst],%[src],%[ctr],0\n"
|
||||
" brc 1,0b\n" /* handle partial completion */
|
||||
: [src] "+a" (r2), [len] "+d" (r3),
|
||||
[dst] "+a" (r4), [ctr] "+a" (r6)
|
||||
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMCTR)
|
||||
: "cc", "memory");
|
||||
: [src] "+&d" (s.pair), [dst] "+&d" (d.pair),
|
||||
[ctr] "+&d" (c.pair)
|
||||
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
|
||||
[opc] "i" (CPACF_KMCTR)
|
||||
: "cc", "memory", "0", "1");
|
||||
|
||||
return src_len - r3;
|
||||
return src_len - s.odd;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -417,20 +428,21 @@ static inline void cpacf_prno(unsigned long func, void *param,
|
||||
u8 *dest, unsigned long dest_len,
|
||||
const u8 *seed, unsigned long seed_len)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) func;
|
||||
register unsigned long r1 asm("1") = (unsigned long) param;
|
||||
register unsigned long r2 asm("2") = (unsigned long) dest;
|
||||
register unsigned long r3 asm("3") = (unsigned long) dest_len;
|
||||
register unsigned long r4 asm("4") = (unsigned long) seed;
|
||||
register unsigned long r5 asm("5") = (unsigned long) seed_len;
|
||||
union register_pair d, s;
|
||||
|
||||
d.even = (unsigned long)dest;
|
||||
d.odd = (unsigned long)dest_len;
|
||||
s.even = (unsigned long)seed;
|
||||
s.odd = (unsigned long)seed_len;
|
||||
asm volatile (
|
||||
" lgr 0,%[fc]\n"
|
||||
" lgr 1,%[pba]\n"
|
||||
"0: .insn rre,%[opc] << 16,%[dst],%[seed]\n"
|
||||
" brc 1,0b\n" /* handle partial completion */
|
||||
: [dst] "+a" (r2), [dlen] "+d" (r3)
|
||||
: [fc] "d" (r0), [pba] "a" (r1),
|
||||
[seed] "a" (r4), [slen] "d" (r5), [opc] "i" (CPACF_PRNO)
|
||||
: "cc", "memory");
|
||||
: [dst] "+&d" (d.pair)
|
||||
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
|
||||
[seed] "d" (s.pair), [opc] "i" (CPACF_PRNO)
|
||||
: "cc", "memory", "0", "1");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -443,19 +455,19 @@ static inline void cpacf_prno(unsigned long func, void *param,
|
||||
static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
|
||||
u8 *cbuf, unsigned long cbuf_len)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) CPACF_PRNO_TRNG;
|
||||
register unsigned long r2 asm("2") = (unsigned long) ucbuf;
|
||||
register unsigned long r3 asm("3") = (unsigned long) ucbuf_len;
|
||||
register unsigned long r4 asm("4") = (unsigned long) cbuf;
|
||||
register unsigned long r5 asm("5") = (unsigned long) cbuf_len;
|
||||
union register_pair u, c;
|
||||
|
||||
u.even = (unsigned long)ucbuf;
|
||||
u.odd = (unsigned long)ucbuf_len;
|
||||
c.even = (unsigned long)cbuf;
|
||||
c.odd = (unsigned long)cbuf_len;
|
||||
asm volatile (
|
||||
" lghi 0,%[fc]\n"
|
||||
"0: .insn rre,%[opc] << 16,%[ucbuf],%[cbuf]\n"
|
||||
" brc 1,0b\n" /* handle partial completion */
|
||||
: [ucbuf] "+a" (r2), [ucbuflen] "+d" (r3),
|
||||
[cbuf] "+a" (r4), [cbuflen] "+d" (r5)
|
||||
: [fc] "d" (r0), [opc] "i" (CPACF_PRNO)
|
||||
: "cc", "memory");
|
||||
: [ucbuf] "+&d" (u.pair), [cbuf] "+&d" (c.pair)
|
||||
: [fc] "K" (CPACF_PRNO_TRNG), [opc] "i" (CPACF_PRNO)
|
||||
: "cc", "memory", "0");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -466,15 +478,15 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len,
|
||||
*/
|
||||
static inline void cpacf_pcc(unsigned long func, void *param)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) func;
|
||||
register unsigned long r1 asm("1") = (unsigned long) param;
|
||||
|
||||
asm volatile(
|
||||
" lgr 0,%[fc]\n"
|
||||
" lgr 1,%[pba]\n"
|
||||
"0: .insn rre,%[opc] << 16,0,0\n" /* PCC opcode */
|
||||
" brc 1,0b\n" /* handle partial completion */
|
||||
:
|
||||
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCC)
|
||||
: "cc", "memory");
|
||||
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
|
||||
[opc] "i" (CPACF_PCC)
|
||||
: "cc", "memory", "0", "1");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -487,14 +499,14 @@ static inline void cpacf_pcc(unsigned long func, void *param)
|
||||
*/
|
||||
static inline void cpacf_pckmo(long func, void *param)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) func;
|
||||
register unsigned long r1 asm("1") = (unsigned long) param;
|
||||
|
||||
asm volatile(
|
||||
" lgr 0,%[fc]\n"
|
||||
" lgr 1,%[pba]\n"
|
||||
" .insn rre,%[opc] << 16,0,0\n" /* PCKMO opcode */
|
||||
:
|
||||
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_PCKMO)
|
||||
: "cc", "memory");
|
||||
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
|
||||
[opc] "i" (CPACF_PCKMO)
|
||||
: "cc", "memory", "0", "1");
|
||||
}
|
||||
|
||||
/**
|
||||
@ -512,21 +524,23 @@ static inline void cpacf_kma(unsigned long func, void *param, u8 *dest,
|
||||
const u8 *src, unsigned long src_len,
|
||||
const u8 *aad, unsigned long aad_len)
|
||||
{
|
||||
register unsigned long r0 asm("0") = (unsigned long) func;
|
||||
register unsigned long r1 asm("1") = (unsigned long) param;
|
||||
register unsigned long r2 asm("2") = (unsigned long) src;
|
||||
register unsigned long r3 asm("3") = (unsigned long) src_len;
|
||||
register unsigned long r4 asm("4") = (unsigned long) aad;
|
||||
register unsigned long r5 asm("5") = (unsigned long) aad_len;
|
||||
register unsigned long r6 asm("6") = (unsigned long) dest;
|
||||
union register_pair d, s, a;
|
||||
|
||||
d.even = (unsigned long)dest;
|
||||
s.even = (unsigned long)src;
|
||||
s.odd = (unsigned long)src_len;
|
||||
a.even = (unsigned long)aad;
|
||||
a.odd = (unsigned long)aad_len;
|
||||
asm volatile(
|
||||
" lgr 0,%[fc]\n"
|
||||
" lgr 1,%[pba]\n"
|
||||
"0: .insn rrf,%[opc] << 16,%[dst],%[src],%[aad],0\n"
|
||||
" brc 1,0b\n" /* handle partial completion */
|
||||
: [dst] "+a" (r6), [src] "+a" (r2), [slen] "+d" (r3),
|
||||
[aad] "+a" (r4), [alen] "+d" (r5)
|
||||
: [fc] "d" (r0), [pba] "a" (r1), [opc] "i" (CPACF_KMA)
|
||||
: "cc", "memory");
|
||||
: [dst] "+&d" (d.pair), [src] "+&d" (s.pair),
|
||||
[aad] "+&d" (a.pair)
|
||||
: [fc] "d" (func), [pba] "d" ((unsigned long)param),
|
||||
[opc] "i" (CPACF_KMA)
|
||||
: "cc", "memory", "0", "1");
|
||||
}
|
||||
|
||||
#endif /* _ASM_S390_CPACF_H */
|
||||
|
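The cpacf conversions above replace GCC "register asm" variables with operands built from union register_pair, so that an even/odd register pair is expressed as a single 128-bit inline-asm operand. A simplified, stand-alone sketch of the idea (the real type lives in arch/s390/include/asm/types.h; the layout shown here is an assumption for illustration):

#include <stdio.h>

/* Simplified model of the s390 register_pair helper: the 128-bit member
 * is handed to inline asm as one operand, while C code fills in the
 * even/odd halves (e.g. address in .even, length in .odd). */
union register_pair {
        unsigned __int128 pair;
        struct {
                unsigned long even;
                unsigned long odd;
        };
};

int main(void)
{
        union register_pair s;

        s.even = 0x2000;        /* e.g. source address for cpacf_km() */
        s.odd  = 512;           /* e.g. source length */
        printf("even=%#lx odd=%lu\n", s.even, s.odd);
        return 0;
}
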
@ -23,7 +23,7 @@
#define MAX_ELF_HWCAP_FEATURES (8 * sizeof(elf_hwcap))
#define MAX_CPU_FEATURES MAX_ELF_HWCAP_FEATURES

#define cpu_feature(feat) ilog2(HWCAP_S390_ ## feat)
#define cpu_feature(feat) ilog2(HWCAP_ ## feat)

int cpu_have_feature(unsigned int nr);

@ -111,6 +111,23 @@ union ctlreg2 {
|
||||
};
|
||||
};
|
||||
|
||||
union ctlreg5 {
|
||||
unsigned long val;
|
||||
struct {
|
||||
unsigned long : 33;
|
||||
unsigned long pasteo: 25;
|
||||
unsigned long : 6;
|
||||
};
|
||||
};
|
||||
|
||||
union ctlreg15 {
|
||||
unsigned long val;
|
||||
struct {
|
||||
unsigned long lsea : 61;
|
||||
unsigned long : 3;
|
||||
};
|
||||
};
|
||||
|
||||
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
|
||||
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
|
||||
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <linux/time.h>
|
||||
#include <linux/refcount.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#define DEBUG_MAX_LEVEL 6 /* debug levels range from 0 to 6 */
|
||||
#define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */
|
||||
@ -391,38 +392,99 @@ int debug_register_view(debug_info_t *id, struct debug_view *view);
|
||||
|
||||
int debug_unregister_view(debug_info_t *id, struct debug_view *view);
|
||||
|
||||
#ifndef MODULE
|
||||
|
||||
/*
|
||||
define the debug levels:
|
||||
- 0 No debugging output to console or syslog
|
||||
- 1 Log internal errors to syslog, ignore check conditions
|
||||
- 2 Log internal errors and check conditions to syslog
|
||||
- 3 Log internal errors to console, log check conditions to syslog
|
||||
- 4 Log internal errors and check conditions to console
|
||||
- 5 panic on internal errors, log check conditions to console
|
||||
- 6 panic on both, internal errors and check conditions
|
||||
* Note: Initial page and area numbers must be fixed to allow static
|
||||
* initialization. This enables very early tracing. Changes to these values
|
||||
* must be reflected in __DEFINE_STATIC_AREA.
|
||||
*/
|
||||
#define EARLY_PAGES 8
|
||||
#define EARLY_AREAS 1
|
||||
|
||||
#ifndef DEBUG_LEVEL
|
||||
#define DEBUG_LEVEL 4
|
||||
#endif
|
||||
#define VNAME(var, suffix) __##var##_##suffix
|
||||
|
||||
#define INTERNAL_ERRMSG(x,y...) "E" __FILE__ "%d: " x, __LINE__, y
|
||||
#define INTERNAL_WRNMSG(x,y...) "W" __FILE__ "%d: " x, __LINE__, y
|
||||
#define INTERNAL_INFMSG(x,y...) "I" __FILE__ "%d: " x, __LINE__, y
|
||||
#define INTERNAL_DEBMSG(x,y...) "D" __FILE__ "%d: " x, __LINE__, y
|
||||
/*
|
||||
* Define static areas for early trace data. During boot debug_register_static()
|
||||
* will replace these with dynamically allocated areas to allow custom page and
|
||||
* area sizes, and dynamic resizing.
|
||||
*/
|
||||
#define __DEFINE_STATIC_AREA(var) \
|
||||
static char VNAME(var, data)[EARLY_PAGES][PAGE_SIZE] __initdata; \
|
||||
static debug_entry_t *VNAME(var, pages)[EARLY_PAGES] __initdata = { \
|
||||
(debug_entry_t *)VNAME(var, data)[0], \
|
||||
(debug_entry_t *)VNAME(var, data)[1], \
|
||||
(debug_entry_t *)VNAME(var, data)[2], \
|
||||
(debug_entry_t *)VNAME(var, data)[3], \
|
||||
(debug_entry_t *)VNAME(var, data)[4], \
|
||||
(debug_entry_t *)VNAME(var, data)[5], \
|
||||
(debug_entry_t *)VNAME(var, data)[6], \
|
||||
(debug_entry_t *)VNAME(var, data)[7], \
|
||||
}; \
|
||||
static debug_entry_t **VNAME(var, areas)[EARLY_AREAS] __initdata = { \
|
||||
(debug_entry_t **)VNAME(var, pages), \
|
||||
}; \
|
||||
static int VNAME(var, active_pages)[EARLY_AREAS] __initdata; \
|
||||
static int VNAME(var, active_entries)[EARLY_AREAS] __initdata
|
||||
|
||||
#if DEBUG_LEVEL > 0
|
||||
#define PRINT_DEBUG(x...) printk(KERN_DEBUG PRINTK_HEADER x)
|
||||
#define PRINT_INFO(x...) printk(KERN_INFO PRINTK_HEADER x)
|
||||
#define PRINT_WARN(x...) printk(KERN_WARNING PRINTK_HEADER x)
|
||||
#define PRINT_ERR(x...) printk(KERN_ERR PRINTK_HEADER x)
|
||||
#define PRINT_FATAL(x...) panic(PRINTK_HEADER x)
|
||||
#else
|
||||
#define PRINT_DEBUG(x...) printk(KERN_DEBUG PRINTK_HEADER x)
|
||||
#define PRINT_INFO(x...) printk(KERN_DEBUG PRINTK_HEADER x)
|
||||
#define PRINT_WARN(x...) printk(KERN_DEBUG PRINTK_HEADER x)
|
||||
#define PRINT_ERR(x...) printk(KERN_DEBUG PRINTK_HEADER x)
|
||||
#define PRINT_FATAL(x...) printk(KERN_DEBUG PRINTK_HEADER x)
|
||||
#endif /* DASD_DEBUG */
|
||||
#define __DEBUG_INFO_INIT(var, _name, _buf_size) { \
|
||||
.next = NULL, \
|
||||
.prev = NULL, \
|
||||
.ref_count = REFCOUNT_INIT(1), \
|
||||
.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
|
||||
.level = DEBUG_DEFAULT_LEVEL, \
|
||||
.nr_areas = EARLY_AREAS, \
|
||||
.pages_per_area = EARLY_PAGES, \
|
||||
.buf_size = (_buf_size), \
|
||||
.entry_size = sizeof(debug_entry_t) + (_buf_size), \
|
||||
.areas = VNAME(var, areas), \
|
||||
.active_area = 0, \
|
||||
.active_pages = VNAME(var, active_pages), \
|
||||
.active_entries = VNAME(var, active_entries), \
|
||||
.debugfs_root_entry = NULL, \
|
||||
.debugfs_entries = { NULL }, \
|
||||
.views = { NULL }, \
|
||||
.name = (_name), \
|
||||
.mode = 0600, \
|
||||
}
|
||||
|
||||
#define __REGISTER_STATIC_DEBUG_INFO(var, name, pages, areas, view) \
|
||||
static int __init VNAME(var, reg)(void) \
|
||||
{ \
|
||||
debug_register_static(&var, (pages), (areas)); \
|
||||
debug_register_view(&var, (view)); \
|
||||
return 0; \
|
||||
} \
|
||||
arch_initcall(VNAME(var, reg))
|
||||
|
||||
/**
|
||||
* DEFINE_STATIC_DEBUG_INFO - Define static debug_info_t
|
||||
*
|
||||
* @var: Name of debug_info_t variable
|
||||
* @name: Name of debug log (e.g. used for debugfs entry)
|
||||
* @pages_per_area: Number of pages per area
|
||||
* @nr_areas: Number of debug areas
|
||||
* @buf_size: Size of data area in each debug entry
|
||||
* @view: Pointer to debug view struct
|
||||
*
|
||||
* Define a static debug_info_t for early tracing. The associated debugfs log
|
||||
* is automatically registered with the specified debug view.
|
||||
*
|
||||
* Important: Users of this macro must not call any of the
|
||||
* debug_register/_unregister() functions for this debug_info_t!
|
||||
*
|
||||
* Note: Tracing will start with a fixed number of initial pages and areas.
|
||||
* The debug area will be changed to use the specified numbers during
|
||||
* arch_initcall.
|
||||
*/
|
||||
#define DEFINE_STATIC_DEBUG_INFO(var, name, pages, nr_areas, buf_size, view) \
|
||||
__DEFINE_STATIC_AREA(var); \
|
||||
static debug_info_t __refdata var = \
|
||||
__DEBUG_INFO_INIT(var, (name), (buf_size)); \
|
||||
__REGISTER_STATIC_DEBUG_INFO(var, name, pages, nr_areas, view)
|
||||
|
||||
void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);
|
||||
|
||||
#endif /* MODULE */
|
||||
|
||||
#endif /* DEBUG_H */
|
||||
|
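The kernel-doc above describes the intended use of DEFINE_STATIC_DEBUG_INFO. A minimal usage sketch (the log name, sizes and the hex/ascii view choice are illustrative, not taken from the diff):

/* Early trace buffer usable from built-in code before debugfs is up:
 * 8 pages per area, 1 area, 16 bytes of trace data per entry. */
DEFINE_STATIC_DEBUG_INFO(foo_dbf, "foo_early", 8, 1, 16, &debug_hex_ascii_view);

static void foo_trace_value(unsigned long val)
{
        /* Works even during very early boot; the static areas are
         * replaced by dynamically allocated ones at arch_initcall time. */
        debug_event(&foo_dbf, 2, &val, sizeof(val));
}
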
@ -309,6 +309,10 @@ int diag26c(void *req, void *resp, enum diag26c_sc subcode);
|
||||
|
||||
struct hypfs_diag0c_entry;
|
||||
|
||||
/*
|
||||
* This structure must contain only pointers/references into
|
||||
* the AMODE31 text section.
|
||||
*/
|
||||
struct diag_ops {
|
||||
int (*diag210)(struct diag210 *addr);
|
||||
int (*diag26c)(void *req, void *resp, enum diag26c_sc subcode);
|
||||
@ -317,6 +321,13 @@ struct diag_ops {
|
||||
void (*diag308_reset)(void);
|
||||
};
|
||||
|
||||
extern struct diag_ops diag_dma_ops;
|
||||
extern struct diag210 *__diag210_tmp_dma;
|
||||
extern struct diag_ops diag_amode31_ops;
|
||||
extern struct diag210 *__diag210_tmp_amode31;
|
||||
|
||||
int _diag210_amode31(struct diag210 *addr);
|
||||
int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode);
|
||||
int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode);
|
||||
void _diag0c_amode31(struct hypfs_diag0c_entry *entry);
|
||||
void _diag308_reset_amode31(void);
|
||||
|
||||
#endif /* _ASM_S390_DIAG_H */
|
||||
|
@ -91,29 +91,57 @@
|
||||
/* Keep this the last entry. */
|
||||
#define R_390_NUM 61
|
||||
|
||||
/* Bits present in AT_HWCAP. */
|
||||
#define HWCAP_S390_ESAN3 1
|
||||
#define HWCAP_S390_ZARCH 2
|
||||
#define HWCAP_S390_STFLE 4
|
||||
#define HWCAP_S390_MSA 8
|
||||
#define HWCAP_S390_LDISP 16
|
||||
#define HWCAP_S390_EIMM 32
|
||||
#define HWCAP_S390_DFP 64
|
||||
#define HWCAP_S390_HPAGE 128
|
||||
#define HWCAP_S390_ETF3EH 256
|
||||
#define HWCAP_S390_HIGH_GPRS 512
|
||||
#define HWCAP_S390_TE 1024
|
||||
#define HWCAP_S390_VXRS 2048
|
||||
#define HWCAP_S390_VXRS_BCD 4096
|
||||
#define HWCAP_S390_VXRS_EXT 8192
|
||||
#define HWCAP_S390_GS 16384
|
||||
#define HWCAP_S390_VXRS_EXT2 32768
|
||||
#define HWCAP_S390_VXRS_PDE 65536
|
||||
#define HWCAP_S390_SORT 131072
|
||||
#define HWCAP_S390_DFLT 262144
|
||||
enum {
|
||||
HWCAP_NR_ESAN3 = 0,
|
||||
HWCAP_NR_ZARCH = 1,
|
||||
HWCAP_NR_STFLE = 2,
|
||||
HWCAP_NR_MSA = 3,
|
||||
HWCAP_NR_LDISP = 4,
|
||||
HWCAP_NR_EIMM = 5,
|
||||
HWCAP_NR_DFP = 6,
|
||||
HWCAP_NR_HPAGE = 7,
|
||||
HWCAP_NR_ETF3EH = 8,
|
||||
HWCAP_NR_HIGH_GPRS = 9,
|
||||
HWCAP_NR_TE = 10,
|
||||
HWCAP_NR_VXRS = 11,
|
||||
HWCAP_NR_VXRS_BCD = 12,
|
||||
HWCAP_NR_VXRS_EXT = 13,
|
||||
HWCAP_NR_GS = 14,
|
||||
HWCAP_NR_VXRS_EXT2 = 15,
|
||||
HWCAP_NR_VXRS_PDE = 16,
|
||||
HWCAP_NR_SORT = 17,
|
||||
HWCAP_NR_DFLT = 18,
|
||||
HWCAP_NR_VXRS_PDE2 = 19,
|
||||
HWCAP_NR_NNPA = 20,
|
||||
HWCAP_NR_PCI_MIO = 21,
|
||||
HWCAP_NR_SIE = 22,
|
||||
HWCAP_NR_MAX
|
||||
};
|
||||
|
||||
/* Internal bits, not exposed via elf */
|
||||
#define HWCAP_INT_SIE 1UL
|
||||
/* Bits present in AT_HWCAP. */
|
||||
#define HWCAP_ESAN3 BIT(HWCAP_NR_ESAN3)
|
||||
#define HWCAP_ZARCH BIT(HWCAP_NR_ZARCH)
|
||||
#define HWCAP_STFLE BIT(HWCAP_NR_STFLE)
|
||||
#define HWCAP_MSA BIT(HWCAP_NR_MSA)
|
||||
#define HWCAP_LDISP BIT(HWCAP_NR_LDISP)
|
||||
#define HWCAP_EIMM BIT(HWCAP_NR_EIMM)
|
||||
#define HWCAP_DFP BIT(HWCAP_NR_DFP)
|
||||
#define HWCAP_HPAGE BIT(HWCAP_NR_HPAGE)
|
||||
#define HWCAP_ETF3EH BIT(HWCAP_NR_ETF3EH)
|
||||
#define HWCAP_HIGH_GPRS BIT(HWCAP_NR_HIGH_GPRS)
|
||||
#define HWCAP_TE BIT(HWCAP_NR_TE)
|
||||
#define HWCAP_VXRS BIT(HWCAP_NR_VXRS)
|
||||
#define HWCAP_VXRS_BCD BIT(HWCAP_NR_VXRS_BCD)
|
||||
#define HWCAP_VXRS_EXT BIT(HWCAP_NR_VXRS_EXT)
|
||||
#define HWCAP_GS BIT(HWCAP_NR_GS)
|
||||
#define HWCAP_VXRS_EXT2 BIT(HWCAP_NR_VXRS_EXT2)
|
||||
#define HWCAP_VXRS_PDE BIT(HWCAP_NR_VXRS_PDE)
|
||||
#define HWCAP_SORT BIT(HWCAP_NR_SORT)
|
||||
#define HWCAP_DFLT BIT(HWCAP_NR_DFLT)
|
||||
#define HWCAP_VXRS_PDE2 BIT(HWCAP_NR_VXRS_PDE2)
|
||||
#define HWCAP_NNPA BIT(HWCAP_NR_NNPA)
|
||||
#define HWCAP_PCI_MIO BIT(HWCAP_NR_PCI_MIO)
|
||||
#define HWCAP_SIE BIT(HWCAP_NR_SIE)
|
||||
|
||||
/*
|
||||
* These are used to set parameters in the core dumps.
|
||||
@ -209,10 +237,6 @@ struct arch_elf_state {
|
||||
extern unsigned long elf_hwcap;
|
||||
#define ELF_HWCAP (elf_hwcap)
|
||||
|
||||
/* Internal hardware capabilities, not exposed via elf */
|
||||
|
||||
extern unsigned long int_hwcap;
|
||||
|
||||
/* This yields a string that ld.so will use to load implementation
|
||||
specific libraries for optimization. This is more specific in
|
||||
intent than poking at uname or /proc/cpuinfo.
|
||||
|
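With the hwcaps rework above, the HWCAP_* masks are derived from HWCAP_NR_* bit numbers, and cpu_feature() (see the cpufeature.h hunk earlier) maps a capability name back to its bit number. A small sketch of how kernel code can test a capability (the VXRS check is an illustrative example, not from the diff):

#include <linux/types.h>
#include <asm/cpufeature.h>     /* cpu_feature(), cpu_have_feature() */
#include <asm/elf.h>            /* HWCAP_* bit masks from the hunk above */

static bool have_vector_facility(void)
{
        /* True if the vector facility bit is set in the hwcaps mask. */
        return cpu_have_feature(cpu_feature(VXRS));
}
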
@ -28,8 +28,8 @@ struct exception_table_entry
|
||||
long handler;
|
||||
};
|
||||
|
||||
extern struct exception_table_entry *__start_dma_ex_table;
|
||||
extern struct exception_table_entry *__stop_dma_ex_table;
|
||||
extern struct exception_table_entry *__start_amode31_ex_table;
|
||||
extern struct exception_table_entry *__stop_amode31_ex_table;
|
||||
|
||||
const struct exception_table_entry *s390_search_extables(unsigned long addr);
|
||||
|
||||
|
@ -18,7 +18,6 @@
|
||||
void ftrace_caller(void);
|
||||
|
||||
extern char ftrace_graph_caller_end;
|
||||
extern unsigned long ftrace_plt;
|
||||
extern void *ftrace_func;
|
||||
|
||||
struct dyn_arch_ftrace { };
|
||||
@ -31,10 +30,11 @@ struct dyn_arch_ftrace { };
|
||||
|
||||
struct module;
|
||||
struct dyn_ftrace;
|
||||
/*
|
||||
* Either -mhotpatch or -mnop-mcount is used - no explicit init is required
|
||||
*/
|
||||
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) { return 0; }
|
||||
|
||||
bool ftrace_need_init_nop(void);
|
||||
#define ftrace_need_init_nop ftrace_need_init_nop
|
||||
|
||||
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
|
||||
#define ftrace_init_nop ftrace_init_nop
|
||||
|
||||
static inline unsigned long ftrace_call_adjust(unsigned long addr)
|
||||
@ -42,42 +42,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
|
||||
return addr;
|
||||
}
|
||||
|
||||
struct ftrace_insn {
|
||||
u16 opc;
|
||||
s32 disp;
|
||||
} __packed;
|
||||
|
||||
static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
|
||||
{
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
/* brcl 0,0 */
|
||||
insn->opc = 0xc004;
|
||||
insn->disp = 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline int is_ftrace_nop(struct ftrace_insn *insn)
|
||||
{
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
if (insn->disp == 0)
|
||||
return 1;
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
|
||||
unsigned long ip)
|
||||
{
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
unsigned long target;
|
||||
|
||||
/* brasl r0,ftrace_caller */
|
||||
target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
|
||||
insn->opc = 0xc005;
|
||||
insn->disp = (target - ip) / 2;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Even though the system call numbers are identical for s390/s390x a
|
||||
* different system call table is used for compat tasks. This may lead
|
||||
|
21
arch/s390/include/asm/ftrace.lds.h
Normal file
21
arch/s390/include/asm/ftrace.lds.h
Normal file
@ -0,0 +1,21 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef DIV_ROUND_UP
|
||||
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
|
||||
#endif
|
||||
|
||||
#define SIZEOF_MCOUNT_LOC_ENTRY 8
|
||||
#define SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE 24
|
||||
#define FTRACE_HOTPATCH_TRAMPOLINES_SIZE(n) \
|
||||
DIV_ROUND_UP(SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE * (n), \
|
||||
SIZEOF_MCOUNT_LOC_ENTRY)
|
||||
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
#define FTRACE_HOTPATCH_TRAMPOLINES_TEXT \
|
||||
. = ALIGN(8); \
|
||||
__ftrace_hotpatch_trampolines_start = .; \
|
||||
. = . + FTRACE_HOTPATCH_TRAMPOLINES_SIZE(__stop_mcount_loc - \
|
||||
__start_mcount_loc); \
|
||||
__ftrace_hotpatch_trampolines_end = .;
|
||||
#else
|
||||
#define FTRACE_HOTPATCH_TRAMPOLINES_TEXT
|
||||
#endif
|
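The linker macro above sizes the trampoline area from the byte size of the __mcount_loc table: each table entry is 8 bytes and each trampoline is 24 bytes, so three bytes of trampoline text are reserved per byte of the table. A standalone sketch of the arithmetic (the entry count is hypothetical, not taken from the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define SIZEOF_MCOUNT_LOC_ENTRY		8
#define SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE	24
#define FTRACE_HOTPATCH_TRAMPOLINES_SIZE(n) \
	DIV_ROUND_UP(SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE * (n), \
		     SIZEOF_MCOUNT_LOC_ENTRY)

int main(void)
{
	/* Hypothetical: 1000 traced functions -> 8000 bytes of __mcount_loc. */
	unsigned long mcount_loc_bytes = 1000 * SIZEOF_MCOUNT_LOC_ENTRY;

	/* The linker macro takes the byte size of the table, not the entry count. */
	printf("reserved trampoline bytes: %lu\n",
	       FTRACE_HOTPATCH_TRAMPOLINES_SIZE(mcount_loc_bytes));	/* 24000 */
	return 0;
}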
@ -12,6 +12,7 @@
|
||||
#include <asm/types.h>
|
||||
#include <asm/cio.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/page.h>
|
||||
#include <uapi/asm/ipl.h>
|
||||
|
||||
struct ipl_parameter_block {
|
||||
|
arch/s390/include/asm/kfence.h (new file, 42 lines)
@ -0,0 +1,42 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
|
||||
#ifndef _ASM_S390_KFENCE_H
|
||||
#define _ASM_S390_KFENCE_H
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/kfence.h>
|
||||
#include <asm/set_memory.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
void __kernel_map_pages(struct page *page, int numpages, int enable);
|
||||
|
||||
static __always_inline bool arch_kfence_init_pool(void)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)
|
||||
|
||||
/*
|
||||
* Do not split kfence pool to 4k mapping with arch_kfence_init_pool(),
|
||||
* but earlier where page table allocations still happen with memblock.
|
||||
* Reason is that arch_kfence_init_pool() gets called when the system
|
||||
* is still in a limbo state - disabling and enabling bottom halves is
|
||||
* not yet allowed, but that is what our page_table_alloc() would do.
|
||||
*/
|
||||
static __always_inline void kfence_split_mapping(void)
|
||||
{
|
||||
#ifdef CONFIG_KFENCE
|
||||
unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;
|
||||
|
||||
set_memory_4k((unsigned long)__kfence_pool, pool_pages);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline bool kfence_protect_page(unsigned long addr, bool protect)
|
||||
{
|
||||
__kernel_map_pages(virt_to_page(addr), 1, !protect);
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif /* _ASM_S390_KFENCE_H */
|
@ -24,162 +24,79 @@
|
||||
#include <uapi/asm/kvm_para.h>
|
||||
#include <asm/diag.h>
|
||||
|
||||
static inline long __kvm_hypercall0(unsigned long nr)
|
||||
{
|
||||
register unsigned long __nr asm("1") = nr;
|
||||
register long __rc asm("2");
|
||||
#define HYPERCALL_FMT_0
|
||||
#define HYPERCALL_FMT_1 , "0" (r2)
|
||||
#define HYPERCALL_FMT_2 , "d" (r3) HYPERCALL_FMT_1
|
||||
#define HYPERCALL_FMT_3 , "d" (r4) HYPERCALL_FMT_2
|
||||
#define HYPERCALL_FMT_4 , "d" (r5) HYPERCALL_FMT_3
|
||||
#define HYPERCALL_FMT_5 , "d" (r6) HYPERCALL_FMT_4
|
||||
#define HYPERCALL_FMT_6 , "d" (r7) HYPERCALL_FMT_5
|
||||
|
||||
asm volatile ("diag 2,4,0x500\n"
|
||||
: "=d" (__rc) : "d" (__nr): "memory", "cc");
|
||||
return __rc;
|
||||
#define HYPERCALL_PARM_0
|
||||
#define HYPERCALL_PARM_1 , unsigned long arg1
|
||||
#define HYPERCALL_PARM_2 HYPERCALL_PARM_1, unsigned long arg2
|
||||
#define HYPERCALL_PARM_3 HYPERCALL_PARM_2, unsigned long arg3
|
||||
#define HYPERCALL_PARM_4 HYPERCALL_PARM_3, unsigned long arg4
|
||||
#define HYPERCALL_PARM_5 HYPERCALL_PARM_4, unsigned long arg5
|
||||
#define HYPERCALL_PARM_6 HYPERCALL_PARM_5, unsigned long arg6
|
||||
|
||||
#define HYPERCALL_REGS_0
|
||||
#define HYPERCALL_REGS_1 \
|
||||
register unsigned long r2 asm("2") = arg1
|
||||
#define HYPERCALL_REGS_2 \
|
||||
HYPERCALL_REGS_1; \
|
||||
register unsigned long r3 asm("3") = arg2
|
||||
#define HYPERCALL_REGS_3 \
|
||||
HYPERCALL_REGS_2; \
|
||||
register unsigned long r4 asm("4") = arg3
|
||||
#define HYPERCALL_REGS_4 \
|
||||
HYPERCALL_REGS_3; \
|
||||
register unsigned long r5 asm("5") = arg4
|
||||
#define HYPERCALL_REGS_5 \
|
||||
HYPERCALL_REGS_4; \
|
||||
register unsigned long r6 asm("6") = arg5
|
||||
#define HYPERCALL_REGS_6 \
|
||||
HYPERCALL_REGS_5; \
|
||||
register unsigned long r7 asm("7") = arg6
|
||||
|
||||
#define HYPERCALL_ARGS_0
|
||||
#define HYPERCALL_ARGS_1 , arg1
|
||||
#define HYPERCALL_ARGS_2 HYPERCALL_ARGS_1, arg2
|
||||
#define HYPERCALL_ARGS_3 HYPERCALL_ARGS_2, arg3
|
||||
#define HYPERCALL_ARGS_4 HYPERCALL_ARGS_3, arg4
|
||||
#define HYPERCALL_ARGS_5 HYPERCALL_ARGS_4, arg5
|
||||
#define HYPERCALL_ARGS_6 HYPERCALL_ARGS_5, arg6
|
||||
|
||||
#define GENERATE_KVM_HYPERCALL_FUNC(args) \
|
||||
static inline \
|
||||
long __kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args) \
|
||||
{ \
|
||||
register unsigned long __nr asm("1") = nr; \
|
||||
register long __rc asm("2"); \
|
||||
HYPERCALL_REGS_##args; \
|
||||
\
|
||||
asm volatile ( \
|
||||
" diag 2,4,0x500\n" \
|
||||
: "=d" (__rc) \
|
||||
: "d" (__nr) HYPERCALL_FMT_##args \
|
||||
: "memory", "cc"); \
|
||||
return __rc; \
|
||||
} \
|
||||
\
|
||||
static inline \
|
||||
long kvm_hypercall##args(unsigned long nr HYPERCALL_PARM_##args) \
|
||||
{ \
|
||||
diag_stat_inc(DIAG_STAT_X500); \
|
||||
return __kvm_hypercall##args(nr HYPERCALL_ARGS_##args); \
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall0(unsigned long nr)
|
||||
{
|
||||
diag_stat_inc(DIAG_STAT_X500);
|
||||
return __kvm_hypercall0(nr);
|
||||
}
|
||||
|
||||
static inline long __kvm_hypercall1(unsigned long nr, unsigned long p1)
|
||||
{
|
||||
register unsigned long __nr asm("1") = nr;
|
||||
register unsigned long __p1 asm("2") = p1;
|
||||
register long __rc asm("2");
|
||||
|
||||
asm volatile ("diag 2,4,0x500\n"
|
||||
: "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
|
||||
return __rc;
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
|
||||
{
|
||||
diag_stat_inc(DIAG_STAT_X500);
|
||||
return __kvm_hypercall1(nr, p1);
|
||||
}
|
||||
|
||||
static inline long __kvm_hypercall2(unsigned long nr, unsigned long p1,
|
||||
unsigned long p2)
|
||||
{
|
||||
register unsigned long __nr asm("1") = nr;
|
||||
register unsigned long __p1 asm("2") = p1;
|
||||
register unsigned long __p2 asm("3") = p2;
|
||||
register long __rc asm("2");
|
||||
|
||||
asm volatile ("diag 2,4,0x500\n"
|
||||
: "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
|
||||
: "memory", "cc");
|
||||
return __rc;
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
|
||||
unsigned long p2)
|
||||
{
|
||||
diag_stat_inc(DIAG_STAT_X500);
|
||||
return __kvm_hypercall2(nr, p1, p2);
|
||||
}
|
||||
|
||||
static inline long __kvm_hypercall3(unsigned long nr, unsigned long p1,
|
||||
unsigned long p2, unsigned long p3)
|
||||
{
|
||||
register unsigned long __nr asm("1") = nr;
|
||||
register unsigned long __p1 asm("2") = p1;
|
||||
register unsigned long __p2 asm("3") = p2;
|
||||
register unsigned long __p3 asm("4") = p3;
|
||||
register long __rc asm("2");
|
||||
|
||||
asm volatile ("diag 2,4,0x500\n"
|
||||
: "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
|
||||
"d" (__p3) : "memory", "cc");
|
||||
return __rc;
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
|
||||
unsigned long p2, unsigned long p3)
|
||||
{
|
||||
diag_stat_inc(DIAG_STAT_X500);
|
||||
return __kvm_hypercall3(nr, p1, p2, p3);
|
||||
}
|
||||
|
||||
static inline long __kvm_hypercall4(unsigned long nr, unsigned long p1,
|
||||
unsigned long p2, unsigned long p3,
|
||||
unsigned long p4)
|
||||
{
|
||||
register unsigned long __nr asm("1") = nr;
|
||||
register unsigned long __p1 asm("2") = p1;
|
||||
register unsigned long __p2 asm("3") = p2;
|
||||
register unsigned long __p3 asm("4") = p3;
|
||||
register unsigned long __p4 asm("5") = p4;
|
||||
register long __rc asm("2");
|
||||
|
||||
asm volatile ("diag 2,4,0x500\n"
|
||||
: "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
|
||||
"d" (__p3), "d" (__p4) : "memory", "cc");
|
||||
return __rc;
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
|
||||
unsigned long p2, unsigned long p3,
|
||||
unsigned long p4)
|
||||
{
|
||||
diag_stat_inc(DIAG_STAT_X500);
|
||||
return __kvm_hypercall4(nr, p1, p2, p3, p4);
|
||||
}
|
||||
|
||||
static inline long __kvm_hypercall5(unsigned long nr, unsigned long p1,
|
||||
unsigned long p2, unsigned long p3,
|
||||
unsigned long p4, unsigned long p5)
|
||||
{
|
||||
register unsigned long __nr asm("1") = nr;
|
||||
register unsigned long __p1 asm("2") = p1;
|
||||
register unsigned long __p2 asm("3") = p2;
|
||||
register unsigned long __p3 asm("4") = p3;
|
||||
register unsigned long __p4 asm("5") = p4;
|
||||
register unsigned long __p5 asm("6") = p5;
|
||||
register long __rc asm("2");
|
||||
|
||||
asm volatile ("diag 2,4,0x500\n"
|
||||
: "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
|
||||
"d" (__p3), "d" (__p4), "d" (__p5) : "memory", "cc");
|
||||
return __rc;
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
|
||||
unsigned long p2, unsigned long p3,
|
||||
unsigned long p4, unsigned long p5)
|
||||
{
|
||||
diag_stat_inc(DIAG_STAT_X500);
|
||||
return __kvm_hypercall5(nr, p1, p2, p3, p4, p5);
|
||||
}
|
||||
|
||||
static inline long __kvm_hypercall6(unsigned long nr, unsigned long p1,
|
||||
unsigned long p2, unsigned long p3,
|
||||
unsigned long p4, unsigned long p5,
|
||||
unsigned long p6)
|
||||
{
|
||||
register unsigned long __nr asm("1") = nr;
|
||||
register unsigned long __p1 asm("2") = p1;
|
||||
register unsigned long __p2 asm("3") = p2;
|
||||
register unsigned long __p3 asm("4") = p3;
|
||||
register unsigned long __p4 asm("5") = p4;
|
||||
register unsigned long __p5 asm("6") = p5;
|
||||
register unsigned long __p6 asm("7") = p6;
|
||||
register long __rc asm("2");
|
||||
|
||||
asm volatile ("diag 2,4,0x500\n"
|
||||
: "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
|
||||
"d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
|
||||
: "memory", "cc");
|
||||
return __rc;
|
||||
}
|
||||
|
||||
static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
|
||||
unsigned long p2, unsigned long p3,
|
||||
unsigned long p4, unsigned long p5,
|
||||
unsigned long p6)
|
||||
{
|
||||
diag_stat_inc(DIAG_STAT_X500);
|
||||
return __kvm_hypercall6(nr, p1, p2, p3, p4, p5, p6);
|
||||
}
|
||||
GENERATE_KVM_HYPERCALL_FUNC(0)
|
||||
GENERATE_KVM_HYPERCALL_FUNC(1)
|
||||
GENERATE_KVM_HYPERCALL_FUNC(2)
|
||||
GENERATE_KVM_HYPERCALL_FUNC(3)
|
||||
GENERATE_KVM_HYPERCALL_FUNC(4)
|
||||
GENERATE_KVM_HYPERCALL_FUNC(5)
|
||||
GENERATE_KVM_HYPERCALL_FUNC(6)
|
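For reference, this is roughly the code GENERATE_KVM_HYPERCALL_FUNC(2) emits once the HYPERCALL_* helpers are substituted; it is a hand expansion for illustration only, not an addition to the patch:

static inline long __kvm_hypercall2(unsigned long nr, unsigned long arg1,
				    unsigned long arg2)
{
	register unsigned long __nr asm("1") = nr;
	register long __rc asm("2");
	register unsigned long r2 asm("2") = arg1;
	register unsigned long r3 asm("3") = arg2;

	asm volatile (
		"	diag	2,4,0x500\n"
		: "=d" (__rc)
		: "d" (__nr), "d" (r3), "0" (r2)
		: "memory", "cc");
	return __rc;
}

static inline long kvm_hypercall2(unsigned long nr, unsigned long arg1,
				  unsigned long arg2)
{
	diag_stat_inc(DIAG_STAT_X500);
	return __kvm_hypercall2(nr, arg1, arg2);
}

The "0" (r2) constraint ties arg1 to the same register as the return value, which is why __rc and r2 both carry asm("2").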
||||
|
||||
/* kvm on s390 is always paravirtualization enabled */
|
||||
static inline int kvm_para_available(void)
|
||||
|
@ -22,7 +22,7 @@
|
||||
|
||||
#define EX_TABLE(_fault, _target) \
|
||||
__EX_TABLE(__ex_table, _fault, _target)
|
||||
#define EX_TABLE_DMA(_fault, _target) \
|
||||
__EX_TABLE(.dma.ex_table, _fault, _target)
|
||||
#define EX_TABLE_AMODE31(_fault, _target) \
|
||||
__EX_TABLE(.amode31.ex_table, _fault, _target)
|
||||
|
||||
#endif
|
||||
|
@ -124,7 +124,8 @@ struct lowcore {
|
||||
/* Restart function and parameter. */
|
||||
__u64 restart_fn; /* 0x0370 */
|
||||
__u64 restart_data; /* 0x0378 */
|
||||
__u64 restart_source; /* 0x0380 */
|
||||
__u32 restart_source; /* 0x0380 */
|
||||
__u32 restart_flags; /* 0x0384 */
|
||||
|
||||
/* Address space pointer. */
|
||||
__u64 kernel_asce; /* 0x0388 */
|
||||
|
@ -8,16 +8,14 @@
|
||||
* This file contains the s390 architecture specific module code.
|
||||
*/
|
||||
|
||||
struct mod_arch_syminfo
|
||||
{
|
||||
struct mod_arch_syminfo {
|
||||
unsigned long got_offset;
|
||||
unsigned long plt_offset;
|
||||
int got_initialized;
|
||||
int plt_initialized;
|
||||
};
|
||||
|
||||
struct mod_arch_specific
|
||||
{
|
||||
struct mod_arch_specific {
|
||||
/* Starting offset of got in the module core memory. */
|
||||
unsigned long got_offset;
|
||||
/* Starting offset of plt in the module core memory. */
|
||||
@ -30,6 +28,14 @@ struct mod_arch_specific
|
||||
int nsyms;
|
||||
/* Additional symbol information (got and plt offsets). */
|
||||
struct mod_arch_syminfo *syminfo;
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
/* Start of memory reserved for ftrace hotpatch trampolines. */
|
||||
struct ftrace_hotpatch_trampoline *trampolines_start;
|
||||
/* End of memory reserved for ftrace hotpatch trampolines. */
|
||||
struct ftrace_hotpatch_trampoline *trampolines_end;
|
||||
/* Next unused ftrace hotpatch trampoline slot. */
|
||||
struct ftrace_hotpatch_trampoline *next_trampoline;
|
||||
#endif /* CONFIG_FUNCTION_TRACER */
|
||||
};
|
||||
|
||||
#endif /* _ASM_S390_MODULE_H */
|
||||
|
@ -144,9 +144,6 @@ struct page;
|
||||
void arch_free_page(struct page *page, int order);
|
||||
void arch_alloc_page(struct page *page, int order);
|
||||
void arch_set_page_dat(struct page *page, int order);
|
||||
void arch_set_page_nodat(struct page *page, int order);
|
||||
int arch_test_page_nodat(struct page *page);
|
||||
void arch_set_page_states(int make_stable);
|
||||
|
||||
static inline int devmem_is_allowed(unsigned long pfn)
|
||||
{
|
||||
|
@ -216,9 +216,10 @@ void zpci_remove_reserved_devices(void);
|
||||
int clp_setup_writeback_mio(void);
|
||||
int clp_scan_pci_devices(void);
|
||||
int clp_query_pci_fn(struct zpci_dev *zdev);
|
||||
int clp_enable_fh(struct zpci_dev *, u8);
|
||||
int clp_disable_fh(struct zpci_dev *);
|
||||
int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as);
|
||||
int clp_disable_fh(struct zpci_dev *zdev, u32 *fh);
|
||||
int clp_get_state(u32 fid, enum zpci_state *state);
|
||||
int clp_refresh_fh(u32 fid, u32 *fh);
|
||||
|
||||
/* UID */
|
||||
void update_uid_checking(bool new);
|
||||
@ -271,6 +272,8 @@ struct zpci_dev *get_zdev_by_fid(u32);
|
||||
/* DMA */
|
||||
int zpci_dma_init(void);
|
||||
void zpci_dma_exit(void);
|
||||
int zpci_dma_init_device(struct zpci_dev *zdev);
|
||||
int zpci_dma_exit_device(struct zpci_dev *zdev);
|
||||
|
||||
/* IRQ */
|
||||
int __init zpci_irq_init(void);
|
||||
|
@ -182,8 +182,6 @@ static inline unsigned long *get_st_pto(unsigned long entry)
|
||||
}
|
||||
|
||||
/* Prototypes */
|
||||
int zpci_dma_init_device(struct zpci_dev *);
|
||||
void zpci_dma_exit_device(struct zpci_dev *);
|
||||
void dma_free_seg_table(unsigned long);
|
||||
unsigned long *dma_alloc_cpu_table(void);
|
||||
void dma_cleanup_tables(unsigned long *);
|
||||
|
@ -67,15 +67,15 @@ extern unsigned long zero_page_mask;
|
||||
/* TODO: s390 cannot support io_remap_pfn_range... */
|
||||
|
||||
#define pte_ERROR(e) \
|
||||
printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
|
||||
pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
|
||||
#define pmd_ERROR(e) \
|
||||
printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
|
||||
pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
|
||||
#define pud_ERROR(e) \
|
||||
printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
|
||||
pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
|
||||
#define p4d_ERROR(e) \
|
||||
printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
|
||||
pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
|
||||
#define pgd_ERROR(e) \
|
||||
printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
|
||||
pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
|
||||
|
||||
/*
|
||||
* The vmalloc and module area will always be on the topmost area of the
|
||||
|
@ -26,6 +26,8 @@
|
||||
#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
|
||||
#define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
|
||||
|
||||
#define RESTART_FLAG_CTLREGS _AC(1 << 0, U)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/cpumask.h>
|
||||
|
@ -291,16 +291,15 @@ struct qdio_ssqd_desc {
|
||||
typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
|
||||
int, int, unsigned long);
|
||||
|
||||
/* qdio errors reported to the upper-layer program */
|
||||
/* qdio errors reported through the queue handlers: */
|
||||
#define QDIO_ERROR_ACTIVATE 0x0001
|
||||
#define QDIO_ERROR_GET_BUF_STATE 0x0002
|
||||
#define QDIO_ERROR_SET_BUF_STATE 0x0004
|
||||
|
||||
/* extra info for completed SBALs: */
|
||||
#define QDIO_ERROR_SLSB_STATE 0x0100
|
||||
#define QDIO_ERROR_SLSB_PENDING 0x0200
|
||||
|
||||
#define QDIO_ERROR_FATAL 0x00ff
|
||||
#define QDIO_ERROR_TEMPORARY 0xff00
|
||||
|
||||
/* for qdio_cleanup */
|
||||
#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01
|
||||
#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
|
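The reworded comments make the grouping explicit: QDIO_ERROR_FATAL and QDIO_ERROR_TEMPORARY are masks over the activate/buffer-state errors and the SLSB bits respectively. A standalone sketch (error words invented, defines copied from above) of how a queue handler might classify the qdio_error it is passed:

#include <stdio.h>

/* qdio errors reported through the queue handlers: */
#define QDIO_ERROR_ACTIVATE		0x0001
#define QDIO_ERROR_GET_BUF_STATE	0x0002
#define QDIO_ERROR_SET_BUF_STATE	0x0004
/* extra info for completed SBALs: */
#define QDIO_ERROR_SLSB_STATE		0x0100
#define QDIO_ERROR_SLSB_PENDING		0x0200

#define QDIO_ERROR_FATAL		0x00ff
#define QDIO_ERROR_TEMPORARY		0xff00

static const char *classify(unsigned int qdio_error)
{
	if (qdio_error & QDIO_ERROR_FATAL)
		return "fatal";
	if (qdio_error & QDIO_ERROR_TEMPORARY)
		return "temporary";
	return "none";
}

int main(void)
{
	/* Hypothetical error words as a queue handler might see them. */
	printf("%s\n", classify(QDIO_ERROR_GET_BUF_STATE));	/* fatal */
	printf("%s\n", classify(QDIO_ERROR_SLSB_PENDING));	/* temporary */
	printf("%s\n", classify(0));				/* none */
	return 0;
}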
||||
@ -312,8 +311,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
|
||||
* @qib_param_field_format: format for qib_parm_field
|
||||
* @qib_param_field: pointer to 128 bytes or NULL, if no param field
|
||||
* @qib_rflags: rflags to set
|
||||
* @input_slib_elements: pointer to no_input_qs * 128 words of data or NULL
|
||||
* @output_slib_elements: pointer to no_output_qs * 128 words of data or NULL
|
||||
* @no_input_qs: number of input queues
|
||||
* @no_output_qs: number of output queues
|
||||
* @input_handler: handler to be called for input queues
|
||||
@ -330,27 +327,18 @@ struct qdio_initialize {
|
||||
unsigned int qib_param_field_format;
|
||||
unsigned char *qib_param_field;
|
||||
unsigned char qib_rflags;
|
||||
unsigned long *input_slib_elements;
|
||||
unsigned long *output_slib_elements;
|
||||
unsigned int no_input_qs;
|
||||
unsigned int no_output_qs;
|
||||
qdio_handler_t *input_handler;
|
||||
qdio_handler_t *output_handler;
|
||||
void (*irq_poll)(struct ccw_device *cdev, unsigned long data);
|
||||
unsigned int scan_threshold;
|
||||
unsigned long int_parm;
|
||||
struct qdio_buffer ***input_sbal_addr_array;
|
||||
struct qdio_buffer ***output_sbal_addr_array;
|
||||
};
|
||||
|
||||
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
|
||||
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_establish */
|
||||
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
|
||||
#define QDIO_STATE_STOPPED 0x00000010 /* after queues went down */
|
||||
|
||||
#define QDIO_FLAG_SYNC_INPUT 0x01
|
||||
#define QDIO_FLAG_SYNC_OUTPUT 0x02
|
||||
#define QDIO_FLAG_PCI_OUT 0x10
|
||||
|
||||
int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count);
|
||||
void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count);
|
||||
@ -367,7 +355,6 @@ extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
|
||||
unsigned int bufnr, unsigned int count, struct qaob *aob);
|
||||
extern int qdio_start_irq(struct ccw_device *cdev);
|
||||
extern int qdio_stop_irq(struct ccw_device *cdev);
|
||||
extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
|
||||
extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
|
||||
bool is_input, unsigned int *bufnr,
|
||||
unsigned int *error);
|
||||
|
@ -8,8 +8,6 @@
|
||||
#define _ASM_S390_SCLP_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <asm/chpid.h>
|
||||
#include <asm/cpu.h>
|
||||
|
||||
#define SCLP_CHP_INFO_MASK_SIZE 32
|
||||
#define EARLY_SCCB_SIZE PAGE_SIZE
|
||||
@ -19,6 +17,10 @@
|
||||
/* 24 + 16 * SCLP_MAX_CORES */
|
||||
#define EXT_SCCB_READ_CPU (3 * PAGE_SIZE)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <asm/chpid.h>
|
||||
#include <asm/cpu.h>
|
||||
|
||||
struct sclp_chp_info {
|
||||
u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
|
||||
u8 standby[SCLP_CHP_INFO_MASK_SIZE];
|
||||
@ -113,6 +115,9 @@ struct zpci_report_error_header {
|
||||
u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */
|
||||
} __packed;
|
||||
|
||||
extern char *sclp_early_sccb;
|
||||
|
||||
void sclp_early_set_buffer(void *sccb);
|
||||
int sclp_early_read_info(void);
|
||||
int sclp_early_read_storage_info(void);
|
||||
int sclp_early_get_core_info(struct sclp_core_info *info);
|
||||
@ -147,4 +152,5 @@ static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
|
||||
return _sclp_get_core_info(info);
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* _ASM_S390_SCLP_H */
|
||||
|
@ -35,7 +35,7 @@ static inline int arch_is_kernel_initmem_freed(unsigned long addr)
|
||||
*/
|
||||
#define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var
|
||||
|
||||
extern unsigned long __sdma, __edma;
|
||||
extern unsigned long __stext_dma, __etext_dma;
|
||||
extern unsigned long __samode31, __eamode31;
|
||||
extern unsigned long __stext_amode31, __etext_amode31;
|
||||
|
||||
#endif
|
||||
|
@ -10,6 +10,7 @@ extern struct mutex cpa_mutex;
|
||||
#define SET_MEMORY_RW 2UL
|
||||
#define SET_MEMORY_NX 4UL
|
||||
#define SET_MEMORY_X 8UL
|
||||
#define SET_MEMORY_4K 16UL
|
||||
|
||||
int __set_memory(unsigned long addr, int numpages, unsigned long flags);
|
||||
|
||||
@ -33,4 +34,9 @@ static inline int set_memory_x(unsigned long addr, int numpages)
|
||||
return __set_memory(addr, numpages, SET_MEMORY_X);
|
||||
}
|
||||
|
||||
static inline int set_memory_4k(unsigned long addr, int numpages)
|
||||
{
|
||||
return __set_memory(addr, numpages, SET_MEMORY_4K);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -10,11 +10,8 @@
|
||||
#include <uapi/asm/setup.h>
|
||||
#include <linux/build_bug.h>
|
||||
|
||||
#define EP_OFFSET 0x10008
|
||||
#define EP_STRING "S390EP"
|
||||
#define PARMAREA 0x10400
|
||||
#define EARLY_SCCB_OFFSET 0x11000
|
||||
#define HEAD_END 0x12000
|
||||
#define HEAD_END 0x11000
|
||||
|
||||
/*
|
||||
* Machine features detected in early.c
|
||||
@ -36,6 +33,7 @@
|
||||
#define MACHINE_FLAG_NX BIT(15)
|
||||
#define MACHINE_FLAG_GS BIT(16)
|
||||
#define MACHINE_FLAG_SCC BIT(17)
|
||||
#define MACHINE_FLAG_PCI_MIO BIT(18)
|
||||
|
||||
#define LPP_MAGIC BIT(31)
|
||||
#define LPP_PID_MASK _AC(0xffffffff, UL)
|
||||
@ -45,28 +43,11 @@
|
||||
#define STARTUP_NORMAL_OFFSET 0x10000
|
||||
#define STARTUP_KDUMP_OFFSET 0x10010
|
||||
|
||||
/* Offsets to parameters in kernel/head.S */
|
||||
|
||||
#define IPL_DEVICE_OFFSET 0x10400
|
||||
#define INITRD_START_OFFSET 0x10408
|
||||
#define INITRD_SIZE_OFFSET 0x10410
|
||||
#define OLDMEM_BASE_OFFSET 0x10418
|
||||
#define OLDMEM_SIZE_OFFSET 0x10420
|
||||
#define KERNEL_VERSION_OFFSET 0x10428
|
||||
#define COMMAND_LINE_OFFSET 0x10480
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/types.h>
|
||||
|
||||
#define IPL_DEVICE (*(unsigned long *) (IPL_DEVICE_OFFSET))
|
||||
#define INITRD_START (*(unsigned long *) (INITRD_START_OFFSET))
|
||||
#define INITRD_SIZE (*(unsigned long *) (INITRD_SIZE_OFFSET))
|
||||
#define OLDMEM_BASE (*(unsigned long *) (OLDMEM_BASE_OFFSET))
|
||||
#define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET))
|
||||
#define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET))
|
||||
|
||||
struct parmarea {
|
||||
unsigned long ipl_device; /* 0x10400 */
|
||||
unsigned long initrd_start; /* 0x10408 */
|
||||
@ -110,6 +91,7 @@ extern unsigned long mio_wb_bit_mask;
|
||||
#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
|
||||
#define MACHINE_HAS_GS (S390_lowcore.machine_flags & MACHINE_FLAG_GS)
|
||||
#define MACHINE_HAS_SCC (S390_lowcore.machine_flags & MACHINE_FLAG_SCC)
|
||||
#define MACHINE_HAS_PCI_MIO (S390_lowcore.machine_flags & MACHINE_FLAG_PCI_MIO)
|
||||
|
||||
/*
|
||||
* Console mode. Override with conmode=
|
||||
@ -161,20 +143,22 @@ static inline unsigned long kaslr_offset(void)
|
||||
|
||||
extern int is_full_image;
|
||||
|
||||
struct initrd_data {
|
||||
unsigned long start;
|
||||
unsigned long size;
|
||||
};
|
||||
extern struct initrd_data initrd_data;
|
||||
|
||||
struct oldmem_data {
|
||||
unsigned long start;
|
||||
unsigned long size;
|
||||
};
|
||||
extern struct oldmem_data oldmem_data;
|
||||
|
||||
static inline u32 gen_lpswe(unsigned long addr)
|
||||
{
|
||||
BUILD_BUG_ON(addr > 0xfff);
|
||||
return 0xb2b20000 | addr;
|
||||
}
|
||||
|
||||
#else /* __ASSEMBLY__ */
|
||||
|
||||
#define IPL_DEVICE (IPL_DEVICE_OFFSET)
|
||||
#define INITRD_START (INITRD_START_OFFSET)
|
||||
#define INITRD_SIZE (INITRD_SIZE_OFFSET)
|
||||
#define OLDMEM_BASE (OLDMEM_BASE_OFFSET)
|
||||
#define OLDMEM_SIZE (OLDMEM_SIZE_OFFSET)
|
||||
#define COMMAND_LINE (COMMAND_LINE_OFFSET)
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* _ASM_S390_SETUP_H */
|
||||
|
@ -104,4 +104,63 @@ static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
|
||||
return false;
|
||||
}
|
||||
|
||||
#define SYSCALL_FMT_0
|
||||
#define SYSCALL_FMT_1 , "0" (r2)
|
||||
#define SYSCALL_FMT_2 , "d" (r3) SYSCALL_FMT_1
|
||||
#define SYSCALL_FMT_3 , "d" (r4) SYSCALL_FMT_2
|
||||
#define SYSCALL_FMT_4 , "d" (r5) SYSCALL_FMT_3
|
||||
#define SYSCALL_FMT_5 , "d" (r6) SYSCALL_FMT_4
|
||||
#define SYSCALL_FMT_6 , "d" (r7) SYSCALL_FMT_5
|
||||
|
||||
#define SYSCALL_PARM_0
|
||||
#define SYSCALL_PARM_1 , long arg1
|
||||
#define SYSCALL_PARM_2 SYSCALL_PARM_1, long arg2
|
||||
#define SYSCALL_PARM_3 SYSCALL_PARM_2, long arg3
|
||||
#define SYSCALL_PARM_4 SYSCALL_PARM_3, long arg4
|
||||
#define SYSCALL_PARM_5 SYSCALL_PARM_4, long arg5
|
||||
#define SYSCALL_PARM_6 SYSCALL_PARM_5, long arg6
|
||||
|
||||
#define SYSCALL_REGS_0
|
||||
#define SYSCALL_REGS_1 \
|
||||
register long r2 asm("2") = arg1
|
||||
#define SYSCALL_REGS_2 \
|
||||
SYSCALL_REGS_1; \
|
||||
register long r3 asm("3") = arg2
|
||||
#define SYSCALL_REGS_3 \
|
||||
SYSCALL_REGS_2; \
|
||||
register long r4 asm("4") = arg3
|
||||
#define SYSCALL_REGS_4 \
|
||||
SYSCALL_REGS_3; \
|
||||
register long r5 asm("5") = arg4
|
||||
#define SYSCALL_REGS_5 \
|
||||
SYSCALL_REGS_4; \
|
||||
register long r6 asm("6") = arg5
|
||||
#define SYSCALL_REGS_6 \
|
||||
SYSCALL_REGS_5; \
|
||||
register long r7 asm("7") = arg6
|
||||
|
||||
#define GENERATE_SYSCALL_FUNC(nr) \
|
||||
static __always_inline \
|
||||
long syscall##nr(unsigned long syscall SYSCALL_PARM_##nr) \
|
||||
{ \
|
||||
register unsigned long r1 asm ("1") = syscall; \
|
||||
register long rc asm ("2"); \
|
||||
SYSCALL_REGS_##nr; \
|
||||
\
|
||||
asm volatile ( \
|
||||
" svc 0\n" \
|
||||
: "=d" (rc) \
|
||||
: "d" (r1) SYSCALL_FMT_##nr \
|
||||
: "memory"); \
|
||||
return rc; \
|
||||
}
|
||||
|
||||
GENERATE_SYSCALL_FUNC(0)
|
||||
GENERATE_SYSCALL_FUNC(1)
|
||||
GENERATE_SYSCALL_FUNC(2)
|
||||
GENERATE_SYSCALL_FUNC(3)
|
||||
GENERATE_SYSCALL_FUNC(4)
|
||||
GENERATE_SYSCALL_FUNC(5)
|
||||
GENERATE_SYSCALL_FUNC(6)
|
||||
|
||||
#endif /* _ASM_SYSCALL_H */
|
||||
|
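This is the same generation pattern as the KVM hypercall header: GENERATE_SYSCALL_FUNC(2), for example, produces the syscall2() helper that the vDSO fallback paths later in this series switch to. Expanded by hand for illustration (not part of the patch), it is roughly:

static __always_inline
long syscall2(unsigned long syscall, long arg1, long arg2)
{
	register unsigned long r1 asm("1") = syscall;
	register long rc asm("2");
	register long r2 asm("2") = arg1;
	register long r3 asm("3") = arg2;

	asm volatile (
		"	svc	0\n"
		: "=d" (rc)
		: "d" (r1), "d" (r3), "0" (r2)
		: "memory");
	return rc;
}

With this in place, clock_gettime_fallback() in the vDSO header below becomes a plain syscall2(__NR_clock_gettime, (long)clkid, (long)ts) instead of open-coded register asm.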
@ -356,11 +356,9 @@ int uv_convert_from_secure(unsigned long paddr);
|
||||
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
|
||||
|
||||
void setup_uv(void);
|
||||
void adjust_to_uv_max(unsigned long *vmax);
|
||||
#else
|
||||
#define is_prot_virt_host() 0
|
||||
static inline void setup_uv(void) {}
|
||||
static inline void adjust_to_uv_max(unsigned long *vmax) {}
|
||||
|
||||
static inline int uv_destroy_page(unsigned long paddr)
|
||||
{
|
||||
@ -373,10 +371,4 @@ static inline int uv_convert_from_secure(unsigned long paddr)
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
|
||||
void uv_query_info(void);
|
||||
#else
|
||||
static inline void uv_query_info(void) {}
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_S390_UV_H */
|
||||
|
@ -6,6 +6,7 @@
|
||||
|
||||
#define VDSO_HAS_CLOCK_GETRES 1
|
||||
|
||||
#include <asm/syscall.h>
|
||||
#include <asm/timex.h>
|
||||
#include <asm/unistd.h>
|
||||
#include <linux/compiler.h>
|
||||
@ -35,35 +36,20 @@ static inline u64 __arch_get_hw_counter(s32 clock_mode, const struct vdso_data *
|
||||
static __always_inline
|
||||
long clock_gettime_fallback(clockid_t clkid, struct __kernel_timespec *ts)
|
||||
{
|
||||
register unsigned long r1 __asm__("r1") = __NR_clock_gettime;
|
||||
register unsigned long r2 __asm__("r2") = (unsigned long)clkid;
|
||||
register void *r3 __asm__("r3") = ts;
|
||||
|
||||
asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
|
||||
return r2;
|
||||
return syscall2(__NR_clock_gettime, (long)clkid, (long)ts);
|
||||
}
|
||||
|
||||
static __always_inline
|
||||
long gettimeofday_fallback(register struct __kernel_old_timeval *tv,
|
||||
register struct timezone *tz)
|
||||
{
|
||||
register unsigned long r1 __asm__("r1") = __NR_gettimeofday;
|
||||
register unsigned long r2 __asm__("r2") = (unsigned long)tv;
|
||||
register void *r3 __asm__("r3") = tz;
|
||||
|
||||
asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
|
||||
return r2;
|
||||
return syscall2(__NR_gettimeofday, (long)tv, (long)tz);
|
||||
}
|
||||
|
||||
static __always_inline
|
||||
long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts)
|
||||
{
|
||||
register unsigned long r1 __asm__("r1") = __NR_clock_getres;
|
||||
register unsigned long r2 __asm__("r2") = (unsigned long)clkid;
|
||||
register void *r3 __asm__("r3") = ts;
|
||||
|
||||
asm ("svc 0\n" : "+d" (r2) : "d" (r1), "d" (r3) : "cc", "memory");
|
||||
return r2;
|
||||
return syscall2(__NR_clock_getres, (long)clkid, (long)ts);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_TIME_NS
|
||||
|
@ -40,7 +40,7 @@ obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o
|
||||
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
|
||||
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
|
||||
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
|
||||
obj-y += smp.o
|
||||
obj-y += smp.o text_amode31.o
|
||||
|
||||
extra-y += head64.o vmlinux.lds
|
||||
|
||||
|
@ -116,6 +116,7 @@ int main(void)
|
||||
OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
|
||||
OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
|
||||
OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
|
||||
OFFSET(__LC_RESTART_FLAGS, lowcore, restart_flags);
|
||||
OFFSET(__LC_KERNEL_ASCE, lowcore, kernel_asce);
|
||||
OFFSET(__LC_USER_ASCE, lowcore, user_asce);
|
||||
OFFSET(__LC_LPP, lowcore, lpp);
|
||||
@ -152,5 +153,12 @@ int main(void)
|
||||
DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region));
|
||||
/* sizeof kernel parameter area */
|
||||
DEFINE(__PARMAREA_SIZE, sizeof(struct parmarea));
|
||||
/* kernel parameter area offsets */
|
||||
DEFINE(IPL_DEVICE, PARMAREA + offsetof(struct parmarea, ipl_device));
|
||||
DEFINE(INITRD_START, PARMAREA + offsetof(struct parmarea, initrd_start));
|
||||
DEFINE(INITRD_SIZE, PARMAREA + offsetof(struct parmarea, initrd_size));
|
||||
DEFINE(OLDMEM_BASE, PARMAREA + offsetof(struct parmarea, oldmem_base));
|
||||
DEFINE(OLDMEM_SIZE, PARMAREA + offsetof(struct parmarea, oldmem_size));
|
||||
DEFINE(COMMAND_LINE, PARMAREA + offsetof(struct parmarea, command_line));
|
||||
return 0;
|
||||
}
|
||||
|
@ -140,7 +140,7 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
|
||||
|
||||
while (count) {
|
||||
from = __pa(src);
|
||||
if (!OLDMEM_BASE && from < sclp.hsa_size) {
|
||||
if (!oldmem_data.start && from < sclp.hsa_size) {
|
||||
/* Copy from zfcp/nvme dump HSA area */
|
||||
len = min(count, sclp.hsa_size - from);
|
||||
rc = memcpy_hsa_kernel(dst, from, len);
|
||||
@ -148,12 +148,12 @@ int copy_oldmem_kernel(void *dst, void *src, size_t count)
|
||||
return rc;
|
||||
} else {
|
||||
/* Check for swapped kdump oldmem areas */
|
||||
if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
|
||||
from -= OLDMEM_BASE;
|
||||
len = min(count, OLDMEM_SIZE - from);
|
||||
} else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
|
||||
len = min(count, OLDMEM_SIZE - from);
|
||||
from += OLDMEM_BASE;
|
||||
if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
|
||||
from -= oldmem_data.start;
|
||||
len = min(count, oldmem_data.size - from);
|
||||
} else if (oldmem_data.start && from < oldmem_data.size) {
|
||||
len = min(count, oldmem_data.size - from);
|
||||
from += oldmem_data.start;
|
||||
} else {
|
||||
len = count;
|
||||
}
|
||||
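The swapped-area logic above maps the two kdump regions onto each other in both directions: an address inside the saved window is shifted down by oldmem_data.start, and a low address below oldmem_data.size is shifted up into the saved copy (note the first test subtracts oldmem_data.start, matching copy_oldmem_kernel()). A standalone sketch with made-up values:

#include <stdio.h>

struct oldmem_data {
	unsigned long start;
	unsigned long size;
};

/* Hypothetical crashkernel layout, not taken from the patch. */
static struct oldmem_data oldmem_data = { .start = 0x10000000, .size = 0x08000000 };

/* Mirror of the swapped-area check in copy_oldmem_kernel()/copy_oldmem_user(). */
static unsigned long translate(unsigned long from)
{
	if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size)
		return from - oldmem_data.start;	/* saved window -> old [0, size) */
	else if (oldmem_data.start && from < oldmem_data.size)
		return from + oldmem_data.start;	/* old low memory -> saved copy */
	return from;					/* untouched memory */
}

int main(void)
{
	printf("0x%lx\n", translate(0x10001000));	/* -> 0x1000 */
	printf("0x%lx\n", translate(0x00001000));	/* -> 0x10001000 */
	printf("0x%lx\n", translate(0x20000000));	/* -> 0x20000000 */
	return 0;
}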
@ -183,7 +183,7 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
|
||||
|
||||
while (count) {
|
||||
from = __pa(src);
|
||||
if (!OLDMEM_BASE && from < sclp.hsa_size) {
|
||||
if (!oldmem_data.start && from < sclp.hsa_size) {
|
||||
/* Copy from zfcp/nvme dump HSA area */
|
||||
len = min(count, sclp.hsa_size - from);
|
||||
rc = memcpy_hsa_user(dst, from, len);
|
||||
@ -191,12 +191,12 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
|
||||
return rc;
|
||||
} else {
|
||||
/* Check for swapped kdump oldmem areas */
|
||||
if (OLDMEM_BASE && from - OLDMEM_BASE < OLDMEM_SIZE) {
|
||||
from -= OLDMEM_BASE;
|
||||
len = min(count, OLDMEM_SIZE - from);
|
||||
} else if (OLDMEM_BASE && from < OLDMEM_SIZE) {
|
||||
len = min(count, OLDMEM_SIZE - from);
|
||||
from += OLDMEM_BASE;
|
||||
if (oldmem_data.start && from - oldmem_data.size < oldmem_data.size) {
|
||||
from -= oldmem_data.size;
|
||||
len = min(count, oldmem_data.size - from);
|
||||
} else if (oldmem_data.start && from < oldmem_data.size) {
|
||||
len = min(count, oldmem_data.size - from);
|
||||
from += oldmem_data.start;
|
||||
} else {
|
||||
len = count;
|
||||
}
|
||||
@ -243,10 +243,10 @@ static int remap_oldmem_pfn_range_kdump(struct vm_area_struct *vma,
|
||||
unsigned long size_old;
|
||||
int rc;
|
||||
|
||||
if (pfn < OLDMEM_SIZE >> PAGE_SHIFT) {
|
||||
size_old = min(size, OLDMEM_SIZE - (pfn << PAGE_SHIFT));
|
||||
if (pfn < oldmem_data.size >> PAGE_SHIFT) {
|
||||
size_old = min(size, oldmem_data.size - (pfn << PAGE_SHIFT));
|
||||
rc = remap_pfn_range(vma, from,
|
||||
pfn + (OLDMEM_BASE >> PAGE_SHIFT),
|
||||
pfn + (oldmem_data.start >> PAGE_SHIFT),
|
||||
size_old, prot);
|
||||
if (rc || size == size_old)
|
||||
return rc;
|
||||
@ -288,7 +288,7 @@ static int remap_oldmem_pfn_range_zfcpdump(struct vm_area_struct *vma,
|
||||
int remap_oldmem_pfn_range(struct vm_area_struct *vma, unsigned long from,
|
||||
unsigned long pfn, unsigned long size, pgprot_t prot)
|
||||
{
|
||||
if (OLDMEM_BASE)
|
||||
if (oldmem_data.start)
|
||||
return remap_oldmem_pfn_range_kdump(vma, from, pfn, size, prot);
|
||||
else
|
||||
return remap_oldmem_pfn_range_zfcpdump(vma, from, pfn, size,
|
||||
@ -633,17 +633,17 @@ int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
|
||||
u64 hdr_off;
|
||||
|
||||
/* If we are not in kdump or zfcp/nvme dump mode return */
|
||||
if (!OLDMEM_BASE && !is_ipl_type_dump())
|
||||
if (!oldmem_data.start && !is_ipl_type_dump())
|
||||
return 0;
|
||||
/* If we cannot get HSA size for zfcp/nvme dump return error */
|
||||
if (is_ipl_type_dump() && !sclp.hsa_size)
|
||||
return -ENODEV;
|
||||
|
||||
/* For kdump, exclude previous crashkernel memory */
|
||||
if (OLDMEM_BASE) {
|
||||
oldmem_region.base = OLDMEM_BASE;
|
||||
oldmem_region.size = OLDMEM_SIZE;
|
||||
oldmem_type.total_size = OLDMEM_SIZE;
|
||||
if (oldmem_data.start) {
|
||||
oldmem_region.base = oldmem_data.start;
|
||||
oldmem_region.size = oldmem_data.size;
|
||||
oldmem_type.total_size = oldmem_data.size;
|
||||
}
|
||||
|
||||
mem_chunk_cnt = get_mem_chunk_cnt();
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include <linux/export.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/minmax.h>
|
||||
#include <linux/debugfs.h>
|
||||
|
||||
#include <asm/debug.h>
|
||||
@ -92,6 +93,8 @@ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
|
||||
char *out_buf, const char *in_buf);
|
||||
static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
|
||||
char *out_buf, debug_sprintf_entry_t *curr_event);
|
||||
static void debug_areas_swap(debug_info_t *a, debug_info_t *b);
|
||||
static void debug_events_append(debug_info_t *dest, debug_info_t *src);
|
||||
|
||||
/* globals */
|
||||
|
||||
@ -311,24 +314,6 @@ static debug_info_t *debug_info_create(const char *name, int pages_per_area,
|
||||
goto out;
|
||||
|
||||
rc->mode = mode & ~S_IFMT;
|
||||
|
||||
/* create root directory */
|
||||
rc->debugfs_root_entry = debugfs_create_dir(rc->name,
|
||||
debug_debugfs_root_entry);
|
||||
|
||||
/* append new element to linked list */
|
||||
if (!debug_area_first) {
|
||||
/* first element in list */
|
||||
debug_area_first = rc;
|
||||
rc->prev = NULL;
|
||||
} else {
|
||||
/* append element to end of list */
|
||||
debug_area_last->next = rc;
|
||||
rc->prev = debug_area_last;
|
||||
}
|
||||
debug_area_last = rc;
|
||||
rc->next = NULL;
|
||||
|
||||
refcount_set(&rc->ref_count, 1);
|
||||
out:
|
||||
return rc;
|
||||
@ -388,27 +373,10 @@ static void debug_info_get(debug_info_t *db_info)
|
||||
*/
|
||||
static void debug_info_put(debug_info_t *db_info)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!db_info)
|
||||
return;
|
||||
if (refcount_dec_and_test(&db_info->ref_count)) {
|
||||
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
|
||||
if (!db_info->views[i])
|
||||
continue;
|
||||
debugfs_remove(db_info->debugfs_entries[i]);
|
||||
}
|
||||
debugfs_remove(db_info->debugfs_root_entry);
|
||||
if (db_info == debug_area_first)
|
||||
debug_area_first = db_info->next;
|
||||
if (db_info == debug_area_last)
|
||||
debug_area_last = db_info->prev;
|
||||
if (db_info->prev)
|
||||
db_info->prev->next = db_info->next;
|
||||
if (db_info->next)
|
||||
db_info->next->prev = db_info->prev;
|
||||
if (refcount_dec_and_test(&db_info->ref_count))
|
||||
debug_info_free(db_info);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -632,6 +600,31 @@ static int debug_close(struct inode *inode, struct file *file)
|
||||
return 0; /* success */
|
||||
}
|
||||
|
||||
/* Create debugfs entries and add to internal list. */
|
||||
static void _debug_register(debug_info_t *id)
|
||||
{
|
||||
/* create root directory */
|
||||
id->debugfs_root_entry = debugfs_create_dir(id->name,
|
||||
debug_debugfs_root_entry);
|
||||
|
||||
/* append new element to linked list */
|
||||
if (!debug_area_first) {
|
||||
/* first element in list */
|
||||
debug_area_first = id;
|
||||
id->prev = NULL;
|
||||
} else {
|
||||
/* append element to end of list */
|
||||
debug_area_last->next = id;
|
||||
id->prev = debug_area_last;
|
||||
}
|
||||
debug_area_last = id;
|
||||
id->next = NULL;
|
||||
|
||||
debug_register_view(id, &debug_level_view);
|
||||
debug_register_view(id, &debug_flush_view);
|
||||
debug_register_view(id, &debug_pages_view);
|
||||
}
|
||||
|
||||
/**
|
||||
* debug_register_mode() - creates and initializes debug area.
|
||||
*
|
||||
@ -661,19 +654,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
|
||||
if ((uid != 0) || (gid != 0))
|
||||
pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
|
||||
BUG_ON(!initialized);
|
||||
mutex_lock(&debug_mutex);
|
||||
|
||||
/* create new debug_info */
|
||||
rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
|
||||
if (!rc)
|
||||
goto out;
|
||||
debug_register_view(rc, &debug_level_view);
|
||||
debug_register_view(rc, &debug_flush_view);
|
||||
debug_register_view(rc, &debug_pages_view);
|
||||
out:
|
||||
if (!rc)
|
||||
if (rc) {
|
||||
mutex_lock(&debug_mutex);
|
||||
_debug_register(rc);
|
||||
mutex_unlock(&debug_mutex);
|
||||
} else {
|
||||
pr_err("Registering debug feature %s failed\n", name);
|
||||
mutex_unlock(&debug_mutex);
|
||||
}
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(debug_register_mode);
|
||||
@ -702,6 +692,82 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
|
||||
}
|
||||
EXPORT_SYMBOL(debug_register);
|
||||
|
||||
/**
|
||||
* debug_register_static() - registers a static debug area
|
||||
*
|
||||
* @id: Handle for static debug area
|
||||
* @pages_per_area: Number of pages per area
|
||||
* @nr_areas: Number of debug areas
|
||||
*
|
||||
* Register debug_info_t defined using DEFINE_STATIC_DEBUG_INFO.
|
||||
*
|
||||
* Note: This function is called automatically via an initcall generated by
|
||||
* DEFINE_STATIC_DEBUG_INFO.
|
||||
*/
|
||||
void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas)
|
||||
{
|
||||
unsigned long flags;
|
||||
debug_info_t *copy;
|
||||
|
||||
if (!initialized) {
|
||||
pr_err("Tried to register debug feature %s too early\n",
|
||||
id->name);
|
||||
return;
|
||||
}
|
||||
|
||||
copy = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
|
||||
id->level, ALL_AREAS);
|
||||
if (!copy) {
|
||||
pr_err("Registering debug feature %s failed\n", id->name);
|
||||
|
||||
/* Clear pointers to prevent tracing into released initdata. */
|
||||
spin_lock_irqsave(&id->lock, flags);
|
||||
id->areas = NULL;
|
||||
id->active_pages = NULL;
|
||||
id->active_entries = NULL;
|
||||
spin_unlock_irqrestore(&id->lock, flags);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/* Replace static trace area with dynamic copy. */
|
||||
spin_lock_irqsave(&id->lock, flags);
|
||||
debug_events_append(copy, id);
|
||||
debug_areas_swap(id, copy);
|
||||
spin_unlock_irqrestore(&id->lock, flags);
|
||||
|
||||
/* Clear pointers to initdata and discard copy. */
|
||||
copy->areas = NULL;
|
||||
copy->active_pages = NULL;
|
||||
copy->active_entries = NULL;
|
||||
debug_info_free(copy);
|
||||
|
||||
mutex_lock(&debug_mutex);
|
||||
_debug_register(id);
|
||||
mutex_unlock(&debug_mutex);
|
||||
}
|
||||
|
||||
/* Remove debugfs entries and remove from internal list. */
|
||||
static void _debug_unregister(debug_info_t *id)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
|
||||
if (!id->views[i])
|
||||
continue;
|
||||
debugfs_remove(id->debugfs_entries[i]);
|
||||
}
|
||||
debugfs_remove(id->debugfs_root_entry);
|
||||
if (id == debug_area_first)
|
||||
debug_area_first = id->next;
|
||||
if (id == debug_area_last)
|
||||
debug_area_last = id->prev;
|
||||
if (id->prev)
|
||||
id->prev->next = id->next;
|
||||
if (id->next)
|
||||
id->next->prev = id->prev;
|
||||
}
|
||||
|
||||
/**
|
||||
* debug_unregister() - give back debug area.
|
||||
*
|
||||
@ -715,8 +781,10 @@ void debug_unregister(debug_info_t *id)
|
||||
if (!id)
|
||||
return;
|
||||
mutex_lock(&debug_mutex);
|
||||
debug_info_put(id);
|
||||
_debug_unregister(id);
|
||||
mutex_unlock(&debug_mutex);
|
||||
|
||||
debug_info_put(id);
|
||||
}
|
||||
EXPORT_SYMBOL(debug_unregister);
|
||||
|
||||
@ -726,35 +794,28 @@ EXPORT_SYMBOL(debug_unregister);
|
||||
*/
|
||||
static int debug_set_size(debug_info_t *id, int nr_areas, int pages_per_area)
|
||||
{
|
||||
debug_entry_t ***new_areas;
|
||||
debug_info_t *new_id;
|
||||
unsigned long flags;
|
||||
int rc = 0;
|
||||
|
||||
if (!id || (nr_areas <= 0) || (pages_per_area < 0))
|
||||
return -EINVAL;
|
||||
if (pages_per_area > 0) {
|
||||
new_areas = debug_areas_alloc(pages_per_area, nr_areas);
|
||||
if (!new_areas) {
|
||||
pr_info("Allocating memory for %i pages failed\n",
|
||||
pages_per_area);
|
||||
rc = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
} else {
|
||||
new_areas = NULL;
|
||||
|
||||
new_id = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
|
||||
id->level, ALL_AREAS);
|
||||
if (!new_id) {
|
||||
pr_info("Allocating memory for %i pages failed\n",
|
||||
pages_per_area);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&id->lock, flags);
|
||||
debug_areas_free(id);
|
||||
id->areas = new_areas;
|
||||
id->nr_areas = nr_areas;
|
||||
id->pages_per_area = pages_per_area;
|
||||
id->active_area = 0;
|
||||
memset(id->active_entries, 0, sizeof(int)*id->nr_areas);
|
||||
memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
|
||||
debug_events_append(new_id, id);
|
||||
debug_areas_swap(new_id, id);
|
||||
debug_info_free(new_id);
|
||||
spin_unlock_irqrestore(&id->lock, flags);
|
||||
pr_info("%s: set new size (%i pages)\n", id->name, pages_per_area);
|
||||
out:
|
||||
return rc;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -772,16 +833,17 @@ void debug_set_level(debug_info_t *id, int new_level)
|
||||
|
||||
if (!id)
|
||||
return;
|
||||
spin_lock_irqsave(&id->lock, flags);
|
||||
|
||||
if (new_level == DEBUG_OFF_LEVEL) {
|
||||
id->level = DEBUG_OFF_LEVEL;
|
||||
pr_info("%s: switched off\n", id->name);
|
||||
} else if ((new_level > DEBUG_MAX_LEVEL) || (new_level < 0)) {
|
||||
pr_info("%s: level %i is out of range (%i - %i)\n",
|
||||
id->name, new_level, 0, DEBUG_MAX_LEVEL);
|
||||
} else {
|
||||
id->level = new_level;
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&id->lock, flags);
|
||||
id->level = new_level;
|
||||
spin_unlock_irqrestore(&id->lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(debug_set_level);
|
||||
@ -821,6 +883,42 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id)
|
||||
id->active_entries[id->active_area]);
|
||||
}
|
||||
|
||||
/* Swap debug areas of a and b. */
|
||||
static void debug_areas_swap(debug_info_t *a, debug_info_t *b)
|
||||
{
|
||||
swap(a->nr_areas, b->nr_areas);
|
||||
swap(a->pages_per_area, b->pages_per_area);
|
||||
swap(a->areas, b->areas);
|
||||
swap(a->active_area, b->active_area);
|
||||
swap(a->active_pages, b->active_pages);
|
||||
swap(a->active_entries, b->active_entries);
|
||||
}
|
||||
|
||||
/* Append all debug events in active area from source to destination log. */
|
||||
static void debug_events_append(debug_info_t *dest, debug_info_t *src)
|
||||
{
|
||||
debug_entry_t *from, *to, *last;
|
||||
|
||||
if (!src->areas || !dest->areas)
|
||||
return;
|
||||
|
||||
/* Loop over all entries in src, starting with oldest. */
|
||||
from = get_active_entry(src);
|
||||
last = from;
|
||||
do {
|
||||
if (from->clock != 0LL) {
|
||||
to = get_active_entry(dest);
|
||||
memset(to, 0, dest->entry_size);
|
||||
memcpy(to, from, min(src->entry_size,
|
||||
dest->entry_size));
|
||||
proceed_active_entry(dest);
|
||||
}
|
||||
|
||||
proceed_active_entry(src);
|
||||
from = get_active_entry(src);
|
||||
} while (from != last);
|
||||
}
|
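debug_events_append() starts at the source's active (i.e. oldest) entry and walks forward until it wraps back around to it, copying only slots whose timestamp is non-zero; the kernel version advances the source's own active-entry cursor, while the sketch below uses a plain index and array so it can run stand-alone (entry layout and sizes are invented):

#include <stdio.h>
#include <string.h>

#define N_ENTRIES 4

struct entry {
	unsigned long long clock;	/* 0 means "never written" */
	int data;
};

int main(void)
{
	/* Hypothetical ring: slot 1 is the active (oldest) entry. */
	struct entry src[N_ENTRIES] = {
		{ .clock = 40, .data = 4 }, { .clock = 0 },
		{ .clock = 20, .data = 2 }, { .clock = 30, .data = 3 },
	};
	struct entry dst[N_ENTRIES] = { 0 };
	int src_active = 1, dst_active = 0;
	int from, last, i;

	/* Loop over all entries in src, starting with the oldest. */
	from = last = src_active;
	do {
		if (src[from].clock != 0) {
			memset(&dst[dst_active], 0, sizeof(dst[0]));
			dst[dst_active] = src[from];
			dst_active = (dst_active + 1) % N_ENTRIES;
		}
		from = (from + 1) % N_ENTRIES;
	} while (from != last);

	for (i = 0; i < N_ENTRIES; i++)
		printf("dst[%d]: clock=%llu data=%d\n", i, dst[i].clock, dst[i].data);
	return 0;
}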
||||
|
||||
/*
|
||||
* debug_finish_entry:
|
||||
* - set timestamp, caller address, cpu number etc.
|
||||
@ -1111,16 +1209,17 @@ int debug_register_view(debug_info_t *id, struct debug_view *view)
|
||||
break;
|
||||
}
|
||||
if (i == DEBUG_MAX_VIEWS) {
|
||||
pr_err("Registering view %s/%s would exceed the maximum "
|
||||
"number of views %i\n", id->name, view->name, i);
|
||||
rc = -1;
|
||||
} else {
|
||||
id->views[i] = view;
|
||||
id->debugfs_entries[i] = pde;
|
||||
}
|
||||
spin_unlock_irqrestore(&id->lock, flags);
|
||||
if (rc)
|
||||
if (rc) {
|
||||
pr_err("Registering view %s/%s would exceed the maximum "
|
||||
"number of views %i\n", id->name, view->name, i);
|
||||
debugfs_remove(pde);
|
||||
}
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <asm/diag.h>
|
||||
#include <asm/trace/diag.h>
|
||||
#include <asm/sections.h>
|
||||
#include "entry.h"
|
||||
|
||||
struct diag_stat {
|
||||
unsigned int counter[NR_DIAG_STAT];
|
||||
@ -50,8 +51,16 @@ static const struct diag_desc diag_map[NR_DIAG_STAT] = {
|
||||
[DIAG_STAT_X500] = { .code = 0x500, .name = "Virtio Service" },
|
||||
};
|
||||
|
||||
struct diag_ops __bootdata_preserved(diag_dma_ops);
|
||||
struct diag210 *__bootdata_preserved(__diag210_tmp_dma);
|
||||
struct diag_ops __amode31_ref diag_amode31_ops = {
|
||||
.diag210 = _diag210_amode31,
|
||||
.diag26c = _diag26c_amode31,
|
||||
.diag14 = _diag14_amode31,
|
||||
.diag0c = _diag0c_amode31,
|
||||
.diag308_reset = _diag308_reset_amode31
|
||||
};
|
||||
|
||||
static struct diag210 _diag210_tmp_amode31 __section(".amode31.data");
|
||||
struct diag210 __amode31_ref *__diag210_tmp_amode31 = &_diag210_tmp_amode31;
|
||||
|
||||
static int show_diag_stat(struct seq_file *m, void *v)
|
||||
{
|
||||
@ -59,7 +68,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
|
||||
unsigned long n = (unsigned long) v - 1;
|
||||
int cpu, prec, tmp;
|
||||
|
||||
get_online_cpus();
|
||||
cpus_read_lock();
|
||||
if (n == 0) {
|
||||
seq_puts(m, " ");
|
||||
|
||||
@ -78,7 +87,7 @@ static int show_diag_stat(struct seq_file *m, void *v)
|
||||
}
|
||||
seq_printf(m, " %s\n", diag_map[n-1].name);
|
||||
}
|
||||
put_online_cpus();
|
||||
cpus_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -135,7 +144,7 @@ EXPORT_SYMBOL(diag_stat_inc_norecursion);
|
||||
int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode)
|
||||
{
|
||||
diag_stat_inc(DIAG_STAT_X014);
|
||||
return diag_dma_ops.diag14(rx, ry1, subcode);
|
||||
return diag_amode31_ops.diag14(rx, ry1, subcode);
|
||||
}
|
||||
EXPORT_SYMBOL(diag14);
|
||||
|
||||
@ -172,12 +181,12 @@ int diag210(struct diag210 *addr)
|
||||
int ccode;
|
||||
|
||||
spin_lock_irqsave(&diag210_lock, flags);
|
||||
*__diag210_tmp_dma = *addr;
|
||||
*__diag210_tmp_amode31 = *addr;
|
||||
|
||||
diag_stat_inc(DIAG_STAT_X210);
|
||||
ccode = diag_dma_ops.diag210(__diag210_tmp_dma);
|
||||
ccode = diag_amode31_ops.diag210(__diag210_tmp_amode31);
|
||||
|
||||
*addr = *__diag210_tmp_dma;
|
||||
*addr = *__diag210_tmp_amode31;
|
||||
spin_unlock_irqrestore(&diag210_lock, flags);
|
||||
|
||||
return ccode;
|
||||
@ -205,6 +214,6 @@ EXPORT_SYMBOL(diag224);
|
||||
int diag26c(void *req, void *resp, enum diag26c_sc subcode)
|
||||
{
|
||||
diag_stat_inc(DIAG_STAT_X26C);
|
||||
return diag_dma_ops.diag26c(req, resp, subcode);
|
||||
return diag_amode31_ops.diag26c(req, resp, subcode);
|
||||
}
|
||||
EXPORT_SYMBOL(diag26c);
|
||||
|
@ -312,10 +312,12 @@ static const unsigned char formats[][6] = {
|
||||
[INSTR_VRR_VV] = { V_8, V_12, 0, 0, 0, 0 },
|
||||
[INSTR_VRR_VV0U] = { V_8, V_12, U4_32, 0, 0, 0 },
|
||||
[INSTR_VRR_VV0U0U] = { V_8, V_12, U4_32, U4_24, 0, 0 },
|
||||
[INSTR_VRR_VV0U2] = { V_8, V_12, U4_24, 0, 0, 0 },
|
||||
[INSTR_VRR_VV0UU2] = { V_8, V_12, U4_32, U4_28, 0, 0 },
|
||||
[INSTR_VRR_VV0UUU] = { V_8, V_12, U4_32, U4_28, U4_24, 0 },
|
||||
[INSTR_VRR_VVV] = { V_8, V_12, V_16, 0, 0, 0 },
|
||||
[INSTR_VRR_VVV0U] = { V_8, V_12, V_16, U4_32, 0, 0 },
|
||||
[INSTR_VRR_VVV0U0] = { V_8, V_12, V_16, U4_24, 0, 0 },
|
||||
[INSTR_VRR_VVV0U0U] = { V_8, V_12, V_16, U4_32, U4_24, 0 },
|
||||
[INSTR_VRR_VVV0UU] = { V_8, V_12, V_16, U4_32, U4_28, 0 },
|
||||
[INSTR_VRR_VVV0UUU] = { V_8, V_12, V_16, U4_32, U4_28, U4_24 },
|
||||
|
@ -236,6 +236,10 @@ static __init void detect_machine_facilities(void)
|
||||
clock_comparator_max = -1ULL >> 1;
|
||||
__ctl_set_bit(0, 53);
|
||||
}
|
||||
if (IS_ENABLED(CONFIG_PCI) && test_facility(153)) {
|
||||
S390_lowcore.machine_flags |= MACHINE_FLAG_PCI_MIO;
|
||||
/* the control bit is set during PCI initialization */
|
||||
}
|
||||
}
|
||||
|
||||
static inline void save_vector_registers(void)
|
||||
|
@ -624,12 +624,15 @@ ENTRY(mcck_int_handler)
|
||||
4: j 4b
|
||||
ENDPROC(mcck_int_handler)
|
||||
|
||||
#
|
||||
# PSW restart interrupt handler
|
||||
#
|
||||
ENTRY(restart_int_handler)
|
||||
ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
|
||||
stg %r15,__LC_SAVE_AREA_RESTART
|
||||
TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
|
||||
jz 0f
|
||||
la %r15,4095
|
||||
lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
|
||||
0: larl %r15,.Lstosm_tmp
|
||||
stosm 0(%r15),0x04 # turn dat on, keep irqs off
|
||||
lg %r15,__LC_RESTART_STACK
|
||||
xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
|
||||
stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
|
||||
@ -638,7 +641,7 @@ ENTRY(restart_int_handler)
|
||||
xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
|
||||
lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
|
||||
lg %r2,__LC_RESTART_DATA
|
||||
lg %r3,__LC_RESTART_SOURCE
|
||||
lgf %r3,__LC_RESTART_SOURCE
|
||||
ltgr %r3,%r3 # test source cpu address
|
||||
jm 1f # negative -> skip source stop
|
||||
0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
|
||||
|
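With restart_source shrunk from __u64 to __u32 in the lowcore, the handler switches from lg to lgf, which loads the 32-bit field sign-extended. Assuming -1 is still the "no source CPU" sentinel (as the ltgr/jm test suggests; this is an assumption, not stated in the excerpt), the sign extension is what keeps that sentinel negative in a 64-bit register:

#include <stdio.h>

int main(void)
{
	unsigned int restart_source = 0xffffffffU;	/* assumed "no source CPU" sentinel */
	long zero_ext = (long)restart_source;		/* plain 32->64 zero extension */
	long sign_ext = (int)restart_source;		/* what lgf produces */

	printf("zero-extended: %ld (positive, sigp to source cpu attempted)\n", zero_ext);
	printf("sign-extended: %ld (negative, source stop skipped)\n", sign_ext);
	return 0;
}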
@ -28,10 +28,8 @@ void do_non_secure_storage_access(struct pt_regs *regs);
|
||||
void do_secure_storage_violation(struct pt_regs *regs);
|
||||
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
|
||||
void kernel_stack_overflow(struct pt_regs * regs);
|
||||
void do_signal(struct pt_regs *regs);
|
||||
void handle_signal32(struct ksignal *ksig, sigset_t *oldset,
|
||||
struct pt_regs *regs);
|
||||
void do_notify_resume(struct pt_regs *regs);
|
||||
|
||||
void __init init_IRQ(void);
|
||||
void do_io_irq(struct pt_regs *regs);
|
||||
@ -64,4 +62,13 @@ void stack_free(unsigned long stack);
|
||||
|
||||
extern char kprobes_insn_page[];
|
||||
|
||||
extern char _samode31[], _eamode31[];
|
||||
extern char _stext_amode31[], _etext_amode31[];
|
||||
extern struct exception_table_entry _start_amode31_ex_table[];
|
||||
extern struct exception_table_entry _stop_amode31_ex_table[];
|
||||
|
||||
#define __amode31_data __section(".amode31.data")
|
||||
#define __amode31_ref __section(".amode31.refs")
|
||||
extern long _start_amode31_refs[], _end_amode31_refs[];
|
||||
|
||||
#endif /* _ENTRY_H */
|
||||
|
@ -18,8 +18,11 @@
|
||||
#include <trace/syscall.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/ftrace.lds.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
#include <asm/set_memory.h>
|
||||
#include "entry.h"
|
||||
#include "ftrace.h"
|
||||
|
||||
/*
|
||||
* To generate function prologue either gcc's hotpatch feature (since gcc 4.8)
|
||||
@ -41,7 +44,130 @@
|
||||
*/
|
||||
|
||||
void *ftrace_func __read_mostly = ftrace_stub;
|
unsigned long ftrace_plt;
struct ftrace_insn {
	u16 opc;
	s32 disp;
} __packed;

asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_br:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_br_end:\n"
);

#ifdef CONFIG_EXPOLINE
asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_ex:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	ex	%r0," __stringify(__LC_BR_R1) "(%r0)\n"
	"	j	.\n"
	"ftrace_shared_hotpatch_trampoline_ex_end:\n"
);

asm(
	"	.align 16\n"
	"ftrace_shared_hotpatch_trampoline_exrl:\n"
	"	lmg	%r0,%r1,2(%r1)\n"
	"	.insn	ril,0xc60000000000,%r0,0f\n"	/* exrl */
	"	j	.\n"
	"0:	br	%r1\n"
	"ftrace_shared_hotpatch_trampoline_exrl_end:\n"
);
#endif /* CONFIG_EXPOLINE */

#ifdef CONFIG_MODULES
static char *ftrace_plt;

asm(
	"	.data\n"
	"ftrace_plt_template:\n"
	"	basr	%r1,%r0\n"
	"	lg	%r1,0f-.(%r1)\n"
	"	br	%r1\n"
	"0:	.quad	ftrace_caller\n"
	"ftrace_plt_template_end:\n"
	"	.previous\n"
);
#endif /* CONFIG_MODULES */

static const char *ftrace_shared_hotpatch_trampoline(const char **end)
{
	const char *tstart, *tend;

	tstart = ftrace_shared_hotpatch_trampoline_br;
	tend = ftrace_shared_hotpatch_trampoline_br_end;
#ifdef CONFIG_EXPOLINE
	if (!nospec_disable) {
		tstart = ftrace_shared_hotpatch_trampoline_ex;
		tend = ftrace_shared_hotpatch_trampoline_ex_end;
		if (test_facility(35)) { /* exrl */
			tstart = ftrace_shared_hotpatch_trampoline_exrl;
			tend = ftrace_shared_hotpatch_trampoline_exrl_end;
		}
	}
#endif /* CONFIG_EXPOLINE */
	if (end)
		*end = tend;
	return tstart;
}

bool ftrace_need_init_nop(void)
{
	return ftrace_shared_hotpatch_trampoline(NULL);
}

int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	static struct ftrace_hotpatch_trampoline *next_vmlinux_trampoline =
		__ftrace_hotpatch_trampolines_start;
	static const char orig[6] = { 0xc0, 0x04, 0x00, 0x00, 0x00, 0x00 };
	static struct ftrace_hotpatch_trampoline *trampoline;
	struct ftrace_hotpatch_trampoline **next_trampoline;
	struct ftrace_hotpatch_trampoline *trampolines_end;
	struct ftrace_hotpatch_trampoline tmp;
	struct ftrace_insn *insn;
	const char *shared;
	s32 disp;

	BUILD_BUG_ON(sizeof(struct ftrace_hotpatch_trampoline) !=
		     SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE);

	next_trampoline = &next_vmlinux_trampoline;
	trampolines_end = __ftrace_hotpatch_trampolines_end;
	shared = ftrace_shared_hotpatch_trampoline(NULL);
#ifdef CONFIG_MODULES
	if (mod) {
		next_trampoline = &mod->arch.next_trampoline;
		trampolines_end = mod->arch.trampolines_end;
		shared = ftrace_plt;
	}
#endif

	if (WARN_ON_ONCE(*next_trampoline >= trampolines_end))
		return -ENOMEM;
	trampoline = (*next_trampoline)++;

	/* Check for the compiler-generated fentry nop (brcl 0, .). */
	if (WARN_ON_ONCE(memcmp((const void *)rec->ip, &orig, sizeof(orig))))
		return -EINVAL;

	/* Generate the trampoline. */
	tmp.brasl_opc = 0xc015; /* brasl %r1, shared */
	tmp.brasl_disp = (shared - (const char *)&trampoline->brasl_opc) / 2;
	tmp.interceptor = FTRACE_ADDR;
	tmp.rest_of_intercepted_function = rec->ip + sizeof(struct ftrace_insn);
	s390_kernel_write(trampoline, &tmp, sizeof(tmp));

	/* Generate a jump to the trampoline. */
	disp = ((char *)trampoline - (char *)rec->ip) / 2;
	insn = (struct ftrace_insn *)rec->ip;
	s390_kernel_write(&insn->disp, &disp, sizeof(disp));

	return 0;
}

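Both displacements written above (tmp.brasl_disp and disp) are counted in 2-byte halfwords, which is how s390 relative-branch instructions encode their targets, so a signed 32-bit halfword displacement reaches roughly plus or minus 4 GB. A small standalone sketch of that arithmetic, not part of this diff (the addresses are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Halfword displacement as used by brasl/brcl: (target - ip) / 2. */
    static int32_t hw_disp(uint64_t ip, uint64_t target)
    {
    	int64_t disp = ((int64_t)target - (int64_t)ip) / 2;

    	if (disp < INT32_MIN || disp > INT32_MAX) {
    		fprintf(stderr, "target out of range\n");
    		return 0;
    	}
    	return (int32_t)disp;
    }

    int main(void)
    {
    	uint64_t ip = 0x1000000;		/* made-up call site */
    	uint64_t trampoline = 0x1400000;	/* made-up trampoline address */
    	int32_t disp = hw_disp(ip, trampoline);

    	/* Re-applying the displacement yields the trampoline address again. */
    	printf("disp=%d halfwords, resolves to %#llx\n",
    	       disp, (unsigned long long)(ip + 2 * (int64_t)disp));
    	return 0;
    }
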
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
@@ -49,11 +175,45 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
	return 0;
}

static void ftrace_generate_nop_insn(struct ftrace_insn *insn)
{
	/* brcl 0,0 */
	insn->opc = 0xc004;
	insn->disp = 0;
}

static void ftrace_generate_call_insn(struct ftrace_insn *insn,
				      unsigned long ip)
{
	unsigned long target;

	/* brasl r0,ftrace_caller */
	target = FTRACE_ADDR;
#ifdef CONFIG_MODULES
	if (is_module_addr((void *)ip))
		target = (unsigned long)ftrace_plt;
#endif /* CONFIG_MODULES */
	insn->opc = 0xc005;
	insn->disp = (target - ip) / 2;
}

static void brcl_disable(void *brcl)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

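brcl_disable() and brcl_enable() work because the condition mask of brcl sits in the high nibble of the instruction's second byte: mask 0 never branches (a 6-byte nop), mask 15 always branches. Patching that single byte between 0x04 and 0xf4 is what switches a call site off and on. A standalone sketch of the byte-level effect (illustration only, not kernel code):

    #include <stdio.h>

    /* 6-byte brcl: opcode byte 0xc0, condition mask in the high nibble of
     * byte 1, 0x4 in the low nibble, then a 32-bit halfword displacement. */
    struct insn {
    	unsigned char bytes[6];
    };

    static void brcl_set_mask(struct insn *insn, unsigned char mask)
    {
    	/* Mirrors the single-byte write done by brcl_enable()/brcl_disable(). */
    	insn->bytes[1] = (unsigned char)((mask << 4) | 0x04);
    }

    int main(void)
    {
    	struct insn insn = { { 0xc0, 0x04, 0, 0, 0, 0 } };	/* brcl 0,...: nop */

    	brcl_set_mask(&insn, 0xf);	/* brcl 15,...: unconditional branch */
    	printf("byte1 after enable:  %#x\n", insn.bytes[1]);
    	brcl_set_mask(&insn, 0x0);	/* back to a nop */
    	printf("byte1 after disable: %#x\n", insn.bytes[1]);
    	return 0;
    }
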
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		brcl_disable((void *)rec->ip);
		return 0;
	}

	if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	/* Replace ftrace call with a nop. */
@@ -67,10 +227,22 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
	return 0;
}

static void brcl_enable(void *brcl)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write((char *)brcl + 1, &op, sizeof(op));
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		brcl_enable((void *)rec->ip);
		return 0;
	}

	if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	/* Replace nop with an ftrace call. */
@@ -95,22 +267,44 @@ int __init ftrace_dyn_arch_init(void)
	return 0;
}

void arch_ftrace_update_code(int command)
{
	if (ftrace_shared_hotpatch_trampoline(NULL))
		ftrace_modify_all_code(command);
	else
		ftrace_run_stop_machine(command);
}

static void __ftrace_sync(void *dummy)
{
}

int ftrace_arch_code_modify_post_process(void)
{
	if (ftrace_shared_hotpatch_trampoline(NULL)) {
		/* Send SIGP to the other CPUs, so they see the new code. */
		smp_call_function(__ftrace_sync, NULL, 1);
	}
	return 0;
}

#ifdef CONFIG_MODULES

static int __init ftrace_plt_init(void)
{
	unsigned int *ip;
	const char *start, *end;

	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
	ftrace_plt = module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");
	ip = (unsigned int *) ftrace_plt;
	ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
	ip[1] = 0x100a0004;
	ip[2] = 0x07f10000;
	ip[3] = FTRACE_ADDR >> 32;
	ip[4] = FTRACE_ADDR & 0xffffffff;
	set_memory_ro(ftrace_plt, 1);

	start = ftrace_shared_hotpatch_trampoline(&end);
	if (!start) {
		start = ftrace_plt_template;
		end = ftrace_plt_template_end;
	}
	memcpy(ftrace_plt, start, end - start);
	set_memory_ro((unsigned long)ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);
@@ -147,17 +341,13 @@ NOKPROBE_SYMBOL(prepare_ftrace_return);
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	brcl_disable(__va(ftrace_graph_caller));
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	brcl_enable(__va(ftrace_graph_caller));
	return 0;
}

arch/s390/kernel/ftrace.h (new file, 26 lines)
@@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FTRACE_H
#define _FTRACE_H

#include <asm/types.h>

struct ftrace_hotpatch_trampoline {
	u16 brasl_opc;
	s32 brasl_disp;
	s16 : 16;
	u64 rest_of_intercepted_function;
	u64 interceptor;
} __packed;

extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_start[];
extern struct ftrace_hotpatch_trampoline __ftrace_hotpatch_trampolines_end[];
extern const char ftrace_shared_hotpatch_trampoline_br[];
extern const char ftrace_shared_hotpatch_trampoline_br_end[];
extern const char ftrace_shared_hotpatch_trampoline_ex[];
extern const char ftrace_shared_hotpatch_trampoline_ex_end[];
extern const char ftrace_shared_hotpatch_trampoline_exrl[];
extern const char ftrace_shared_hotpatch_trampoline_exrl_end[];
extern const char ftrace_plt_template[];
extern const char ftrace_plt_template_end[];

#endif /* _FTRACE_H */
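For reference, after "brasl %r1,<shared trampoline>" register %r1 holds the address right behind the 6-byte brasl, so the shared trampoline's "lmg %r0,%r1,2(%r1)" loads rest_of_intercepted_function into %r0 and interceptor into %r1 before branching. A userspace sketch, not part of this diff, that mirrors the layout above and checks those offsets:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Userspace copy of the layout declared in arch/s390/kernel/ftrace.h. */
    struct ftrace_hotpatch_trampoline {
    	uint16_t brasl_opc;
    	int32_t brasl_disp;
    	int16_t pad;			/* stands in for the anonymous "s16 : 16" */
    	uint64_t rest_of_intercepted_function;
    	uint64_t interceptor;
    } __attribute__((packed));

    int main(void)
    {
    	/* 2 + 4 + 2 + 8 + 8 bytes. */
    	static_assert(sizeof(struct ftrace_hotpatch_trampoline) == 24,
    		      "unexpected trampoline size");

    	/*
    	 * %r1 points at offset 6 after the brasl, so "lmg %r0,%r1,2(%r1)"
    	 * reads 16 bytes starting at offset 8: the resume address and the
    	 * interceptor (ftrace_caller) in one go.
    	 */
    	printf("lmg reads from offset %zu\n",
    	       offsetof(struct ftrace_hotpatch_trampoline,
    			rest_of_intercepted_function));
    	printf("branch target read from offset %zu\n",
    	       offsetof(struct ftrace_hotpatch_trampoline, interceptor));
    	return 0;
    }
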
@ -21,6 +21,7 @@ ENTRY(startup_continue)
|
||||
larl %r1,tod_clock_base
|
||||
mvc 0(16,%r1),__LC_BOOT_CLOCK
|
||||
larl %r13,.LPG1 # get base
|
||||
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
|
||||
#
|
||||
# Setup stack
|
||||
#
|
||||
@ -41,3 +42,19 @@ ENTRY(startup_continue)
|
||||
.align 16
|
||||
.LPG1:
|
||||
.Ldw: .quad 0x0002000180000000,0x0000000000000000
|
||||
.Lctl: .quad 0x04040000 # cr0: AFP registers & secondary space
|
||||
.quad 0 # cr1: primary space segment table
|
||||
.quad 0 # cr2: dispatchable unit control table
|
||||
.quad 0 # cr3: instruction authorization
|
||||
.quad 0xffff # cr4: instruction authorization
|
||||
.quad 0 # cr5: primary-aste origin
|
||||
.quad 0 # cr6: I/O interrupts
|
||||
.quad 0 # cr7: secondary space segment table
|
||||
.quad 0x0000000000008000 # cr8: access registers translation
|
||||
.quad 0 # cr9: tracing off
|
||||
.quad 0 # cr10: tracing off
|
||||
.quad 0 # cr11: tracing off
|
||||
.quad 0 # cr12: tracing off
|
||||
.quad 0 # cr13: home space segment table
|
||||
.quad 0xc0000000 # cr14: machine check handling off
|
||||
.quad 0 # cr15: linkage stack operations
|
||||
|
@ -179,8 +179,6 @@ static inline int __diag308(unsigned long subcode, void *addr)
|
||||
|
||||
int diag308(unsigned long subcode, void *addr)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_KASAN))
|
||||
__arch_local_irq_stosm(0x04); /* enable DAT */
|
||||
diag_stat_inc(DIAG_STAT_X308);
|
||||
return __diag308(subcode, addr);
|
||||
}
|
||||
@ -1843,7 +1841,6 @@ static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
|
||||
|
||||
static void __do_restart(void *ignore)
|
||||
{
|
||||
__arch_local_irq_stosm(0x04); /* enable DAT */
|
||||
smp_send_stop();
|
||||
#ifdef CONFIG_CRASH_DUMP
|
||||
crash_kexec(NULL);
|
||||
@ -2082,7 +2079,7 @@ void s390_reset_system(void)
|
||||
|
||||
/* Disable lowcore protection */
|
||||
__ctl_clear_bit(0, 28);
|
||||
diag_dma_ops.diag308_reset();
|
||||
diag_amode31_ops.diag308_reset();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KEXEC_FILE
|
||||
|
@ -1,4 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/minmax.h>
|
||||
#include <linux/string.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include <asm/ipl.h>
|
||||
|
||||
|
@ -228,7 +228,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
||||
int index = *(loff_t *) v;
|
||||
int cpu, irq;
|
||||
|
||||
get_online_cpus();
|
||||
cpus_read_lock();
|
||||
if (index == 0) {
|
||||
seq_puts(p, " ");
|
||||
for_each_online_cpu(cpu)
|
||||
@ -258,7 +258,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
||||
seq_putc(p, '\n');
|
||||
}
|
||||
out:
|
||||
put_online_cpus();
|
||||
cpus_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -36,7 +36,7 @@ static void jump_label_bug(struct jump_entry *entry, struct insn *expected,
|
||||
unsigned char *ipe = (unsigned char *)expected;
|
||||
unsigned char *ipn = (unsigned char *)new;
|
||||
|
||||
pr_emerg("Jump label code mismatch at %pS [%p]\n", ipc, ipc);
|
||||
pr_emerg("Jump label code mismatch at %pS [%px]\n", ipc, ipc);
|
||||
pr_emerg("Found: %6ph\n", ipc);
|
||||
pr_emerg("Expected: %6ph\n", ipe);
|
||||
pr_emerg("New: %6ph\n", ipn);
|
||||
|
@ -224,8 +224,8 @@ void arch_crash_save_vmcoreinfo(void)
|
||||
VMCOREINFO_SYMBOL(lowcore_ptr);
|
||||
VMCOREINFO_SYMBOL(high_memory);
|
||||
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
|
||||
vmcoreinfo_append_str("SDMA=%lx\n", __sdma);
|
||||
vmcoreinfo_append_str("EDMA=%lx\n", __edma);
|
||||
vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
|
||||
vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
|
||||
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
|
||||
mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
|
||||
}
|
||||
@ -263,7 +263,6 @@ static void __do_machine_kexec(void *data)
|
||||
*/
|
||||
static void __machine_kexec(void *data)
|
||||
{
|
||||
__arch_local_irq_stosm(0x04); /* enable DAT */
|
||||
pfault_fini();
|
||||
tracing_off();
|
||||
debug_locks_off();
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <linux/elf.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/ftrace.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kasan.h>
|
||||
@ -23,6 +24,8 @@
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
#include <asm/facility.h>
|
||||
#include <asm/ftrace.lds.h>
|
||||
#include <asm/set_memory.h>
|
||||
|
||||
#if 0
|
||||
#define DEBUGP printk
|
||||
@ -48,6 +51,13 @@ void *module_alloc(unsigned long size)
|
||||
return p;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
void module_arch_cleanup(struct module *mod)
|
||||
{
|
||||
module_memfree(mod->arch.trampolines_start);
|
||||
}
|
||||
#endif
|
||||
|
||||
void module_arch_freeing_init(struct module *mod)
|
||||
{
|
||||
if (is_livepatch_module(mod) &&
|
||||
@ -466,6 +476,30 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
|
||||
write);
|
||||
}
|

#ifdef CONFIG_FUNCTION_TRACER
static int module_alloc_ftrace_hotpatch_trampolines(struct module *me,
						    const Elf_Shdr *s)
{
	char *start, *end;
	int numpages;
	size_t size;

	size = FTRACE_HOTPATCH_TRAMPOLINES_SIZE(s->sh_size);
	numpages = DIV_ROUND_UP(size, PAGE_SIZE);
	start = module_alloc(numpages * PAGE_SIZE);
	if (!start)
		return -ENOMEM;
	set_memory_ro((unsigned long)start, numpages);
	end = start + size;

	me->arch.trampolines_start = (struct ftrace_hotpatch_trampoline *)start;
	me->arch.trampolines_end = (struct ftrace_hotpatch_trampoline *)end;
	me->arch.next_trampoline = me->arch.trampolines_start;

	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */
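FTRACE_HOTPATCH_TRAMPOLINES_SIZE() comes from asm/ftrace.lds.h and is not shown in this diff; the sketch below only illustrates the page-rounding logic under the assumption that one 24-byte trampoline is reserved per 8-byte __mcount_loc entry in the section (the real macro may differ):

    #include <stdio.h>

    #define PAGE_SIZE		4096UL
    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    /* Assumed sizes, for illustration only. */
    #define SIZEOF_MCOUNT_LOC_ENTRY			8UL
    #define SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE	24UL

    int main(void)
    {
    	unsigned long sh_size = 1000 * SIZEOF_MCOUNT_LOC_ENTRY; /* hypothetical module */
    	unsigned long size, numpages;

    	size = (sh_size / SIZEOF_MCOUNT_LOC_ENTRY) *
    		SIZEOF_FTRACE_HOTPATCH_TRAMPOLINE;
    	numpages = DIV_ROUND_UP(size, PAGE_SIZE);
    	printf("%lu traced functions -> %lu bytes, %lu page(s)\n",
    	       sh_size / SIZEOF_MCOUNT_LOC_ENTRY, size, numpages);
    	return 0;
    }
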
||||
|
||||
int module_finalize(const Elf_Ehdr *hdr,
|
||||
const Elf_Shdr *sechdrs,
|
||||
struct module *me)
|
||||
@ -473,6 +507,9 @@ int module_finalize(const Elf_Ehdr *hdr,
|
||||
const Elf_Shdr *s;
|
||||
char *secstrings, *secname;
|
||||
void *aseg;
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
int ret;
|
||||
#endif
|
||||
|
||||
if (IS_ENABLED(CONFIG_EXPOLINE) &&
|
||||
!nospec_disable && me->arch.plt_size) {
|
||||
@ -507,6 +544,14 @@ int module_finalize(const Elf_Ehdr *hdr,
|
||||
if (IS_ENABLED(CONFIG_EXPOLINE) &&
|
||||
(str_has_prefix(secname, ".s390_return")))
|
||||
nospec_revert(aseg, aseg + s->sh_size);
|
||||
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
if (!strcmp(FTRACE_CALLSITE_SECTION, secname)) {
|
||||
ret = module_alloc_ftrace_hotpatch_trampolines(me, s);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
#endif /* CONFIG_FUNCTION_TRACER */
|
||||
}
|
||||
|
||||
jump_label_apply_nops(me);
|
||||
|
@ -121,7 +121,7 @@ static void os_info_old_init(void)
|
||||
|
||||
if (os_info_init)
|
||||
return;
|
||||
if (!OLDMEM_BASE)
|
||||
if (!oldmem_data.start)
|
||||
goto fail;
|
||||
if (copy_oldmem_kernel(&addr, &S390_lowcore.os_info, sizeof(addr)))
|
||||
goto fail;
|
||||
|
@ -1138,7 +1138,7 @@ static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
int ret;
|
||||
|
||||
get_online_cpus();
|
||||
cpus_read_lock();
|
||||
mutex_lock(&cfset_ctrset_mutex);
|
||||
switch (cmd) {
|
||||
case S390_HWCTR_START:
|
||||
@ -1155,7 +1155,7 @@ static long cfset_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&cfset_ctrset_mutex);
|
||||
put_online_cpus();
|
||||
cpus_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <linux/cpufeature.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/sched/mm.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/seq_file.h>
|
||||
@ -23,8 +24,12 @@
|
||||
#include <asm/elf.h>
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/param.h>
|
||||
#include <asm/sclp.h>
|
||||
#include <asm/smp.h>
|
||||
|
||||
unsigned long __read_mostly elf_hwcap;
|
||||
char elf_platform[ELF_PLATFORM_SIZE];
|
||||
|
||||
struct cpu_info {
|
||||
unsigned int cpu_mhz_dynamic;
|
||||
unsigned int cpu_mhz_static;
|
||||
@@ -113,15 +118,33 @@ static void show_facilities(struct seq_file *m)
static void show_cpu_summary(struct seq_file *m, void *v)
{
	static const char *hwcap_str[] = {
		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
		"edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs",
		"vxe2", "vxp", "sort", "dflt"
	};
	static const char * const int_hwcap_str[] = {
		"sie"
		[HWCAP_NR_ESAN3] = "esan3",
		[HWCAP_NR_ZARCH] = "zarch",
		[HWCAP_NR_STFLE] = "stfle",
		[HWCAP_NR_MSA] = "msa",
		[HWCAP_NR_LDISP] = "ldisp",
		[HWCAP_NR_EIMM] = "eimm",
		[HWCAP_NR_DFP] = "dfp",
		[HWCAP_NR_HPAGE] = "edat",
		[HWCAP_NR_ETF3EH] = "etf3eh",
		[HWCAP_NR_HIGH_GPRS] = "highgprs",
		[HWCAP_NR_TE] = "te",
		[HWCAP_NR_VXRS] = "vx",
		[HWCAP_NR_VXRS_BCD] = "vxd",
		[HWCAP_NR_VXRS_EXT] = "vxe",
		[HWCAP_NR_GS] = "gs",
		[HWCAP_NR_VXRS_EXT2] = "vxe2",
		[HWCAP_NR_VXRS_PDE] = "vxp",
		[HWCAP_NR_SORT] = "sort",
		[HWCAP_NR_DFLT] = "dflt",
		[HWCAP_NR_VXRS_PDE2] = "vxp2",
		[HWCAP_NR_NNPA] = "nnpa",
		[HWCAP_NR_PCI_MIO] = "pcimio",
		[HWCAP_NR_SIE] = "sie",
	};
	int i, cpu;

	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	seq_printf(m, "vendor_id : IBM/S390\n"
		   "# processors : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
@@ -132,9 +155,6 @@ static void show_cpu_summary(struct seq_file *m, void *v)
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
		if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
			seq_printf(m, "%s ", int_hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
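The new table above replaces positional strings with designated initializers indexed by the HWCAP_NR_* constants, and BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX) turns a missing entry into a build error. A standalone sketch of the same pattern (the enum values here are made up, not the kernel's):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Hypothetical subset of a HWCAP_NR_* numbering. */
    enum {
    	HWCAP_NR_ESAN3,
    	HWCAP_NR_ZARCH,
    	HWCAP_NR_STFLE,
    	HWCAP_NR_MAX
    };

    /* Indexing by the enum keeps name and bit number tied together; the
     * size check makes a forgotten entry a compile-time failure. */
    static const char * const hwcap_str[] = {
    	[HWCAP_NR_ESAN3] = "esan3",
    	[HWCAP_NR_ZARCH] = "zarch",
    	[HWCAP_NR_STFLE] = "stfle",
    };

    _Static_assert(ARRAY_SIZE(hwcap_str) == HWCAP_NR_MAX, "hwcap_str incomplete");

    int main(void)
    {
    	unsigned long elf_hwcap = (1UL << HWCAP_NR_ZARCH) | (1UL << HWCAP_NR_STFLE);
    	unsigned int i;

    	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
    		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
    			printf("%s ", hwcap_str[i]);
    	printf("\n");
    	return 0;
    }
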
||||
@ -149,6 +169,141 @@ static void show_cpu_summary(struct seq_file *m, void *v)
|
||||
}
|
||||
}
|
||||
|
||||
static int __init setup_hwcaps(void)
|
||||
{
|
||||
/* instructions named N3, "backported" to esa-mode */
|
||||
if (test_facility(0))
|
||||
elf_hwcap |= HWCAP_ESAN3;
|
||||
|
||||
/* z/Architecture mode active */
|
||||
elf_hwcap |= HWCAP_ZARCH;
|
||||
|
||||
/* store-facility-list-extended */
|
||||
if (test_facility(7))
|
||||
elf_hwcap |= HWCAP_STFLE;
|
||||
|
||||
/* message-security assist */
|
||||
if (test_facility(17))
|
||||
elf_hwcap |= HWCAP_MSA;
|
||||
|
||||
/* long-displacement */
|
||||
if (test_facility(19))
|
||||
elf_hwcap |= HWCAP_LDISP;
|
||||
|
||||
/* extended-immediate */
|
||||
if (test_facility(21))
|
||||
elf_hwcap |= HWCAP_EIMM;
|
||||
|
||||
/* extended-translation facility 3 enhancement */
|
||||
if (test_facility(22) && test_facility(30))
|
||||
elf_hwcap |= HWCAP_ETF3EH;
|
||||
|
||||
/* decimal floating point & perform floating point operation */
|
||||
if (test_facility(42) && test_facility(44))
|
||||
elf_hwcap |= HWCAP_DFP;
|
||||
|
||||
/* huge page support */
|
||||
if (MACHINE_HAS_EDAT1)
|
||||
elf_hwcap |= HWCAP_HPAGE;
|
||||
|
||||
/* 64-bit register support for 31-bit processes */
|
||||
elf_hwcap |= HWCAP_HIGH_GPRS;
|
||||
|
||||
/* transactional execution */
|
||||
if (MACHINE_HAS_TE)
|
||||
elf_hwcap |= HWCAP_TE;
|
||||
|
||||
/*
|
||||
* Vector extension can be disabled with the "novx" parameter.
|
||||
* Use MACHINE_HAS_VX instead of facility bit 129.
|
||||
*/
|
||||
if (MACHINE_HAS_VX) {
|
||||
elf_hwcap |= HWCAP_VXRS;
|
||||
if (test_facility(134))
|
||||
elf_hwcap |= HWCAP_VXRS_BCD;
|
||||
if (test_facility(135))
|
||||
elf_hwcap |= HWCAP_VXRS_EXT;
|
||||
if (test_facility(148))
|
||||
elf_hwcap |= HWCAP_VXRS_EXT2;
|
||||
if (test_facility(152))
|
||||
elf_hwcap |= HWCAP_VXRS_PDE;
|
||||
if (test_facility(192))
|
||||
elf_hwcap |= HWCAP_VXRS_PDE2;
|
||||
}
|
||||
|
||||
if (test_facility(150))
|
||||
elf_hwcap |= HWCAP_SORT;
|
||||
|
||||
if (test_facility(151))
|
||||
elf_hwcap |= HWCAP_DFLT;
|
||||
|
||||
if (test_facility(165))
|
||||
elf_hwcap |= HWCAP_NNPA;
|
||||
|
||||
/* guarded storage */
|
||||
if (MACHINE_HAS_GS)
|
||||
elf_hwcap |= HWCAP_GS;
|
||||
|
||||
if (MACHINE_HAS_PCI_MIO)
|
||||
elf_hwcap |= HWCAP_PCI_MIO;
|
||||
|
||||
/* virtualization support */
|
||||
if (sclp.has_sief2)
|
||||
elf_hwcap |= HWCAP_SIE;
|
||||
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(setup_hwcaps);
|
||||
|
||||
static int __init setup_elf_platform(void)
|
||||
{
|
||||
struct cpuid cpu_id;
|
||||
|
||||
get_cpu_id(&cpu_id);
|
||||
add_device_randomness(&cpu_id, sizeof(cpu_id));
|
||||
switch (cpu_id.machine) {
|
||||
case 0x2064:
|
||||
case 0x2066:
|
||||
default: /* Use "z900" as default for 64 bit kernels. */
|
||||
strcpy(elf_platform, "z900");
|
||||
break;
|
||||
case 0x2084:
|
||||
case 0x2086:
|
||||
strcpy(elf_platform, "z990");
|
||||
break;
|
||||
case 0x2094:
|
||||
case 0x2096:
|
||||
strcpy(elf_platform, "z9-109");
|
||||
break;
|
||||
case 0x2097:
|
||||
case 0x2098:
|
||||
strcpy(elf_platform, "z10");
|
||||
break;
|
||||
case 0x2817:
|
||||
case 0x2818:
|
||||
strcpy(elf_platform, "z196");
|
||||
break;
|
||||
case 0x2827:
|
||||
case 0x2828:
|
||||
strcpy(elf_platform, "zEC12");
|
||||
break;
|
||||
case 0x2964:
|
||||
case 0x2965:
|
||||
strcpy(elf_platform, "z13");
|
||||
break;
|
||||
case 0x3906:
|
||||
case 0x3907:
|
||||
strcpy(elf_platform, "z14");
|
||||
break;
|
||||
case 0x8561:
|
||||
case 0x8562:
|
||||
strcpy(elf_platform, "z15");
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(setup_elf_platform);
|
||||
|
||||
static void show_cpu_topology(struct seq_file *m, unsigned long n)
|
||||
{
|
||||
#ifdef CONFIG_SCHED_TOPOLOGY
|
||||
@ -210,7 +365,7 @@ static inline void *c_update(loff_t *pos)
|
||||
|
||||
static void *c_start(struct seq_file *m, loff_t *pos)
|
||||
{
|
||||
get_online_cpus();
|
||||
cpus_read_lock();
|
||||
return c_update(pos);
|
||||
}
|
||||
|
||||
@ -222,7 +377,7 @@ static void *c_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
|
||||
static void c_stop(struct seq_file *m, void *v)
|
||||
{
|
||||
put_online_cpus();
|
||||
cpus_read_unlock();
|
||||
}
|
||||
|
||||
const struct seq_operations cpuinfo_op = {
|
||||
|
@ -89,27 +89,71 @@ EXPORT_SYMBOL(console_devno);
|
||||
unsigned int console_irq = -1;
|
||||
EXPORT_SYMBOL(console_irq);
|
||||
|
||||
unsigned long elf_hwcap __read_mostly = 0;
|
||||
char elf_platform[ELF_PLATFORM_SIZE];
|
||||
/*
|
||||
* Some code and data needs to stay below 2 GB, even when the kernel would be
|
||||
* relocated above 2 GB, because it has to use 31 bit addresses.
|
||||
* Such code and data is part of the .amode31 section.
|
||||
*/
|
||||
unsigned long __amode31_ref __samode31 = __pa(&_samode31);
|
||||
unsigned long __amode31_ref __eamode31 = __pa(&_eamode31);
|
||||
unsigned long __amode31_ref __stext_amode31 = __pa(&_stext_amode31);
|
||||
unsigned long __amode31_ref __etext_amode31 = __pa(&_etext_amode31);
|
||||
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
|
||||
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
|
||||
|
||||
unsigned long int_hwcap = 0;
|
||||
/*
|
||||
* Control registers CR2, CR5 and CR15 are initialized with addresses
|
||||
* of tables that must be placed below 2G which is handled by the AMODE31
|
||||
* sections.
|
||||
* Because the AMODE31 sections are relocated below 2G at startup,
|
||||
* the content of control registers CR2, CR5 and CR15 must be updated
|
||||
* with new addresses after the relocation. The initial initialization of
|
||||
* control registers occurs in head64.S and then gets updated again after AMODE31
|
||||
* relocation. We must access the relevant AMODE31 tables indirectly via
|
||||
* pointers placed in the .amode31.refs linker section. Those pointers get
|
||||
* updated automatically during AMODE31 relocation and always contain a valid
|
||||
* address within AMODE31 sections.
|
||||
*/
|
||||
|
||||
static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);
|
||||
|
||||
static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
|
||||
[1] = 0xffffffffffffffff
|
||||
};
|
||||
|
||||
static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
|
||||
0x80000000, 0, 0, 0,
|
||||
0x80000000, 0, 0, 0,
|
||||
0x80000000, 0, 0, 0,
|
||||
0x80000000, 0, 0, 0,
|
||||
0x80000000, 0, 0, 0,
|
||||
0x80000000, 0, 0, 0,
|
||||
0x80000000, 0, 0, 0,
|
||||
0x80000000, 0, 0, 0
|
||||
};
|
||||
|
||||
static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
|
||||
0, 0, 0x89000000, 0,
|
||||
0, 0, 0x8a000000, 0
|
||||
};
|
||||
|
||||
static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
|
||||
static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
|
||||
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
|
||||
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
|
||||
|
||||
int __bootdata(noexec_disabled);
|
||||
unsigned long __bootdata(ident_map_size);
|
||||
struct mem_detect_info __bootdata(mem_detect);
|
||||
struct initrd_data __bootdata(initrd_data);
|
||||
|
||||
struct exception_table_entry *__bootdata_preserved(__start_dma_ex_table);
|
||||
struct exception_table_entry *__bootdata_preserved(__stop_dma_ex_table);
|
||||
unsigned long __bootdata_preserved(__stext_dma);
|
||||
unsigned long __bootdata_preserved(__etext_dma);
|
||||
unsigned long __bootdata_preserved(__sdma);
|
||||
unsigned long __bootdata_preserved(__edma);
|
||||
unsigned long __bootdata_preserved(__kaslr_offset);
|
||||
unsigned int __bootdata_preserved(zlib_dfltcc_support);
|
||||
EXPORT_SYMBOL(zlib_dfltcc_support);
|
||||
u64 __bootdata_preserved(stfle_fac_list[16]);
|
||||
EXPORT_SYMBOL(stfle_fac_list);
|
||||
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
|
||||
struct oldmem_data __bootdata_preserved(oldmem_data);
|
||||
|
||||
unsigned long VMALLOC_START;
|
||||
EXPORT_SYMBOL(VMALLOC_START);
|
||||
@ -254,7 +298,7 @@ static void __init setup_zfcpdump(void)
|
||||
{
|
||||
if (!is_ipl_type_dump())
|
||||
return;
|
||||
if (OLDMEM_BASE)
|
||||
if (oldmem_data.start)
|
||||
return;
|
||||
strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
|
||||
console_loglevel = 2;
|
||||
@ -421,7 +465,7 @@ static void __init setup_lowcore_dat_off(void)
|
||||
lc->restart_stack = (unsigned long) restart_stack;
|
||||
lc->restart_fn = (unsigned long) do_restart;
|
||||
lc->restart_data = 0;
|
||||
lc->restart_source = -1UL;
|
||||
lc->restart_source = -1U;
|
||||
|
||||
mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
|
||||
if (!mcck_stack)
|
||||
@ -450,12 +494,19 @@ static void __init setup_lowcore_dat_off(void)
|
||||
|
||||
static void __init setup_lowcore_dat_on(void)
|
||||
{
|
||||
struct lowcore *lc = lowcore_ptr[0];
|
||||
|
||||
__ctl_clear_bit(0, 28);
|
||||
S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
|
||||
S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
|
||||
S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
|
||||
S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
|
||||
__ctl_store(S390_lowcore.cregs_save_area, 0, 15);
|
||||
__ctl_set_bit(0, 28);
|
||||
mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
|
||||
mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
|
||||
memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
|
||||
sizeof(S390_lowcore.cregs_save_area));
|
||||
}
|
||||
|
||||
static struct resource code_resource = {
|
||||
@ -610,9 +661,9 @@ static void __init reserve_crashkernel(void)
|
||||
return;
|
||||
}
|
||||
|
||||
low = crash_base ?: OLDMEM_BASE;
|
||||
low = crash_base ?: oldmem_data.start;
|
||||
high = low + crash_size;
|
||||
if (low >= OLDMEM_BASE && high <= OLDMEM_BASE + OLDMEM_SIZE) {
|
||||
if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
|
||||
/* The crashkernel fits into OLDMEM, reuse OLDMEM */
|
||||
crash_base = low;
|
||||
} else {
|
||||
@ -639,7 +690,7 @@ static void __init reserve_crashkernel(void)
|
||||
if (register_memory_notifier(&kdump_mem_nb))
|
||||
return;
|
||||
|
||||
if (!OLDMEM_BASE && MACHINE_IS_VM)
|
||||
if (!oldmem_data.start && MACHINE_IS_VM)
|
||||
diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
|
||||
crashk_res.start = crash_base;
|
||||
crashk_res.end = crash_base + crash_size - 1;
|
||||
@ -658,11 +709,11 @@ static void __init reserve_crashkernel(void)
|
||||
static void __init reserve_initrd(void)
|
||||
{
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
if (!INITRD_START || !INITRD_SIZE)
|
||||
if (!initrd_data.start || !initrd_data.size)
|
||||
return;
|
||||
initrd_start = INITRD_START;
|
||||
initrd_end = initrd_start + INITRD_SIZE;
|
||||
memblock_reserve(INITRD_START, INITRD_SIZE);
|
||||
initrd_start = initrd_data.start;
|
||||
initrd_end = initrd_start + initrd_data.size;
|
||||
memblock_reserve(initrd_data.start, initrd_data.size);
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -732,10 +783,10 @@ static void __init memblock_add_mem_detect_info(void)
|
||||
static void __init check_initrd(void)
|
||||
{
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
if (INITRD_START && INITRD_SIZE &&
|
||||
!memblock_is_region_memory(INITRD_START, INITRD_SIZE)) {
|
||||
if (initrd_data.start && initrd_data.size &&
|
||||
!memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
|
||||
pr_err("The initial RAM disk does not fit into the memory\n");
|
||||
memblock_free(INITRD_START, INITRD_SIZE);
|
||||
memblock_free(initrd_data.start, initrd_data.size);
|
||||
initrd_start = initrd_end = 0;
|
||||
}
|
||||
#endif
|
||||
@ -748,10 +799,10 @@ static void __init reserve_kernel(void)
|
||||
{
|
||||
unsigned long start_pfn = PFN_UP(__pa(_end));
|
||||
|
||||
memblock_reserve(0, HEAD_END);
|
||||
memblock_reserve(0, STARTUP_NORMAL_OFFSET);
|
||||
memblock_reserve((unsigned long)sclp_early_sccb, EXT_SCCB_READ_SCP);
|
||||
memblock_reserve((unsigned long)_stext, PFN_PHYS(start_pfn)
|
||||
- (unsigned long)_stext);
|
||||
memblock_reserve(__sdma, __edma - __sdma);
|
||||
}
|
||||
|
||||
static void __init setup_memory(void)
|
||||
@ -771,152 +822,52 @@ static void __init setup_memory(void)
|
||||
memblock_enforce_memory_limit(memblock_end_of_DRAM());
|
||||
}
|
||||
|
||||
/*
|
||||
* Setup hardware capabilities.
|
||||
*/
|
||||
static int __init setup_hwcaps(void)
|
||||
static void __init relocate_amode31_section(void)
|
||||
{
|
||||
static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
|
||||
struct cpuid cpu_id;
|
||||
int i;
|
||||
unsigned long amode31_addr, amode31_size;
|
||||
long amode31_offset;
|
||||
long *ptr;
|
||||
|
||||
/*
|
||||
* The store facility list bits numbers as found in the principles
|
||||
* of operation are numbered with bit 1UL<<31 as number 0 to
|
||||
* bit 1UL<<0 as number 31.
|
||||
* Bit 0: instructions named N3, "backported" to esa-mode
|
||||
* Bit 2: z/Architecture mode is active
|
||||
* Bit 7: the store-facility-list-extended facility is installed
|
||||
* Bit 17: the message-security assist is installed
|
||||
* Bit 19: the long-displacement facility is installed
|
||||
* Bit 21: the extended-immediate facility is installed
|
||||
* Bit 22: extended-translation facility 3 is installed
|
||||
* Bit 30: extended-translation facility 3 enhancement facility
|
||||
* These get translated to:
|
||||
* HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
|
||||
* HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
|
||||
* HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
|
||||
* HWCAP_S390_ETF3EH bit 8 (22 && 30).
|
||||
*/
|
||||
for (i = 0; i < 6; i++)
|
||||
if (test_facility(stfl_bits[i]))
|
||||
elf_hwcap |= 1UL << i;
|
||||
/* Allocate a new AMODE31 capable memory region */
|
||||
amode31_size = __eamode31 - __samode31;
|
||||
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
|
||||
amode31_addr = (unsigned long)memblock_alloc_low(amode31_size, PAGE_SIZE);
|
||||
if (!amode31_addr)
|
||||
panic("Failed to allocate memory for AMODE31 section\n");
|
||||
amode31_offset = amode31_addr - __samode31;
|
||||
|
||||
if (test_facility(22) && test_facility(30))
|
||||
elf_hwcap |= HWCAP_S390_ETF3EH;
|
||||
/* Move original AMODE31 section to the new one */
|
||||
memmove((void *)amode31_addr, (void *)__samode31, amode31_size);
|
||||
/* Zero out the old AMODE31 section to catch invalid accesses within it */
|
||||
memset((void *)__samode31, 0, amode31_size);
|
||||
|
||||
/*
|
||||
* Check for additional facilities with store-facility-list-extended.
|
||||
* stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
|
||||
* and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
|
||||
* as stored by stfl, bits 32-xxx contain additional facilities.
|
||||
* How many facility words are stored depends on the number of
|
||||
* doublewords passed to the instruction. The additional facilities
|
||||
* are:
|
||||
* Bit 42: decimal floating point facility is installed
|
||||
* Bit 44: perform floating point operation facility is installed
|
||||
* translated to:
|
||||
* HWCAP_S390_DFP bit 6 (42 && 44).
|
||||
*/
|
||||
if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
|
||||
elf_hwcap |= HWCAP_S390_DFP;
|
||||
|
||||
/*
|
||||
* Huge page support HWCAP_S390_HPAGE is bit 7.
|
||||
*/
|
||||
if (MACHINE_HAS_EDAT1)
|
||||
elf_hwcap |= HWCAP_S390_HPAGE;
|
||||
|
||||
/*
|
||||
* 64-bit register support for 31-bit processes
|
||||
* HWCAP_S390_HIGH_GPRS is bit 9.
|
||||
*/
|
||||
elf_hwcap |= HWCAP_S390_HIGH_GPRS;
|
||||
|
||||
/*
|
||||
* Transactional execution support HWCAP_S390_TE is bit 10.
|
||||
*/
|
||||
if (MACHINE_HAS_TE)
|
||||
elf_hwcap |= HWCAP_S390_TE;
|
||||
|
||||
/*
|
||||
* Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
|
||||
* can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
|
||||
* instead of facility bit 129.
|
||||
*/
|
||||
if (MACHINE_HAS_VX) {
|
||||
elf_hwcap |= HWCAP_S390_VXRS;
|
||||
if (test_facility(134))
|
||||
elf_hwcap |= HWCAP_S390_VXRS_BCD;
|
||||
if (test_facility(135))
|
||||
elf_hwcap |= HWCAP_S390_VXRS_EXT;
|
||||
if (test_facility(148))
|
||||
elf_hwcap |= HWCAP_S390_VXRS_EXT2;
|
||||
if (test_facility(152))
|
||||
elf_hwcap |= HWCAP_S390_VXRS_PDE;
|
||||
}
|
||||
if (test_facility(150))
|
||||
elf_hwcap |= HWCAP_S390_SORT;
|
||||
if (test_facility(151))
|
||||
elf_hwcap |= HWCAP_S390_DFLT;
|
||||
|
||||
/*
|
||||
* Guarded storage support HWCAP_S390_GS is bit 12.
|
||||
*/
|
||||
if (MACHINE_HAS_GS)
|
||||
elf_hwcap |= HWCAP_S390_GS;
|
||||
|
||||
get_cpu_id(&cpu_id);
|
||||
add_device_randomness(&cpu_id, sizeof(cpu_id));
|
||||
switch (cpu_id.machine) {
|
||||
case 0x2064:
|
||||
case 0x2066:
|
||||
default: /* Use "z900" as default for 64 bit kernels. */
|
||||
strcpy(elf_platform, "z900");
|
||||
break;
|
||||
case 0x2084:
|
||||
case 0x2086:
|
||||
strcpy(elf_platform, "z990");
|
||||
break;
|
||||
case 0x2094:
|
||||
case 0x2096:
|
||||
strcpy(elf_platform, "z9-109");
|
||||
break;
|
||||
case 0x2097:
|
||||
case 0x2098:
|
||||
strcpy(elf_platform, "z10");
|
||||
break;
|
||||
case 0x2817:
|
||||
case 0x2818:
|
||||
strcpy(elf_platform, "z196");
|
||||
break;
|
||||
case 0x2827:
|
||||
case 0x2828:
|
||||
strcpy(elf_platform, "zEC12");
|
||||
break;
|
||||
case 0x2964:
|
||||
case 0x2965:
|
||||
strcpy(elf_platform, "z13");
|
||||
break;
|
||||
case 0x3906:
|
||||
case 0x3907:
|
||||
strcpy(elf_platform, "z14");
|
||||
break;
|
||||
case 0x8561:
|
||||
case 0x8562:
|
||||
strcpy(elf_platform, "z15");
|
||||
break;
|
||||
}
|
||||
|
||||
/*
|
||||
* Virtualization support HWCAP_INT_SIE is bit 0.
|
||||
*/
|
||||
if (sclp.has_sief2)
|
||||
int_hwcap |= HWCAP_INT_SIE;
|
||||
|
||||
return 0;
|
||||
/* Update all AMODE31 region references */
|
||||
for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
|
||||
*ptr += amode31_offset;
|
||||
}
|
||||
|
||||
/* This must be called after AMODE31 relocation */
|
||||
static void __init setup_cr(void)
|
||||
{
|
||||
union ctlreg2 cr2;
|
||||
union ctlreg5 cr5;
|
||||
union ctlreg15 cr15;
|
||||
|
||||
__ctl_duct[1] = (unsigned long)__ctl_aste;
|
||||
__ctl_duct[2] = (unsigned long)__ctl_aste;
|
||||
__ctl_duct[4] = (unsigned long)__ctl_duald;
|
||||
|
||||
/* Update control registers CR2, CR5 and CR15 */
|
||||
__ctl_store(cr2.val, 2, 2);
|
||||
__ctl_store(cr5.val, 5, 5);
|
||||
__ctl_store(cr15.val, 15, 15);
|
||||
cr2.ducto = (unsigned long)__ctl_duct >> 6;
|
||||
cr5.pasteo = (unsigned long)__ctl_duct >> 6;
|
||||
cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
|
||||
__ctl_load(cr2.val, 2, 2);
|
||||
__ctl_load(cr5.val, 5, 5);
|
||||
__ctl_load(cr15.val, 15, 15);
|
||||
}
|
||||
arch_initcall(setup_hwcaps);
|
||||
|
||||
/*
|
||||
* Add system information as device randomness
|
||||
@ -1059,6 +1010,9 @@ void __init setup_arch(char **cmdline_p)
|
||||
|
||||
free_mem_detect_info();
|
||||
|
||||
relocate_amode31_section();
|
||||
setup_cr();
|
||||
|
||||
setup_uv();
|
||||
setup_memory_end();
|
||||
setup_memory();
|
||||
|
@ -533,9 +533,3 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
|
||||
*/
|
||||
restore_saved_sigmask();
|
||||
}
|
||||
|
||||
void do_notify_resume(struct pt_regs *regs)
|
||||
{
|
||||
tracehook_notify_resume(regs);
|
||||
rseq_handle_notify_resume(NULL, regs);
|
||||
}
|
||||
|
@ -252,6 +252,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
|
||||
cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
|
||||
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
|
||||
lc->cpu_nr = cpu;
|
||||
lc->restart_flags = RESTART_FLAG_CTLREGS;
|
||||
lc->spinlock_lockval = arch_spin_lockval(cpu);
|
||||
lc->spinlock_index = 0;
|
||||
lc->percpu_offset = __per_cpu_offset[cpu];
|
||||
@ -294,10 +295,10 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
|
||||
|
||||
cpu = pcpu - pcpu_devices;
|
||||
lc = lowcore_ptr[cpu];
|
||||
lc->restart_stack = lc->nodat_stack;
|
||||
lc->restart_stack = lc->kernel_stack;
|
||||
lc->restart_fn = (unsigned long) func;
|
||||
lc->restart_data = (unsigned long) data;
|
||||
lc->restart_source = -1UL;
|
||||
lc->restart_source = -1U;
|
||||
pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
|
||||
}
|
||||
|
||||
@ -311,12 +312,12 @@ static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
|
||||
func(data); /* should not return */
|
||||
}
|
||||
|
||||
static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
|
||||
pcpu_delegate_fn *func,
|
||||
void *data, unsigned long stack)
|
||||
static void pcpu_delegate(struct pcpu *pcpu,
|
||||
pcpu_delegate_fn *func,
|
||||
void *data, unsigned long stack)
|
||||
{
|
||||
struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
|
||||
unsigned long source_cpu = stap();
|
||||
unsigned int source_cpu = stap();
|
||||
|
||||
__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
|
||||
if (pcpu->address == source_cpu) {
|
||||
@ -569,6 +570,9 @@ static void smp_ctl_bit_callback(void *info)
|
||||
__ctl_load(cregs, 0, 15);
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(ctl_lock);
|
||||
static unsigned long ctlreg;
|
||||
|
||||
/*
|
||||
* Set a bit in a control register of all cpus
|
||||
*/
|
||||
@ -576,6 +580,11 @@ void smp_ctl_set_bit(int cr, int bit)
|
||||
{
|
||||
struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
|
||||
|
||||
spin_lock(&ctl_lock);
|
||||
memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
|
||||
__set_bit(bit, &ctlreg);
|
||||
memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
|
||||
spin_unlock(&ctl_lock);
|
||||
on_each_cpu(smp_ctl_bit_callback, &parms, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(smp_ctl_set_bit);
|
||||
@ -587,6 +596,11 @@ void smp_ctl_clear_bit(int cr, int bit)
|
||||
{
|
||||
struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
|
||||
|
||||
spin_lock(&ctl_lock);
|
||||
memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
|
||||
__clear_bit(bit, &ctlreg);
|
||||
memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
|
||||
spin_unlock(&ctl_lock);
|
||||
on_each_cpu(smp_ctl_bit_callback, &parms, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(smp_ctl_clear_bit);
|
||||
@ -673,7 +687,7 @@ void __init smp_save_dump_cpus(void)
|
||||
unsigned long page;
|
||||
bool is_boot_cpu;
|
||||
|
||||
if (!(OLDMEM_BASE || is_ipl_type_dump()))
|
||||
if (!(oldmem_data.start || is_ipl_type_dump()))
|
||||
/* No previous system present, normal boot. */
|
||||
return;
|
||||
/* Allocate a page as dumping area for the store status sigps */
|
||||
@ -704,12 +718,12 @@ void __init smp_save_dump_cpus(void)
|
||||
* these registers an SCLP request is required which is
|
||||
* done by drivers/s390/char/zcore.c:init_cpu_info()
|
||||
*/
|
||||
if (!is_boot_cpu || OLDMEM_BASE)
|
||||
if (!is_boot_cpu || oldmem_data.start)
|
||||
/* Get the CPU registers */
|
||||
smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
|
||||
}
|
||||
memblock_free(page, PAGE_SIZE);
|
||||
diag_dma_ops.diag308_reset();
|
||||
diag_amode31_ops.diag308_reset();
|
||||
pcpu_set_smt(0);
|
||||
}
|
||||
#endif /* CONFIG_CRASH_DUMP */
|
||||
@ -793,7 +807,7 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
|
||||
u16 core_id;
|
||||
int nr, i;
|
||||
|
||||
get_online_cpus();
|
||||
cpus_read_lock();
|
||||
mutex_lock(&smp_cpu_state_mutex);
|
||||
nr = 0;
|
||||
cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
|
||||
@ -816,7 +830,7 @@ static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
|
||||
nr += smp_add_core(&info->core[i], &avail, configured, early);
|
||||
}
|
||||
mutex_unlock(&smp_cpu_state_mutex);
|
||||
put_online_cpus();
|
||||
cpus_read_unlock();
|
||||
return nr;
|
||||
}
|
||||
|
||||
@ -868,11 +882,19 @@ void __init smp_detect_cpus(void)
|
||||
memblock_free_early((unsigned long)info, sizeof(*info));
|
||||
}
|
||||
|
||||
static void smp_init_secondary(void)
|
||||
/*
|
||||
* Activate a secondary processor.
|
||||
*/
|
||||
static void smp_start_secondary(void *cpuvoid)
|
||||
{
|
||||
int cpu = raw_smp_processor_id();
|
||||
|
||||
S390_lowcore.last_update_clock = get_tod_clock();
|
||||
S390_lowcore.restart_stack = (unsigned long)restart_stack;
|
||||
S390_lowcore.restart_fn = (unsigned long)do_restart;
|
||||
S390_lowcore.restart_data = 0;
|
||||
S390_lowcore.restart_source = -1U;
|
||||
S390_lowcore.restart_flags = 0;
|
||||
restore_access_regs(S390_lowcore.access_regs_save_area);
|
||||
cpu_init();
|
||||
rcu_cpu_starting(cpu);
|
||||
@ -892,20 +914,6 @@ static void smp_init_secondary(void)
|
||||
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
|
||||
}
|
||||
|
||||
/*
|
||||
* Activate a secondary processor.
|
||||
*/
|
||||
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
|
||||
{
|
||||
S390_lowcore.restart_stack = (unsigned long) restart_stack;
|
||||
S390_lowcore.restart_fn = (unsigned long) do_restart;
|
||||
S390_lowcore.restart_data = 0;
|
||||
S390_lowcore.restart_source = -1UL;
|
||||
__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
|
||||
__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
|
||||
call_on_stack_noreturn(smp_init_secondary, S390_lowcore.kernel_stack);
|
||||
}
|
||||
|
||||
/* Upping and downing of CPUs */
|
||||
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
|
||||
{
|
||||
@ -1055,7 +1063,7 @@ static ssize_t cpu_configure_store(struct device *dev,
|
||||
return -EINVAL;
|
||||
if (val != 0 && val != 1)
|
||||
return -EINVAL;
|
||||
get_online_cpus();
|
||||
cpus_read_lock();
|
||||
mutex_lock(&smp_cpu_state_mutex);
|
||||
rc = -EBUSY;
|
||||
/* disallow configuration changes of online cpus and cpu 0 */
|
||||
@ -1104,7 +1112,7 @@ static ssize_t cpu_configure_store(struct device *dev,
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&smp_cpu_state_mutex);
|
||||
put_online_cpus();
|
||||
cpus_read_unlock();
|
||||
return rc ? rc : count;
|
||||
}
|
||||
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
|
||||
|
@ -9,14 +9,14 @@
|
||||
#include <asm/errno.h>
|
||||
#include <asm/sigp.h>
|
||||
|
||||
.section .dma.text,"ax"
|
||||
.section .amode31.text,"ax"
|
||||
/*
|
||||
* Simplified version of expoline thunk. The normal thunks can not be used here,
|
||||
* because they might be more than 2 GB away, and not reachable by the relative
|
||||
* branch. No comdat, exrl, etc. optimizations used here, because it only
|
||||
* affects a few functions that are not performance-relevant.
|
||||
*/
|
||||
.macro BR_EX_DMA_r14
|
||||
.macro BR_EX_AMODE31_r14
|
||||
larl %r1,0f
|
||||
ex 0,0(%r1)
|
||||
j .
|
||||
@ -24,9 +24,9 @@
|
||||
.endm
|
||||
|
||||
/*
|
||||
* int _diag14_dma(unsigned long rx, unsigned long ry1, unsigned long subcode)
|
||||
* int _diag14_amode31(unsigned long rx, unsigned long ry1, unsigned long subcode)
|
||||
*/
|
||||
ENTRY(_diag14_dma)
|
||||
ENTRY(_diag14_amode31)
|
||||
lgr %r1,%r2
|
||||
lgr %r2,%r3
|
||||
lgr %r3,%r4
|
||||
@ -39,14 +39,14 @@ ENTRY(_diag14_dma)
|
||||
.Ldiag14_fault:
|
||||
sam64
|
||||
lgfr %r2,%r5
|
||||
BR_EX_DMA_r14
|
||||
EX_TABLE_DMA(.Ldiag14_ex, .Ldiag14_fault)
|
||||
ENDPROC(_diag14_dma)
|
||||
BR_EX_AMODE31_r14
|
||||
EX_TABLE_AMODE31(.Ldiag14_ex, .Ldiag14_fault)
|
||||
ENDPROC(_diag14_amode31)
|
||||
|
||||
/*
|
||||
* int _diag210_dma(struct diag210 *addr)
|
||||
* int _diag210_amode31(struct diag210 *addr)
|
||||
*/
|
||||
ENTRY(_diag210_dma)
|
||||
ENTRY(_diag210_amode31)
|
||||
lgr %r1,%r2
|
||||
lhi %r2,-1
|
||||
sam31
|
||||
@ -57,40 +57,40 @@ ENTRY(_diag210_dma)
|
||||
.Ldiag210_fault:
|
||||
sam64
|
||||
lgfr %r2,%r2
|
||||
BR_EX_DMA_r14
|
||||
EX_TABLE_DMA(.Ldiag210_ex, .Ldiag210_fault)
|
||||
ENDPROC(_diag210_dma)
|
||||
BR_EX_AMODE31_r14
|
||||
EX_TABLE_AMODE31(.Ldiag210_ex, .Ldiag210_fault)
|
||||
ENDPROC(_diag210_amode31)
|
||||
|
||||
/*
|
||||
* int _diag26c_dma(void *req, void *resp, enum diag26c_sc subcode)
|
||||
* int _diag26c_amode31(void *req, void *resp, enum diag26c_sc subcode)
|
||||
*/
|
||||
ENTRY(_diag26c_dma)
|
||||
ENTRY(_diag26c_amode31)
|
||||
lghi %r5,-EOPNOTSUPP
|
||||
sam31
|
||||
diag %r2,%r4,0x26c
|
||||
.Ldiag26c_ex:
|
||||
sam64
|
||||
lgfr %r2,%r5
|
||||
BR_EX_DMA_r14
|
||||
EX_TABLE_DMA(.Ldiag26c_ex, .Ldiag26c_ex)
|
||||
ENDPROC(_diag26c_dma)
|
||||
BR_EX_AMODE31_r14
|
||||
EX_TABLE_AMODE31(.Ldiag26c_ex, .Ldiag26c_ex)
|
||||
ENDPROC(_diag26c_amode31)
|
||||
|
||||
/*
|
||||
* void _diag0c_dma(struct hypfs_diag0c_entry *entry)
|
||||
* void _diag0c_amode31(struct hypfs_diag0c_entry *entry)
|
||||
*/
|
||||
ENTRY(_diag0c_dma)
|
||||
ENTRY(_diag0c_amode31)
|
||||
sam31
|
||||
diag %r2,%r2,0x0c
|
||||
sam64
|
||||
BR_EX_DMA_r14
|
||||
ENDPROC(_diag0c_dma)
|
||||
BR_EX_AMODE31_r14
|
||||
ENDPROC(_diag0c_amode31)
|
||||
|
||||
/*
|
||||
* void _diag308_reset_dma(void)
|
||||
* void _diag308_reset_amode31(void)
|
||||
*
|
||||
* Calls diag 308 subcode 1 and continues execution
|
||||
*/
|
||||
ENTRY(_diag308_reset_dma)
|
||||
ENTRY(_diag308_reset_amode31)
|
||||
larl %r4,.Lctlregs # Save control registers
|
||||
stctg %c0,%c15,0(%r4)
|
||||
lg %r2,0(%r4) # Disable lowcore protection
|
||||
@ -107,7 +107,7 @@ ENTRY(_diag308_reset_dma)
|
||||
larl %r4,.Lcontinue_psw # Save PSW flags
|
||||
epsw %r2,%r3
|
||||
stm %r2,%r3,0(%r4)
|
||||
larl %r4,restart_part2 # Setup restart PSW at absolute 0
|
||||
larl %r4,.Lrestart_part2 # Setup restart PSW at absolute 0
|
||||
larl %r3,.Lrestart_diag308_psw
|
||||
og %r4,0(%r3) # Save PSW
|
||||
lghi %r3,0
|
||||
@ -115,7 +115,7 @@ ENTRY(_diag308_reset_dma)
|
||||
lghi %r1,1
|
||||
lghi %r0,0
|
||||
diag %r0,%r1,0x308
|
||||
restart_part2:
|
||||
.Lrestart_part2:
|
||||
lhi %r0,0 # Load r0 with zero
|
||||
lhi %r1,2 # Use mode 2 = ESAME (dump)
|
||||
sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
|
||||
@ -127,19 +127,21 @@ restart_part2:
|
||||
larl %r4,.Lprefix # Restore prefix register
|
||||
spx 0(%r4)
|
||||
larl %r4,.Lcontinue_psw # Restore PSW flags
|
||||
larl %r2,.Lcontinue
|
||||
stg %r2,8(%r4)
|
||||
lpswe 0(%r4)
|
||||
.Lcontinue:
|
||||
BR_EX_DMA_r14
|
||||
ENDPROC(_diag308_reset_dma)
|
||||
BR_EX_AMODE31_r14
|
||||
ENDPROC(_diag308_reset_amode31)
|
||||
|
||||
.section .dma.data,"aw",@progbits
|
||||
.section .amode31.data,"aw",@progbits
|
||||
.align 8
|
||||
.Lrestart_diag308_psw:
|
||||
.long 0x00080000,0x80000000
|
||||
|
||||
.align 8
|
||||
.Lcontinue_psw:
|
||||
.quad 0,.Lcontinue
|
||||
.quad 0,0
|
||||
|
||||
.align 8
|
||||
.Lctlreg0:
|
@ -406,7 +406,7 @@ static ssize_t dispatching_store(struct device *dev,
|
||||
if (val != 0 && val != 1)
|
||||
return -EINVAL;
|
||||
rc = 0;
|
||||
get_online_cpus();
|
||||
cpus_read_lock();
|
||||
mutex_lock(&smp_cpu_state_mutex);
|
||||
if (cpu_management == val)
|
||||
goto out;
|
||||
@ -417,7 +417,7 @@ static ssize_t dispatching_store(struct device *dev,
|
||||
topology_expect_change();
|
||||
out:
|
||||
mutex_unlock(&smp_cpu_state_mutex);
|
||||
put_online_cpus();
|
||||
cpus_read_unlock();
|
||||
return rc ? rc : count;
|
||||
}
|
||||
static DEVICE_ATTR_RW(dispatching);
|
||||
|
@ -291,7 +291,7 @@ static void __init test_monitor_call(void)
|
||||
|
||||
void __init trap_init(void)
|
||||
{
|
||||
sort_extable(__start_dma_ex_table, __stop_dma_ex_table);
|
||||
sort_extable(__start_amode31_ex_table, __stop_amode31_ex_table);
|
||||
local_mcck_enable();
|
||||
test_monitor_call();
|
||||
}
|
||||
|
@ -51,24 +51,9 @@ void __init setup_uv(void)
|
||||
{
|
||||
unsigned long uv_stor_base;
|
||||
|
||||
/*
|
||||
* keep these conditions in line with has_uv_sec_stor_limit()
|
||||
*/
|
||||
if (!is_prot_virt_host())
|
||||
return;
|
||||
|
||||
if (is_prot_virt_guest()) {
|
||||
prot_virt_host = 0;
|
||||
pr_warn("Protected virtualization not available in protected guests.");
|
||||
return;
|
||||
}
|
||||
|
||||
if (!test_facility(158)) {
|
||||
prot_virt_host = 0;
|
||||
pr_warn("Protected virtualization not supported by the hardware.");
|
||||
return;
|
||||
}
|
||||
|
||||
uv_stor_base = (unsigned long)memblock_alloc_try_nid(
|
||||
uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
|
||||
MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
|
||||
|
@ -36,6 +36,7 @@ CPPFLAGS_vdso32.lds += -P -C -U$(ARCH)
|
||||
GCOV_PROFILE := n
|
||||
UBSAN_SANITIZE := n
|
||||
KASAN_SANITIZE := n
|
||||
KCSAN_SANITIZE := n
|
||||
|
||||
# Force dependency (incbin is bad)
|
||||
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
|
||||
|
@ -39,6 +39,7 @@ CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
|
||||
GCOV_PROFILE := n
|
||||
UBSAN_SANITIZE := n
|
||||
KASAN_SANITIZE := n
|
||||
KCSAN_SANITIZE := n
|
||||
|
||||
# Force dependency (incbin is bad)
|
||||
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
|
||||
|
@ -5,6 +5,7 @@
|
||||
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/ftrace.lds.h>
|
||||
|
||||
/*
|
||||
* Put .bss..swapper_pg_dir as the first thing in .bss. This will
|
||||
@ -46,6 +47,7 @@ SECTIONS
|
||||
KPROBES_TEXT
|
||||
IRQENTRY_TEXT
|
||||
SOFTIRQENTRY_TEXT
|
||||
FTRACE_HOTPATCH_TRAMPOLINES_TEXT
|
||||
*(.text.*_indirect_*)
|
||||
*(.fixup)
|
||||
*(.gnu.warning)
|
||||
@ -71,6 +73,13 @@ SECTIONS
|
||||
RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
|
||||
BOOT_DATA_PRESERVED
|
||||
|
||||
. = ALIGN(8);
|
||||
.amode31.refs : {
|
||||
_start_amode31_refs = .;
|
||||
*(.amode31.refs)
|
||||
_end_amode31_refs = .;
|
||||
}
|
||||
|
||||
_edata = .; /* End of data section */
|
||||
|
||||
/* will be freed after init */
|
||||
@ -136,6 +145,32 @@ SECTIONS
|
||||
|
||||
BOOT_DATA
|
||||
|
||||
/*
|
||||
* .amode31 section for code, data, ex_table that need to stay
|
||||
* below 2 GB, even when the kernel is relocated above 2 GB.
|
||||
*/
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
_samode31 = .;
|
||||
.amode31.text : {
|
||||
_stext_amode31 = .;
|
||||
*(.amode31.text)
|
||||
*(.amode31.text.*_indirect_*)
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
_etext_amode31 = .;
|
||||
}
|
||||
. = ALIGN(16);
|
||||
.amode31.ex_table : {
|
||||
_start_amode31_ex_table = .;
|
||||
KEEP(*(.amode31.ex_table))
|
||||
_stop_amode31_ex_table = .;
|
||||
}
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
.amode31.data : {
|
||||
*(.amode31.data)
|
||||
}
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
_eamode31 = .;
|
||||
|
||||
/* early.c uses stsi, which requires page aligned data. */
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
INIT_DATA_SECTION(0x100)
|
||||
|
@ -7,17 +7,10 @@
|
||||
* Heiko Carstens <heiko.carstens@de.ibm.com>,
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/processor.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/timex.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/irqflags.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/irq.h>
|
||||
#include <asm/vtimer.h>
|
||||
#include <asm/div64.h>
|
||||
#include <asm/idle.h>
|
||||
#include <asm/timex.h>
|
||||
|
||||
void __delay(unsigned long loops)
|
||||
{
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/kfence.h>
|
||||
#include <linux/kasan.h>
|
||||
#include <asm/ptdump.h>
|
||||
#include <asm/kasan.h>
|
||||
@ -21,6 +22,10 @@ enum address_markers_idx {
|
||||
IDENTITY_BEFORE_END_NR,
|
||||
KERNEL_START_NR,
|
||||
KERNEL_END_NR,
|
||||
#ifdef CONFIG_KFENCE
|
||||
KFENCE_START_NR,
|
||||
KFENCE_END_NR,
|
||||
#endif
|
||||
IDENTITY_AFTER_NR,
|
||||
IDENTITY_AFTER_END_NR,
|
||||
#ifdef CONFIG_KASAN
|
||||
@ -40,6 +45,10 @@ static struct addr_marker address_markers[] = {
|
||||
[IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
|
||||
[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
|
||||
[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
|
||||
#ifdef CONFIG_KFENCE
|
||||
[KFENCE_START_NR] = {0, "KFence Pool Start"},
|
||||
[KFENCE_END_NR] = {0, "KFence Pool End"},
|
||||
#endif
|
||||
[IDENTITY_AFTER_NR] = {(unsigned long)_end, "Identity Mapping Start"},
|
||||
[IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
|
||||
#ifdef CONFIG_KASAN
|
||||
@ -248,6 +257,9 @@ static void sort_address_markers(void)
|
||||
|
||||
static int pt_dump_init(void)
|
||||
{
|
||||
#ifdef CONFIG_KFENCE
|
||||
unsigned long kfence_start = (unsigned long)__kfence_pool;
|
||||
#endif
|
||||
/*
|
||||
* Figure out the maximum virtual address being accessible with the
|
||||
* kernel ASCE. We need this to keep the page table walker functions
|
||||
@ -262,6 +274,10 @@ static int pt_dump_init(void)
|
||||
address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
|
||||
address_markers[VMALLOC_NR].start_address = VMALLOC_START;
|
||||
address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
|
||||
#ifdef CONFIG_KFENCE
|
||||
address_markers[KFENCE_START_NR].start_address = kfence_start;
|
||||
address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
|
||||
#endif
|
||||
sort_address_markers();
|
||||
#ifdef CONFIG_PTDUMP_DEBUGFS
|
||||
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
|
||||
|
@ -31,6 +31,7 @@
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/kfence.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/diag.h>
|
||||
#include <asm/gmap.h>
|
||||
@ -230,8 +231,8 @@ const struct exception_table_entry *s390_search_extables(unsigned long addr)
|
||||
{
|
||||
const struct exception_table_entry *fixup;
|
||||
|
||||
fixup = search_extable(__start_dma_ex_table,
|
||||
__stop_dma_ex_table - __start_dma_ex_table,
|
||||
fixup = search_extable(__start_amode31_ex_table,
|
||||
__stop_amode31_ex_table - __start_amode31_ex_table,
|
||||
addr);
|
||||
if (!fixup)
|
||||
fixup = search_exception_tables(addr);
|
||||
@ -356,6 +357,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
|
||||
unsigned long address;
|
||||
unsigned int flags;
|
||||
vm_fault_t fault;
|
||||
bool is_write;
|
||||
|
||||
tsk = current;
|
||||
/*
|
||||
@ -369,6 +371,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
|
||||
|
||||
mm = tsk->mm;
|
||||
trans_exc_code = regs->int_parm_long;
|
||||
address = trans_exc_code & __FAIL_ADDR_MASK;
|
||||
is_write = (trans_exc_code & store_indication) == 0x400;
|
||||
|
||||
/*
|
||||
* Verify that the fault happened in user space, that
|
||||
@ -379,6 +383,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
|
||||
type = get_fault_type(regs);
|
||||
switch (type) {
|
||||
case KERNEL_FAULT:
|
||||
if (kfence_handle_page_fault(address, is_write, regs))
|
||||
return 0;
|
||||
goto out;
|
||||
case USER_FAULT:
|
||||
case GMAP_FAULT:
|
||||
@ -387,12 +393,11 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
|
||||
break;
|
||||
}
|
||||
|
||||
address = trans_exc_code & __FAIL_ADDR_MASK;
|
||||
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
|
||||
flags = FAULT_FLAG_DEFAULT;
|
||||
if (user_mode(regs))
|
||||
flags |= FAULT_FLAG_USER;
|
||||
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
|
||||
if (access == VM_WRITE || is_write)
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
mmap_read_lock(mm);
|
||||
|
||||
|
@ -34,6 +34,7 @@
|
||||
#include <asm/processor.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/kfence.h>
|
||||
#include <asm/ptdump.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/lowcore.h>
|
||||
@ -200,7 +201,7 @@ void __init mem_init(void)
|
||||
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
|
||||
|
||||
pv_init();
|
||||
|
||||
kfence_split_mapping();
|
||||
/* Setup guest page hinting */
|
||||
cmma_init();
|
||||
|
||||
|
@@ -107,6 +107,9 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
}

/*
* The first 1MB of 1:1 mapping is mapped with 4KB pages
*/
while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
@@ -157,30 +160,26 @@ static void __init kasan_early_pgtable_populate(unsigned long address,

pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, PMD_SIZE) &&
if (IS_ALIGNED(address, PMD_SIZE) &&
end - address >= PMD_SIZE) {
pmd_populate(&init_mm, pm_dir,
kasan_early_shadow_pte);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
/* the first megabyte of 1:1 is mapped with 4k pages */
if (has_edat && address && end - address >= PMD_SIZE &&
mode != POPULATE_ZERO_SHADOW) {
void *page;
if (mode == POPULATE_ZERO_SHADOW) {
pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
} else if (has_edat && address) {
void *page;

if (mode == POPULATE_ONE2ONE) {
page = (void *)address;
} else {
page = kasan_early_alloc_segment();
memset(page, 0, _SEGMENT_SIZE);
if (mode == POPULATE_ONE2ONE) {
page = (void *)address;
} else {
page = kasan_early_alloc_segment();
memset(page, 0, _SEGMENT_SIZE);
}
pmd_val(*pm_dir) = __pa(page) | sgt_prot;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pmd_val(*pm_dir) = __pa(page) | sgt_prot;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}

pt_dir = kasan_early_pte_alloc();
pmd_populate(&init_mm, pm_dir, pt_dir);
} else if (pmd_large(*pm_dir)) {
@@ -300,7 +299,7 @@ void __init kasan_early_init(void)
pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
initrd_end =
round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
round_up(initrd_data.start + initrd_data.size, _SEGMENT_SIZE);
pgalloc_low = max(pgalloc_low, initrd_end);
}

@@ -228,7 +228,7 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
void *bounce = (void *) addr;
unsigned long size;

get_online_cpus();
cpus_read_lock();
preempt_disable();
if (is_swapped(addr)) {
size = PAGE_SIZE - (addr & ~PAGE_MASK);
@@ -237,7 +237,7 @@ void *xlate_dev_mem_ptr(phys_addr_t addr)
memcpy_absolute(bounce, (void *) addr, size);
}
preempt_enable();
put_online_cpus();
cpus_read_unlock();
return bounce;
}

@@ -228,46 +228,3 @@ void arch_set_page_dat(struct page *page, int order)
return;
set_page_stable_dat(page, order);
}

void arch_set_page_nodat(struct page *page, int order)
{
if (cmma_flag < 2)
return;
set_page_stable_nodat(page, order);
}

int arch_test_page_nodat(struct page *page)
{
unsigned char state;

if (cmma_flag < 2)
return 0;
state = get_page_state(page);
return !!(state & 0x20);
}

void arch_set_page_states(int make_stable)
{
unsigned long flags, order, t;
struct list_head *l;
struct page *page;
struct zone *zone;

if (!cmma_flag)
return;
if (make_stable)
drain_local_pages(NULL);
for_each_populated_zone(zone) {
spin_lock_irqsave(&zone->lock, flags);
for_each_migratetype_order(order, t) {
list_for_each(l, &zone->free_area[order].free_list[t]) {
page = list_entry(l, struct page, lru);
if (make_stable)
set_page_stable_dat(page, order);
else
set_page_unused(page, order);
}
}
spin_unlock_irqrestore(&zone->lock, flags);
}
}

@@ -8,6 +8,7 @@
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/page.h>
#include <asm/set_memory.h>

@@ -85,6 +86,8 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
{
pte_t *ptep, new;

if (flags == SET_MEMORY_4K)
return 0;
ptep = pte_offset_kernel(pmdp, addr);
do {
new = *ptep;
@@ -155,6 +158,7 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
unsigned long flags)
{
unsigned long next;
int need_split;
pmd_t *pmdp;
int rc = 0;

@@ -164,7 +168,10 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
return -EINVAL;
next = pmd_addr_end(addr, end);
if (pmd_large(*pmdp)) {
if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
need_split = !!(flags & SET_MEMORY_4K);
need_split |= !!(addr & ~PMD_MASK);
need_split |= !!(addr + PMD_SIZE > next);
if (need_split) {
rc = split_pmd_page(pmdp, addr);
if (rc)
return rc;
@@ -232,6 +239,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
unsigned long flags)
{
unsigned long next;
int need_split;
pud_t *pudp;
int rc = 0;

@@ -241,7 +249,10 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
return -EINVAL;
next = pud_addr_end(addr, end);
if (pud_large(*pudp)) {
if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
need_split = !!(flags & SET_MEMORY_4K);
need_split |= !!(addr & ~PUD_MASK);
need_split |= !!(addr + PUD_SIZE > next);
if (need_split) {
rc = split_pud_page(pudp, addr);
if (rc)
break;
@@ -316,7 +327,7 @@ int __set_memory(unsigned long addr, int numpages, unsigned long flags)
return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)

static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
@@ -340,7 +351,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
pte_t *pte;

for (i = 0; i < numpages;) {
address = page_to_phys(page + i);
address = (unsigned long)page_to_virt(page + i);
pte = virt_to_kpte(address);
nr = (unsigned long)pte >> ilog2(sizeof(long));
nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));

@@ -581,7 +581,7 @@ void __init vmem_map_init(void)
__set_memory((unsigned long)_sinittext,
(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);

/* we need lowcore executable for our LPSWE instructions */

@@ -113,13 +113,16 @@ int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
struct zpci_fib fib = {0};
u8 status;
u8 cc, status;

WARN_ON_ONCE(iota & 0x3fff);
fib.pba = base;
fib.pal = limit;
fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
cc = zpci_mod_fc(req, &fib, &status);
if (cc)
zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
return cc;
}

/* Modify PCI: Unregister I/O address translation parameters */
@@ -130,9 +133,9 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
u8 cc, status;

cc = zpci_mod_fc(req, &fib, &status);
if (cc == 3) /* Function already gone. */
cc = 0;
return cc ? -EIO : 0;
if (cc)
zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
@@ -659,32 +662,37 @@ void zpci_free_domain(int domain)

int zpci_enable_device(struct zpci_dev *zdev)
{
int rc;
u32 fh = zdev->fh;
int rc = 0;

rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
if (rc)
goto out;

rc = zpci_dma_init_device(zdev);
if (rc)
goto out_dma;

return 0;

out_dma:
clp_disable_fh(zdev);
out:
if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
rc = -EIO;
else
zdev->fh = fh;
return rc;
}

int zpci_disable_device(struct zpci_dev *zdev)
{
zpci_dma_exit_device(zdev);
/*
* The zPCI function may already be disabled by the platform, this is
* detected in clp_disable_fh() which becomes a no-op.
*/
return clp_disable_fh(zdev);
u32 fh = zdev->fh;
int cc, rc = 0;

cc = clp_disable_fh(zdev, &fh);
if (!cc) {
zdev->fh = fh;
} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
zdev->fid);
/* Function is already disabled - update handle */
rc = clp_refresh_fh(zdev->fid, &fh);
if (!rc) {
zdev->fh = fh;
rc = -EINVAL;
}
} else {
rc = -EIO;
}
return rc;
}

/**
@@ -794,6 +802,11 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
if (zdev->zbus->bus)
zpci_bus_remove_device(zdev, false);

if (zdev->dma_table) {
rc = zpci_dma_exit_device(zdev);
if (rc)
return rc;
}
if (zdev_enabled(zdev)) {
rc = zpci_disable_device(zdev);
if (rc)
@@ -817,6 +830,8 @@ void zpci_release_device(struct kref *kref)
if (zdev->zbus->bus)
zpci_bus_remove_device(zdev, false);

if (zdev->dma_table)
zpci_dma_exit_device(zdev);
if (zdev_enabled(zdev))
zpci_disable_device(zdev);

@@ -828,7 +843,8 @@ void zpci_release_device(struct kref *kref)
case ZPCI_FN_STATE_STANDBY:
if (zdev->has_hp_slot)
zpci_exit_slot(zdev);
zpci_cleanup_bus_resources(zdev);
if (zdev->has_resources)
zpci_cleanup_bus_resources(zdev);
zpci_bus_device_unregister(zdev);
zpci_destroy_iommu(zdev);
fallthrough;
@@ -892,7 +908,6 @@ static void zpci_mem_exit(void)
}

static unsigned int s390_pci_probe __initdata = 1;
static unsigned int s390_pci_no_mio __initdata;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

@@ -903,7 +918,7 @@ char * __init pcibios_setup(char *str)
return NULL;
}
if (!strcmp(str, "nomio")) {
s390_pci_no_mio = 1;
S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
return NULL;
}
if (!strcmp(str, "force_floating")) {
@@ -934,7 +949,7 @@ static int __init pci_base_init(void)
return 0;
}

if (test_facility(153) && !s390_pci_no_mio) {
if (MACHINE_HAS_PCI_MIO) {
static_branch_enable(&have_mio);
ctl_set_bit(2, 5);
}

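Aside (not part of the diff): in the reworked enable path above, clp_enable_fh() reports the updated function handle through an out parameter, and zpci_enable_device() only commits it to zdev->fh when the platform call succeeded. A minimal, self-contained C sketch of that commit-on-success pattern follows; every name prefixed with fake_ and the "enable bit" are stand-ins invented for illustration, not code from this series.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct fake_zdev {
	uint32_t fh;	/* last known function handle */
};

/* Stand-in for clp_enable_fh(): reports the new handle via *fh on success. */
static int fake_enable_fh(struct fake_zdev *zdev, uint32_t *fh)
{
	*fh = zdev->fh | 0x80000000u;	/* pretend the platform flipped an enable bit */
	return 0;
}

/* Commit the new handle only when the enable actually succeeded. */
static int fake_enable_device(struct fake_zdev *zdev)
{
	uint32_t fh = zdev->fh;

	if (fake_enable_fh(zdev, &fh))
		return -EIO;	/* zdev->fh stays untouched on failure */
	zdev->fh = fh;
	return 0;
}

int main(void)
{
	struct fake_zdev dev = { .fh = 0x1234 };

	if (!fake_enable_device(&dev))
		printf("new handle: 0x%x\n", (unsigned int)dev.fh);
	return 0;
}
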
@@ -49,6 +49,11 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
rc = zpci_enable_device(zdev);
if (rc)
return rc;
rc = zpci_dma_init_device(zdev);
if (rc) {
zpci_disable_device(zdev);
return rc;
}
}

if (!zdev->has_resources) {
@@ -343,11 +348,11 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
int rc = -EINVAL;

zdev->zbus = zbus;
if (zbus->function[zdev->devfn]) {
pr_err("devfn %04x is already assigned\n", zdev->devfn);
return rc;
}
zdev->zbus = zbus;
zbus->function[zdev->devfn] = zdev;
zpci_nb_devices++;

@@ -367,6 +372,7 @@ static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)

error:
zbus->function[zdev->devfn] = NULL;
zdev->zbus = NULL;
zpci_nb_devices--;
return rc;
}

@@ -212,17 +212,22 @@ int clp_query_pci_fn(struct zpci_dev *zdev)
return rc;
}

static int clp_refresh_fh(u32 fid);
/*
* Enable/Disable a given PCI function and update its function handle if
* necessary
/**
* clp_set_pci_fn() - Execute a command on a PCI function
* @zdev: Function that will be affected
* @fh: Out parameter for updated function handle
* @nr_dma_as: DMA address space number
* @command: The command code to execute
*
* Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
* > 0 for non-success platform responses
*/
static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
static int clp_set_pci_fn(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as, u8 command)
{
struct clp_req_rsp_set_pci *rrb;
int rc, retries = 100;
u32 fid = zdev->fid;

*fh = 0;
rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
@@ -245,17 +250,13 @@ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
}
} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
*fh = rrb->response.fh;
} else {
zpci_err("Set PCI FN:\n");
zpci_err_clp(rrb->response.hdr.rsp, rc);
}

if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
zdev->fh = rrb->response.fh;
} else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
rrb->response.fh == 0) {
/* Function is already in desired state - update handle */
rc = clp_refresh_fh(fid);
if (!rc)
rc = rrb->response.hdr.rsp;
}
clp_free_block(rrb);
return rc;
@@ -295,35 +296,62 @@ int clp_setup_writeback_mio(void)
return rc;
}

int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
int clp_enable_fh(struct zpci_dev *zdev, u32 *fh, u8 nr_dma_as)
{
int rc;

rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
if (rc)
goto out;

if (zpci_use_mio(zdev)) {
rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
if (!rc && zpci_use_mio(zdev)) {
rc = clp_set_pci_fn(zdev, fh, nr_dma_as, CLP_SET_ENABLE_MIO);
zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
zdev->fid, zdev->fh, rc);
zdev->fid, *fh, rc);
if (rc)
clp_disable_fh(zdev);
clp_disable_fh(zdev, fh);
}
out:
return rc;
}

int clp_disable_fh(struct zpci_dev *zdev)
int clp_disable_fh(struct zpci_dev *zdev, u32 *fh)
{
int rc;

if (!zdev_enabled(zdev))
return 0;

rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
rc = clp_set_pci_fn(zdev, fh, 0, CLP_SET_DISABLE_PCI_FN);
zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, *fh, rc);
return rc;
}

static int clp_list_pci_req(struct clp_req_rsp_list_pci *rrb,
u64 *resume_token, int *nentries)
{
int rc;

memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_LIST_PCI;
/* store as many entries as possible */
rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
rrb->request.resume_token = *resume_token;

/* Get PCI function handle list */
rc = clp_req(rrb, CLP_LPS_PCI);
if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
zpci_err("List PCI FN:\n");
zpci_err_clp(rrb->response.hdr.rsp, rc);
return -EIO;
}

update_uid_checking(rrb->response.uid_checking);
WARN_ON_ONCE(rrb->response.entry_size !=
sizeof(struct clp_fh_list_entry));

*nentries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
rrb->response.entry_size;
*resume_token = rrb->response.resume_token;

return rc;
}

@@ -331,40 +359,42 @@ static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
void (*cb)(struct clp_fh_list_entry *, void *))
{
u64 resume_token = 0;
int entries, i, rc;
int nentries, i, rc;

do {
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
rrb->request.hdr.cmd = CLP_LIST_PCI;
/* store as many entries as possible */
rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
rrb->request.resume_token = resume_token;

/* Get PCI function handle list */
rc = clp_req(rrb, CLP_LPS_PCI);
if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
zpci_err("List PCI FN:\n");
zpci_err_clp(rrb->response.hdr.rsp, rc);
rc = -EIO;
goto out;
}

update_uid_checking(rrb->response.uid_checking);
WARN_ON_ONCE(rrb->response.entry_size !=
sizeof(struct clp_fh_list_entry));

entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
rrb->response.entry_size;

resume_token = rrb->response.resume_token;
for (i = 0; i < entries; i++)
rc = clp_list_pci_req(rrb, &resume_token, &nentries);
if (rc)
return rc;
for (i = 0; i < nentries; i++)
cb(&rrb->response.fh_list[i], data);
} while (resume_token);
out:

return rc;
}

static int clp_find_pci(struct clp_req_rsp_list_pci *rrb, u32 fid,
struct clp_fh_list_entry *entry)
{
struct clp_fh_list_entry *fh_list;
u64 resume_token = 0;
int nentries, i, rc;

do {
rc = clp_list_pci_req(rrb, &resume_token, &nentries);
if (rc)
return rc;
for (i = 0; i < nentries; i++) {
fh_list = rrb->response.fh_list;
if (fh_list[i].fid == fid) {
*entry = fh_list[i];
return 0;
}
}
} while (resume_token);

return -ENODEV;
}

static void __clp_add(struct clp_fh_list_entry *entry, void *data)
{
struct zpci_dev *zdev;
@@ -392,67 +422,41 @@ int clp_scan_pci_devices(void)
return rc;
}

static void __clp_refresh_fh(struct clp_fh_list_entry *entry, void *data)
{
struct zpci_dev *zdev;
u32 fid = *((u32 *)data);

if (!entry->vendor_id || fid != entry->fid)
return;

zdev = get_zdev_by_fid(fid);
if (!zdev)
return;

zdev->fh = entry->fh;
}

/*
* Refresh the function handle of the function matching @fid
* Get the current function handle of the function matching @fid
*/
static int clp_refresh_fh(u32 fid)
int clp_refresh_fh(u32 fid, u32 *fh)
{
struct clp_req_rsp_list_pci *rrb;
struct clp_fh_list_entry entry;
int rc;

rrb = clp_alloc_block(GFP_NOWAIT);
if (!rrb)
return -ENOMEM;

rc = clp_list_pci(rrb, &fid, __clp_refresh_fh);
rc = clp_find_pci(rrb, fid, &entry);
if (!rc)
*fh = entry.fh;

clp_free_block(rrb);
return rc;
}

struct clp_state_data {
u32 fid;
enum zpci_state state;
};

static void __clp_get_state(struct clp_fh_list_entry *entry, void *data)
{
struct clp_state_data *sd = data;

if (entry->fid != sd->fid)
return;

sd->state = entry->config_state;
}

int clp_get_state(u32 fid, enum zpci_state *state)
{
struct clp_req_rsp_list_pci *rrb;
struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
struct clp_fh_list_entry entry;
int rc;

*state = ZPCI_FN_STATE_RESERVED;
rrb = clp_alloc_block(GFP_ATOMIC);
if (!rrb)
return -ENOMEM;

rc = clp_list_pci(rrb, &sd, __clp_get_state);
rc = clp_find_pci(rrb, fid, &entry);
if (!rc)
*state = sd.state;
*state = entry.config_state;

clp_free_block(rrb);
return rc;

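Aside (not part of the diff): the kerneldoc added for clp_set_pci_fn() spells out a three-way return convention: 0 on success, negative values for Linux errors, positive values for non-success platform responses. A small, self-contained C sketch of how a caller might classify such a result is shown below; the constant name and value and the errno mapping are illustrative assumptions, not values from this series.

#include <errno.h>
#include <stdio.h>

/* Placeholder response code; the real codes live in the s390 CLP headers. */
#define FAKE_RC_ALREADY	0x0100	/* "already in requested state", value made up */

/*
 * Classify a clp_set_pci_fn()-style result:
 *   0   -> success
 *   < 0 -> Linux error (e.g. -ENOMEM), passed through unchanged
 *   > 0 -> platform response code, mapped to an errno for callers
 */
static int classify_result(int rc)
{
	if (rc == 0)
		return 0;
	if (rc < 0)
		return rc;
	if (rc == FAKE_RC_ALREADY)
		return -EINVAL;	/* loosely mirrors the zpci_disable_device() handling above */
	return -EIO;
}

int main(void)
{
	printf("%d %d %d\n", classify_result(0), classify_result(-ENOMEM),
	       classify_result(FAKE_RC_ALREADY));
	return 0;
}
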
@@ -590,10 +590,11 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
}

}
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64)zdev->dma_table)) {
rc = -EIO;
goto free_bitmap;
}

return 0;
free_bitmap:
@@ -608,17 +609,25 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
int zpci_dma_exit_device(struct zpci_dev *zdev)
{
int cc = 0;

/*
* At this point, if the device is part of an IOMMU domain, this would
* be a strong hint towards a bug in the IOMMU API (common) code and/or
* simultaneous access via IOMMU and DMA API. So let's issue a warning.
*/
WARN_ON(zdev->s390_domain);

if (zpci_unregister_ioat(zdev, 0))
return;
if (zdev_enabled(zdev))
cc = zpci_unregister_ioat(zdev, 0);
/*
* cc == 3 indicates the function is gone already. This can happen
* if the function was deconfigured/disabled suddenly and we have not
* received a new handle yet.
*/
if (cc && cc != 3)
return -EIO;

dma_cleanup_tables(zdev->dma_table);
zdev->dma_table = NULL;
@@ -626,8 +635,8 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
zdev->iommu_bitmap = NULL;
vfree(zdev->lazy_bitmap);
zdev->lazy_bitmap = NULL;

zdev->next_bit = 0;
return 0;
}

static int __init dma_alloc_cpu_table_caches(void)

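Aside (not part of the diff): zpci_dma_exit_device() now returns an int, and the comment above documents that condition code 3 (function already gone) is tolerated while any other non-zero code becomes -EIO. A compact, self-contained C sketch that loosely follows that contract and the caller-side check is given below; everything prefixed with fake_ is a stand-in invented for illustration.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct zpci_dev with just the fields the sketch needs. */
struct fake_zpci_dev {
	void *dma_table;
	bool enabled;
};

/* Pretend unregister: return cc 3 to simulate a function that vanished. */
static int fake_unregister_ioat(struct fake_zpci_dev *zdev)
{
	(void)zdev;
	return 3;
}

/*
 * Loosely follows the reworked zpci_dma_exit_device() contract: cc == 3
 * (function already gone) is not an error, any other non-zero cc is -EIO.
 */
static int fake_dma_exit_device(struct fake_zpci_dev *zdev)
{
	int cc = 0;

	if (zdev->enabled)
		cc = fake_unregister_ioat(zdev);
	if (cc && cc != 3)
		return -EIO;
	zdev->dma_table = NULL;	/* tear down the translation state */
	return 0;
}

int main(void)
{
	struct fake_zpci_dev dev = { .dma_table = (void *)&dev, .enabled = true };

	/* Caller-side pattern used throughout the diff: bail out on failure. */
	if (dev.dma_table && fake_dma_exit_device(&dev))
		fprintf(stderr, "DMA teardown failed\n");
	return 0;
}
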
@@ -84,7 +84,10 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
/* Even though the device is already gone we still
* need to free zPCI resources as part of the disable.
*/
zpci_disable_device(zdev);
if (zdev->dma_table)
zpci_dma_exit_device(zdev);
if (zdev_enabled(zdev))
zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
}

@@ -82,13 +82,26 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
pci_lock_rescan_remove();
if (pci_dev_is_added(pdev)) {
pci_stop_and_remove_bus_device(pdev);
ret = zpci_disable_device(zdev);
if (ret)
goto out;
if (zdev->dma_table) {
ret = zpci_dma_exit_device(zdev);
if (ret)
goto out;
}

if (zdev_enabled(zdev)) {
ret = zpci_disable_device(zdev);
if (ret)
goto out;
}

ret = zpci_enable_device(zdev);
if (ret)
goto out;
ret = zpci_dma_init_device(zdev);
if (ret) {
zpci_disable_device(zdev);
goto out;
}
pci_rescan_bus(zdev->zbus->bus);
}
out:

@@ -19,6 +19,7 @@ KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
KCSAN_SANITIZE := n

KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare

@@ -189,6 +189,8 @@ ad stosm SI_URD
ae sigp RS_RRRD
af mc SI_URD
b1 lra RX_RRRD
b200 lbear S_RD
b201 stbear S_RD
b202 stidp S_RD
b204 sck S_RD
b205 stck S_RD
@@ -523,6 +525,7 @@ b931 clgfr RRE_RR
b938 sortl RRE_RR
b939 dfltcc RRF_R0RR2
b93a kdsa RRE_RR
b93b nnpa RRE_00
b93c ppno RRE_RR
b93e kimd RRE_RR
b93f klmd RRE_RR
@@ -562,6 +565,7 @@ b987 dlgr RRE_RR
b988 alcgr RRE_RR
b989 slbgr RRE_RR
b98a cspg RRE_RR
b98b rdp RRF_RURR2
b98d epsw RRE_RR
b98e idte RRF_RURR2
b98f crdte RRF_RURR2
@@ -876,19 +880,32 @@ e63d vstrl VSI_URDV
e63f vstrlr VRS_RRDV
e649 vlip VRI_V0UU2
e650 vcvb VRR_RV0UU
e651 vclzdp VRR_VV0U2
e652 vcvbg VRR_RV0UU
e654 vupkzh VRR_VV0U2
e655 vcnf VRR_VV0UU2
e656 vclfnh VRR_VV0UU2
e658 vcvd VRI_VR0UU
e659 vsrp VRI_VVUUU2
e65a vcvdg VRI_VR0UU
e65b vpsop VRI_VVUUU2
e65c vupkzl VRR_VV0U2
e65d vcfn VRR_VV0UU2
e65e vclfnl VRR_VV0UU2
e65f vtp VRR_0V
e670 vpkzr VRI_VVV0UU2
e671 vap VRI_VVV0UU2
e672 vsrpr VRI_VVV0UU2
e673 vsp VRI_VVV0UU2
e674 vschp VRR_VVV0U0U
e675 vcrnf VRR_VVV0UU
e677 vcp VRR_0VV0U
e678 vmp VRI_VVV0UU2
e679 vmsp VRI_VVV0UU2
e67a vdp VRI_VVV0UU2
e67b vrp VRI_VVV0UU2
e67c vscshp VRR_VVV
e67d vcsph VRR_VVV0U0
e67e vsdp VRI_VVV0UU2
e700 vleb VRX_VRRDU
e701 vleh VRX_VRRDU
@@ -1081,6 +1098,7 @@ eb61 stric RSY_RDRU
eb62 mric RSY_RDRU
eb6a asi SIY_IRD
eb6e alsi SIY_IRD
eb71 lpswey SIY_URD
eb7a agsi SIY_IRD
eb7e algsi SIY_IRD
eb80 icmh RSY_RURD

@@ -8,6 +8,8 @@
#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

#ifndef MODULE

#include <linux/bug.h>
#include <linux/kfence.h>

@@ -66,4 +68,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
return true;
}

#endif /* !MODULE */

#endif /* _ASM_X86_KFENCE_H */

@@ -90,7 +90,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
struct zpci_dev *zdev = to_zpci_dev(dev);
struct s390_domain_device *domain_device;
unsigned long flags;
int rc;
int cc, rc;

if (!zdev)
return -ENODEV;
@@ -99,14 +99,21 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
if (!domain_device)
return -ENOMEM;

if (zdev->dma_table)
zpci_dma_exit_device(zdev);
if (zdev->dma_table) {
cc = zpci_dma_exit_device(zdev);
if (cc) {
rc = -EIO;
goto out_free;
}
}

zdev->dma_table = s390_domain->dma_table;
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
if (cc) {
rc = -EIO;
goto out_restore;
}

spin_lock_irqsave(&s390_domain->list_lock, flags);
/* First device defines the DMA range limits */
@@ -130,6 +137,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,

out_restore:
zpci_dma_init_device(zdev);
out_free:
kfree(domain_device);

return rc;

@@ -575,10 +575,8 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
else
argp = (void __user *)arg;

if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg) {
PRINT_DEBUG("empty data ptr");
if ((_IOC_DIR(cmd) != _IOC_NONE) && !arg)
return -EINVAL;
}

base = dasd_device_from_gendisk(bdev->bd_disk);
if (!base)