Diffstat (limited to 'arch/s390/kernel')
40 files changed, 703 insertions, 676 deletions
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 7a562b4199..db2d9ba5a8 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile @@ -11,6 +11,8 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) # Do not trace early setup code CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_stacktrace.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_unwind_bc.o = $(CC_FLAGS_FTRACE) endif @@ -64,6 +66,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += mcount.o obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o +obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o obj-$(CONFIG_UPROBES) += uprobes.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index fa5f6885c7..2f65bca2f3 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -66,6 +66,11 @@ int main(void) OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys); DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame)); BLANK(); + OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain); + DEFINE(STACK_FRAME_USER_OVERHEAD, sizeof(struct stack_frame_user)); + OFFSET(__SFVDSO_RETURN_ADDRESS, stack_frame_vdso_wrapper, return_address); + DEFINE(STACK_FRAME_VDSO_OVERHEAD, sizeof(struct stack_frame_vdso_wrapper)); + BLANK(); /* idle data offsets */ OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter); OFFSET(__TIMER_IDLE_ENTER, s390_idle_data, timer_idle_enter); diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c index f8fc6c25d0..1942e2a9f8 100644 --- a/arch/s390/kernel/compat_signal.c +++ b/arch/s390/kernel/compat_signal.c @@ -24,12 +24,12 @@ #include <linux/tty.h> #include <linux/personality.h> #include <linux/binfmts.h> +#include <asm/access-regs.h> #include <asm/ucontext.h> #include <linux/uaccess.h> #include <asm/lowcore.h> -#include <asm/switch_to.h> #include <asm/vdso.h> -#include <asm/fpu/api.h> +#include <asm/fpu.h> #include "compat_linux.h" #include "compat_ptrace.h" #include "entry.h" @@ -56,7 +56,7 @@ typedef struct static void store_sigregs(void) { save_access_regs(current->thread.acrs); - save_fpu_regs(); + save_user_fpu_regs(); } /* Load registers after signal return */ @@ -79,7 +79,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs) user_sregs.regs.gprs[i] = (__u32) regs->gprs[i]; memcpy(&user_sregs.regs.acrs, current->thread.acrs, sizeof(user_sregs.regs.acrs)); - fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu); + fpregs_store((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.ufpu); if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs32))) return -EFAULT; return 0; @@ -113,7 +113,7 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) regs->gprs[i] = (__u64) user_sregs.regs.gprs[i]; memcpy(&current->thread.acrs, &user_sregs.regs.acrs, sizeof(current->thread.acrs)); - fpregs_load((_s390_fp_regs *) &user_sregs.fpregs, &current->thread.fpu); + fpregs_load((_s390_fp_regs *)&user_sregs.fpregs, &current->thread.ufpu); clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */ return 0; @@ -136,11 +136,11 @@ static int save_sigregs_ext32(struct pt_regs *regs, /* Save vector registers to signal stack */ if (cpu_has_vx()) { for (i = 0; i < __NUM_VXRS_LOW; i++) - vxrs[i] = current->thread.fpu.vxrs[i].low; + vxrs[i] = current->thread.ufpu.vxrs[i].low; if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
sizeof(sregs_ext->vxrs_low)) || __copy_to_user(&sregs_ext->vxrs_high, - current->thread.fpu.vxrs + __NUM_VXRS_LOW, + current->thread.ufpu.vxrs + __NUM_VXRS_LOW, sizeof(sregs_ext->vxrs_high))) return -EFAULT; } @@ -165,12 +165,12 @@ static int restore_sigregs_ext32(struct pt_regs *regs, if (cpu_has_vx()) { if (__copy_from_user(vxrs, &sregs_ext->vxrs_low, sizeof(sregs_ext->vxrs_low)) || - __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW, + __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW, &sregs_ext->vxrs_high, sizeof(sregs_ext->vxrs_high))) return -EFAULT; for (i = 0; i < __NUM_VXRS_LOW; i++) - current->thread.fpu.vxrs[i].low = vxrs[i]; + current->thread.ufpu.vxrs[i].low = vxrs[i]; } return 0; } @@ -184,7 +184,7 @@ COMPAT_SYSCALL_DEFINE0(sigreturn) if (get_compat_sigset(&set, (compat_sigset_t __user *)frame->sc.oldmask)) goto badframe; set_current_blocked(&set); - save_fpu_regs(); + save_user_fpu_regs(); if (restore_sigregs32(regs, &frame->sregs)) goto badframe; if (restore_sigregs_ext32(regs, &frame->sregs_ext)) @@ -207,7 +207,7 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn) set_current_blocked(&set); if (compat_restore_altstack(&frame->uc.uc_stack)) goto badframe; - save_fpu_regs(); + save_user_fpu_regs(); if (restore_sigregs32(regs, &frame->uc.uc_mcontext)) goto badframe; if (restore_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext)) diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 5c46c26593..d09ebb6f52 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -22,7 +22,7 @@ #include <asm/ipl.h> #include <asm/sclp.h> #include <asm/maccess.h> -#include <asm/fpu/api.h> +#include <asm/fpu.h> #define PTR_ADD(x, y) (((char *) (x)) + ((unsigned long) (y))) #define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c index 92fdc35f02..8dee9aa0ec 100644 --- a/arch/s390/kernel/diag.c +++ b/arch/s390/kernel/diag.c @@ -147,11 +147,40 @@ void notrace diag_stat_inc_norecursion(enum diag_stat_enum nr) EXPORT_SYMBOL(diag_stat_inc_norecursion); /* + * Diagnose 0c: Pseudo Timer + */ +void diag0c(struct hypfs_diag0c_entry *data) +{ + diag_stat_inc(DIAG_STAT_X00C); + diag_amode31_ops.diag0c(virt_to_phys(data)); +} + +/* * Diagnose 14: Input spool file manipulation + * + * The subcode parameter determines the type of the first parameter rx. + * Currently used are the following 3 subcommands: + * 0x0: Read the Next Spool File Buffer (Data Record) + * 0x28: Position a Spool File to the Designated Record + * 0xfff: Retrieve Next File Descriptor + * + * For subcommands 0x0 and 0xfff, the value of the first parameter is + * a virtual address of a memory buffer and needs virtual to physical + * address translation. For other subcommands the rx parameter is not + * a virtual address. 
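Note: the diag14() hunk that follows encodes the rule just described, translating rx only for the subcodes that carry a buffer address. A minimal standalone model of that rule (the helper name is illustrative and not part of the patch):

#include <asm/io.h>	/* virt_to_phys(); kernel context assumed */

/* Mirror of the subcode dispatch in diag14() below: only subcodes
 * 0x0 and 0xfff pass a buffer address in rx; subcode 0x28 passes a
 * spool file position, which must not be translated. */
static unsigned long diag14_rx_to_phys(unsigned long rx, unsigned long subcode)
{
	switch (subcode) {
	case 0x0:	/* read next spool file buffer */
	case 0xfff:	/* retrieve next file descriptor */
		return virt_to_phys((void *)rx);
	default:	/* e.g. 0x28: rx is not an address */
		return rx;
	}
}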
*/ int diag14(unsigned long rx, unsigned long ry1, unsigned long subcode) { diag_stat_inc(DIAG_STAT_X014); + switch (subcode) { + case 0x0: + case 0xfff: + rx = virt_to_phys((void *)rx); + break; + default: + /* Do nothing */ + break; + } return diag_amode31_ops.diag14(rx, ry1, subcode); } EXPORT_SYMBOL(diag14); @@ -265,6 +294,6 @@ EXPORT_SYMBOL(diag224); int diag26c(void *req, void *resp, enum diag26c_sc subcode) { diag_stat_inc(DIAG_STAT_X26C); - return diag_amode31_ops.diag26c(req, resp, subcode); + return diag_amode31_ops.diag26c(virt_to_phys(req), virt_to_phys(resp), subcode); } EXPORT_SYMBOL(diag26c); diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 2345ea332b..c666271433 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -19,8 +19,10 @@ #include <linux/kernel.h> #include <asm/asm-extable.h> #include <linux/memblock.h> +#include <asm/access-regs.h> #include <asm/diag.h> #include <asm/ebcdic.h> +#include <asm/fpu.h> #include <asm/ipl.h> #include <asm/lowcore.h> #include <asm/processor.h> @@ -31,7 +33,6 @@ #include <asm/sclp.h> #include <asm/facility.h> #include <asm/boot_data.h> -#include <asm/switch_to.h> #include "entry.h" #define decompressor_handled_param(param) \ diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 26c08ee877..6a1e0fbbaa 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -24,7 +24,7 @@ #include <asm/page.h> #include <asm/sigp.h> #include <asm/irq.h> -#include <asm/vx-insn.h> +#include <asm/fpu-insn.h> #include <asm/setup.h> #include <asm/nmi.h> #include <asm/nospec-insn.h> @@ -119,33 +119,11 @@ _LPP_OFFSET = __LC_LPP .endm #if IS_ENABLED(CONFIG_KVM) - /* - * The OUTSIDE macro jumps to the provided label in case the value - * in the provided register is outside of the provided range. The - * macro is useful for checking whether a PSW stored in a register - * pair points inside or outside of a block of instructions. 
- * @reg: register to check - * @start: start of the range - * @end: end of the range - * @outside_label: jump here if @reg is outside of [@start..@end) - */ - .macro OUTSIDE reg,start,end,outside_label - lgr %r14,\reg - larl %r13,\start - slgr %r14,%r13 - clgfrl %r14,.Lrange_size\@ - jhe \outside_label - .section .rodata, "a" - .balign 4 -.Lrange_size\@: - .long \end - \start - .previous - .endm - - .macro SIEEXIT - lg %r9,__SF_SIE_CONTROL(%r15) # get control block pointer + .macro SIEEXIT sie_control + lg %r9,\sie_control # get control block pointer ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce + ni __LC_CPU_FLAGS+7,255-_CIF_SIE larl %r9,sie_exit # skip forward to sie_exit .endm #endif @@ -171,13 +149,13 @@ _LPP_OFFSET = __LC_LPP nop 0 /* - * Scheduler resume function, called by switch_to - * gpr2 = (task_struct *) prev - * gpr3 = (task_struct *) next + * Scheduler resume function, called by __switch_to + * gpr2 = (task_struct *)prev + * gpr3 = (task_struct *)next * Returns: * gpr2 = prev */ -SYM_FUNC_START(__switch_to) +SYM_FUNC_START(__switch_to_asm) stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task lghi %r4,__TASK_stack lghi %r1,__TASK_thread @@ -193,7 +171,7 @@ SYM_FUNC_START(__switch_to) lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task ALTERNATIVE "nop", "lpp _LPP_OFFSET", 40 BR_EX %r14 -SYM_FUNC_END(__switch_to) +SYM_FUNC_END(__switch_to_asm) #if IS_ENABLED(CONFIG_KVM) /* @@ -214,14 +192,13 @@ SYM_FUNC_START(__sie64a) lg %r14,__LC_GMAP # get gmap pointer ltgr %r14,%r14 jz .Lsie_gmap + oi __LC_CPU_FLAGS+7,_CIF_SIE lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce .Lsie_gmap: lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now tm __SIE_PROG20+3(%r14),3 # last exit... jnz .Lsie_skip - TSTMSK __LC_CPU_FLAGS,_CIF_FPU - jo .Lsie_skip # exit if fp/vx regs changed lg %r14,__SF_SIE_CONTROL_PHYS(%r15) # get sie block phys addr BPEXIT __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST .Lsie_entry: @@ -236,7 +213,7 @@ SYM_FUNC_START(__sie64a) lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE lctlg %c1,%c1,__LC_KERNEL_ASCE # load primary asce -.Lsie_done: + ni __LC_CPU_FLAGS+7,255-_CIF_SIE # some program checks are suppressing. C code (e.g. do_protection_exception) # will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There # are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable. @@ -339,20 +316,13 @@ SYM_CODE_START(pgm_check_handler) stpt __LC_SYS_ENTER_TIMER BPOFF stmg %r8,%r15,__LC_SAVE_AREA_SYNC - lghi %r10,0 + lgr %r10,%r15 lmg %r8,%r9,__LC_PGM_OLD_PSW tmhh %r8,0x0001 # coming from user space? jno .Lpgm_skip_asce lctlg %c1,%c1,__LC_KERNEL_ASCE j 3f # -> fault in user space .Lpgm_skip_asce: -#if IS_ENABLED(CONFIG_KVM) - # cleanup critical section for program checks in __sie64a - OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f - BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST - SIEEXIT - lghi %r10,_PIF_GUEST_FAULT -#endif 1: tmhh %r8,0x4000 # PER bit set in old PSW ? 
jnz 2f # -> enabled, can't be a double fault tm __LC_PGM_ILC+3,0x80 # check for per exception @@ -363,13 +333,21 @@ SYM_CODE_START(pgm_check_handler) CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f 3: lg %r15,__LC_KERNEL_STACK 4: la %r11,STACK_FRAME_OVERHEAD(%r15) - stg %r10,__PT_FLAGS(%r11) + xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC mvc __PT_LAST_BREAK(8,%r11),__LC_PGM_LAST_BREAK - stmg %r8,%r9,__PT_PSW(%r11) - + stctg %c1,%c1,__PT_CR1(%r11) +#if IS_ENABLED(CONFIG_KVM) + ltg %r12,__LC_GMAP + jz 5f + clc __GMAP_ASCE(8,%r12), __PT_CR1(%r11) + jne 5f + BPENTER __SF_SIE_FLAGS(%r10),_TIF_ISOLATE_BP_GUEST + SIEEXIT __SF_SIE_CONTROL(%r10) +#endif +5: stmg %r8,%r9,__PT_PSW(%r11) # clear user controlled registers to prevent speculative use xgr %r0,%r0 xgr %r1,%r1 @@ -418,9 +396,10 @@ SYM_CODE_START(\name) tmhh %r8,0x0001 # interrupting from user ? jnz 1f #if IS_ENABLED(CONFIG_KVM) - OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f + TSTMSK __LC_CPU_FLAGS,_CIF_SIE + jz 0f BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST - SIEEXIT + SIEEXIT __SF_SIE_CONTROL(%r15) #endif 0: CHECK_STACK __LC_SAVE_AREA_ASYNC aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) @@ -489,16 +468,11 @@ SYM_FUNC_END(psw_idle) */ SYM_CODE_START(mcck_int_handler) BPOFF - la %r1,4095 # validate r1 - spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # validate cpu timer - LBEAR __LC_LAST_BREAK_SAVE_AREA-4095(%r1) # validate bear - lmg %r0,%r15,__LC_GPREGS_SAVE_AREA # validate gprs lmg %r8,%r9,__LC_MCK_OLD_PSW TSTMSK __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE jo .Lmcck_panic # yes -> rest of mcck code invalid TSTMSK __LC_MCCK_CODE,MCCK_CODE_CR_VALID jno .Lmcck_panic # control registers invalid -> panic - lctlg %c0,%c15,__LC_CREGS_SAVE_AREA # validate ctl regs ptlb lghi %r14,__LC_CPU_TIMER_SAVE_AREA mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) @@ -520,11 +494,20 @@ SYM_CODE_START(mcck_int_handler) TSTMSK __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID jno .Lmcck_panic #if IS_ENABLED(CONFIG_KVM) - OUTSIDE %r9,.Lsie_gmap,.Lsie_done,.Lmcck_user - OUTSIDE %r9,.Lsie_entry,.Lsie_leave,4f + TSTMSK __LC_CPU_FLAGS,_CIF_SIE + jz .Lmcck_user + # Need to compare the address instead of a CIF_SIE* flag. + # Otherwise there would be a race between setting the flag + # and entering SIE (or leaving and clearing the flag). This + # would cause machine checks targeted at the guest to be + # handled by the host. 
+ larl %r14,.Lsie_entry + clgrjl %r9,%r14, 4f + larl %r14,.Lsie_leave + clgrjhe %r9,%r14, 4f oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST 4: BPENTER __SF_SIE_FLAGS(%r15),_TIF_ISOLATE_BP_GUEST - SIEEXIT + SIEEXIT __SF_SIE_CONTROL(%r15) #endif .Lmcck_user: lg %r15,__LC_MCCK_STACK diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index 9f41853f36..21969520f9 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -19,6 +19,7 @@ void mcck_int_handler(void); void restart_int_handler(void); void early_pgm_check_handler(void); +struct task_struct *__switch_to_asm(struct task_struct *prev, struct task_struct *next); void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs); void __do_pgm_check(struct pt_regs *regs); void __do_syscall(struct pt_regs *regs, int per_trap); diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c index a4f3449cc8..fa90bbdc5e 100644 --- a/arch/s390/kernel/fpu.c +++ b/arch/s390/kernel/fpu.c @@ -8,256 +8,186 @@ #include <linux/kernel.h> #include <linux/cpu.h> #include <linux/sched.h> -#include <asm/fpu/types.h> -#include <asm/fpu/api.h> -#include <asm/vx-insn.h> +#include <asm/fpu.h> -void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags) +void __kernel_fpu_begin(struct kernel_fpu *state, int flags) { + __vector128 *vxrs = state->vxrs; + int mask; + /* * Limit the save to the FPU/vector registers already - * in use by the previous context + * in use by the previous context. */ - flags &= state->mask; - + flags &= state->hdr.mask; if (flags & KERNEL_FPC) - /* Save floating point control */ - asm volatile("stfpc %0" : "=Q" (state->fpc)); - + fpu_stfpc(&state->hdr.fpc); if (!cpu_has_vx()) { - if (flags & KERNEL_VXR_V0V7) { - /* Save floating-point registers */ - asm volatile("std 0,%0" : "=Q" (state->fprs[0])); - asm volatile("std 1,%0" : "=Q" (state->fprs[1])); - asm volatile("std 2,%0" : "=Q" (state->fprs[2])); - asm volatile("std 3,%0" : "=Q" (state->fprs[3])); - asm volatile("std 4,%0" : "=Q" (state->fprs[4])); - asm volatile("std 5,%0" : "=Q" (state->fprs[5])); - asm volatile("std 6,%0" : "=Q" (state->fprs[6])); - asm volatile("std 7,%0" : "=Q" (state->fprs[7])); - asm volatile("std 8,%0" : "=Q" (state->fprs[8])); - asm volatile("std 9,%0" : "=Q" (state->fprs[9])); - asm volatile("std 10,%0" : "=Q" (state->fprs[10])); - asm volatile("std 11,%0" : "=Q" (state->fprs[11])); - asm volatile("std 12,%0" : "=Q" (state->fprs[12])); - asm volatile("std 13,%0" : "=Q" (state->fprs[13])); - asm volatile("std 14,%0" : "=Q" (state->fprs[14])); - asm volatile("std 15,%0" : "=Q" (state->fprs[15])); - } + if (flags & KERNEL_VXR_LOW) + save_fp_regs_vx(vxrs); return; } - - /* Test and save vector registers */ - asm volatile ( - /* - * Test if any vector register must be saved and, if so, - * test if all register can be saved. - */ - " la 1,%[vxrs]\n" /* load save area */ - " tmll %[m],30\n" /* KERNEL_VXR */ - " jz 7f\n" /* no work -> done */ - " jo 5f\n" /* -> save V0..V31 */ - /* - * Test for special case KERNEL_FPU_MID only. 
In this - * case a vstm V8..V23 is the best instruction - */ - " chi %[m],12\n" /* KERNEL_VXR_MID */ - " jne 0f\n" /* -> save V8..V23 */ - " VSTM 8,23,128,1\n" /* vstm %v8,%v23,128(%r1) */ - " j 7f\n" - /* Test and save the first half of 16 vector registers */ - "0: tmll %[m],6\n" /* KERNEL_VXR_LOW */ - " jz 3f\n" /* -> KERNEL_VXR_HIGH */ - " jo 2f\n" /* 11 -> save V0..V15 */ - " brc 2,1f\n" /* 10 -> save V8..V15 */ - " VSTM 0,7,0,1\n" /* vstm %v0,%v7,0(%r1) */ - " j 3f\n" - "1: VSTM 8,15,128,1\n" /* vstm %v8,%v15,128(%r1) */ - " j 3f\n" - "2: VSTM 0,15,0,1\n" /* vstm %v0,%v15,0(%r1) */ - /* Test and save the second half of 16 vector registers */ - "3: tmll %[m],24\n" /* KERNEL_VXR_HIGH */ - " jz 7f\n" - " jo 6f\n" /* 11 -> save V16..V31 */ - " brc 2,4f\n" /* 10 -> save V24..V31 */ - " VSTM 16,23,256,1\n" /* vstm %v16,%v23,256(%r1) */ - " j 7f\n" - "4: VSTM 24,31,384,1\n" /* vstm %v24,%v31,384(%r1) */ - " j 7f\n" - "5: VSTM 0,15,0,1\n" /* vstm %v0,%v15,0(%r1) */ - "6: VSTM 16,31,256,1\n" /* vstm %v16,%v31,256(%r1) */ - "7:" - : [vxrs] "=Q" (*(struct vx_array *) &state->vxrs) - : [m] "d" (flags) - : "1", "cc"); + mask = flags & KERNEL_VXR; + if (mask == KERNEL_VXR) { + vxrs += fpu_vstm(0, 15, vxrs); + vxrs += fpu_vstm(16, 31, vxrs); + return; + } + if (mask == KERNEL_VXR_MID) { + vxrs += fpu_vstm(8, 23, vxrs); + return; + } + mask = flags & KERNEL_VXR_LOW; + if (mask) { + if (mask == KERNEL_VXR_LOW) + vxrs += fpu_vstm(0, 15, vxrs); + else if (mask == KERNEL_VXR_V0V7) + vxrs += fpu_vstm(0, 7, vxrs); + else + vxrs += fpu_vstm(8, 15, vxrs); + } + mask = flags & KERNEL_VXR_HIGH; + if (mask) { + if (mask == KERNEL_VXR_HIGH) + vxrs += fpu_vstm(16, 31, vxrs); + else if (mask == KERNEL_VXR_V16V23) + vxrs += fpu_vstm(16, 23, vxrs); + else + vxrs += fpu_vstm(24, 31, vxrs); + } } EXPORT_SYMBOL(__kernel_fpu_begin); -void __kernel_fpu_end(struct kernel_fpu *state, u32 flags) +void __kernel_fpu_end(struct kernel_fpu *state, int flags) { + __vector128 *vxrs = state->vxrs; + int mask; + /* * Limit the restore to the FPU/vector registers of the - * previous context that have been overwritte by the - * current context + * previous context that have been overwritten by the + * current context. 
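Note: the individual flag bits can be recovered from the mask comments in the deleted asm (tmll 30 = KERNEL_VXR, tmll 6 = KERNEL_VXR_LOW, chi 12 = KERNEL_VXR_MID, tmll 24 = KERNEL_VXR_HIGH). A host-side sketch, not patch code, that prints which vstm ranges the new C dispatch issues for a given flag combination:

#include <stdio.h>

/* Bit values recovered from the deleted asm's mask comments. */
enum {
	KERNEL_VXR_V0V7   = 0x02,
	KERNEL_VXR_V8V15  = 0x04,
	KERNEL_VXR_V16V23 = 0x08,
	KERNEL_VXR_V24V31 = 0x10,
	KERNEL_VXR_LOW    = KERNEL_VXR_V0V7 | KERNEL_VXR_V8V15,    /* 6 */
	KERNEL_VXR_MID    = KERNEL_VXR_V8V15 | KERNEL_VXR_V16V23,  /* 12 */
	KERNEL_VXR_HIGH   = KERNEL_VXR_V16V23 | KERNEL_VXR_V24V31, /* 24 */
	KERNEL_VXR        = KERNEL_VXR_LOW | KERNEL_VXR_HIGH       /* 30 */
};

static void show_vstm_ranges(int flags)
{
	int mask = flags & KERNEL_VXR;

	if (mask == KERNEL_VXR) {	/* all registers: two full-range stores */
		printf("vstm 0,15 + vstm 16,31\n");
		return;
	}
	if (mask == KERNEL_VXR_MID) {	/* V8..V23 is contiguous: one store */
		printf("vstm 8,23\n");
		return;
	}
	if (mask & KERNEL_VXR_LOW)
		printf("vstm %d,%d\n", (mask & KERNEL_VXR_V0V7) ? 0 : 8,
		       (mask & KERNEL_VXR_V8V15) ? 15 : 7);
	if (mask & KERNEL_VXR_HIGH)
		printf("vstm %d,%d\n", (mask & KERNEL_VXR_V16V23) ? 16 : 24,
		       (mask & KERNEL_VXR_V24V31) ? 31 : 23);
}

The KERNEL_VXR_MID special case survives the rewrite because V8..V23 is one contiguous range, so a single vstm/vlm beats the two transfers the generic low/high split would produce.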
*/ - flags &= state->mask; - + flags &= state->hdr.mask; if (flags & KERNEL_FPC) - /* Restore floating-point controls */ - asm volatile("lfpc %0" : : "Q" (state->fpc)); - + fpu_lfpc(&state->hdr.fpc); if (!cpu_has_vx()) { - if (flags & KERNEL_VXR_V0V7) { - /* Restore floating-point registers */ - asm volatile("ld 0,%0" : : "Q" (state->fprs[0])); - asm volatile("ld 1,%0" : : "Q" (state->fprs[1])); - asm volatile("ld 2,%0" : : "Q" (state->fprs[2])); - asm volatile("ld 3,%0" : : "Q" (state->fprs[3])); - asm volatile("ld 4,%0" : : "Q" (state->fprs[4])); - asm volatile("ld 5,%0" : : "Q" (state->fprs[5])); - asm volatile("ld 6,%0" : : "Q" (state->fprs[6])); - asm volatile("ld 7,%0" : : "Q" (state->fprs[7])); - asm volatile("ld 8,%0" : : "Q" (state->fprs[8])); - asm volatile("ld 9,%0" : : "Q" (state->fprs[9])); - asm volatile("ld 10,%0" : : "Q" (state->fprs[10])); - asm volatile("ld 11,%0" : : "Q" (state->fprs[11])); - asm volatile("ld 12,%0" : : "Q" (state->fprs[12])); - asm volatile("ld 13,%0" : : "Q" (state->fprs[13])); - asm volatile("ld 14,%0" : : "Q" (state->fprs[14])); - asm volatile("ld 15,%0" : : "Q" (state->fprs[15])); - } + if (flags & KERNEL_VXR_LOW) + load_fp_regs_vx(vxrs); return; } - - /* Test and restore (load) vector registers */ - asm volatile ( - /* - * Test if any vector register must be loaded and, if so, - * test if all registers can be loaded at once. - */ - " la 1,%[vxrs]\n" /* load restore area */ - " tmll %[m],30\n" /* KERNEL_VXR */ - " jz 7f\n" /* no work -> done */ - " jo 5f\n" /* -> restore V0..V31 */ - /* - * Test for special case KERNEL_FPU_MID only. In this - * case a vlm V8..V23 is the best instruction - */ - " chi %[m],12\n" /* KERNEL_VXR_MID */ - " jne 0f\n" /* -> restore V8..V23 */ - " VLM 8,23,128,1\n" /* vlm %v8,%v23,128(%r1) */ - " j 7f\n" - /* Test and restore the first half of 16 vector registers */ - "0: tmll %[m],6\n" /* KERNEL_VXR_LOW */ - " jz 3f\n" /* -> KERNEL_VXR_HIGH */ - " jo 2f\n" /* 11 -> restore V0..V15 */ - " brc 2,1f\n" /* 10 -> restore V8..V15 */ - " VLM 0,7,0,1\n" /* vlm %v0,%v7,0(%r1) */ - " j 3f\n" - "1: VLM 8,15,128,1\n" /* vlm %v8,%v15,128(%r1) */ - " j 3f\n" - "2: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */ - /* Test and restore the second half of 16 vector registers */ - "3: tmll %[m],24\n" /* KERNEL_VXR_HIGH */ - " jz 7f\n" - " jo 6f\n" /* 11 -> restore V16..V31 */ - " brc 2,4f\n" /* 10 -> restore V24..V31 */ - " VLM 16,23,256,1\n" /* vlm %v16,%v23,256(%r1) */ - " j 7f\n" - "4: VLM 24,31,384,1\n" /* vlm %v24,%v31,384(%r1) */ - " j 7f\n" - "5: VLM 0,15,0,1\n" /* vlm %v0,%v15,0(%r1) */ - "6: VLM 16,31,256,1\n" /* vlm %v16,%v31,256(%r1) */ - "7:" - : [vxrs] "=Q" (*(struct vx_array *) &state->vxrs) - : [m] "d" (flags) - : "1", "cc"); + mask = flags & KERNEL_VXR; + if (mask == KERNEL_VXR) { + vxrs += fpu_vlm(0, 15, vxrs); + vxrs += fpu_vlm(16, 31, vxrs); + return; + } + if (mask == KERNEL_VXR_MID) { + vxrs += fpu_vlm(8, 23, vxrs); + return; + } + mask = flags & KERNEL_VXR_LOW; + if (mask) { + if (mask == KERNEL_VXR_LOW) + vxrs += fpu_vlm(0, 15, vxrs); + else if (mask == KERNEL_VXR_V0V7) + vxrs += fpu_vlm(0, 7, vxrs); + else + vxrs += fpu_vlm(8, 15, vxrs); + } + mask = flags & KERNEL_VXR_HIGH; + if (mask) { + if (mask == KERNEL_VXR_HIGH) + vxrs += fpu_vlm(16, 31, vxrs); + else if (mask == KERNEL_VXR_V16V23) + vxrs += fpu_vlm(16, 23, vxrs); + else + vxrs += fpu_vlm(24, 31, vxrs); + } } EXPORT_SYMBOL(__kernel_fpu_end); -void __load_fpu_regs(void) +void load_fpu_state(struct fpu *state, int flags) { - unsigned long *regs = 
current->thread.fpu.regs; - struct fpu *state = &current->thread.fpu; + __vector128 *vxrs = &state->vxrs[0]; + int mask; - sfpc_safe(state->fpc); - if (likely(cpu_has_vx())) { - asm volatile("lgr 1,%0\n" - "VLM 0,15,0,1\n" - "VLM 16,31,256,1\n" - : - : "d" (regs) - : "1", "cc", "memory"); - } else { - asm volatile("ld 0,%0" : : "Q" (regs[0])); - asm volatile("ld 1,%0" : : "Q" (regs[1])); - asm volatile("ld 2,%0" : : "Q" (regs[2])); - asm volatile("ld 3,%0" : : "Q" (regs[3])); - asm volatile("ld 4,%0" : : "Q" (regs[4])); - asm volatile("ld 5,%0" : : "Q" (regs[5])); - asm volatile("ld 6,%0" : : "Q" (regs[6])); - asm volatile("ld 7,%0" : : "Q" (regs[7])); - asm volatile("ld 8,%0" : : "Q" (regs[8])); - asm volatile("ld 9,%0" : : "Q" (regs[9])); - asm volatile("ld 10,%0" : : "Q" (regs[10])); - asm volatile("ld 11,%0" : : "Q" (regs[11])); - asm volatile("ld 12,%0" : : "Q" (regs[12])); - asm volatile("ld 13,%0" : : "Q" (regs[13])); - asm volatile("ld 14,%0" : : "Q" (regs[14])); - asm volatile("ld 15,%0" : : "Q" (regs[15])); + if (flags & KERNEL_FPC) + fpu_lfpc(&state->fpc); + if (!cpu_has_vx()) { + if (flags & KERNEL_VXR_V0V7) + load_fp_regs_vx(state->vxrs); + return; + } + mask = flags & KERNEL_VXR; + if (mask == KERNEL_VXR) { + fpu_vlm(0, 15, &vxrs[0]); + fpu_vlm(16, 31, &vxrs[16]); + return; + } + if (mask == KERNEL_VXR_MID) { + fpu_vlm(8, 23, &vxrs[8]); + return; + } + mask = flags & KERNEL_VXR_LOW; + if (mask) { + if (mask == KERNEL_VXR_LOW) + fpu_vlm(0, 15, &vxrs[0]); + else if (mask == KERNEL_VXR_V0V7) + fpu_vlm(0, 7, &vxrs[0]); + else + fpu_vlm(8, 15, &vxrs[8]); + } + mask = flags & KERNEL_VXR_HIGH; + if (mask) { + if (mask == KERNEL_VXR_HIGH) + fpu_vlm(16, 31, &vxrs[16]); + else if (mask == KERNEL_VXR_V16V23) + fpu_vlm(16, 23, &vxrs[16]); + else + fpu_vlm(24, 31, &vxrs[24]); } - clear_cpu_flag(CIF_FPU); -} - -void load_fpu_regs(void) -{ - raw_local_irq_disable(); - __load_fpu_regs(); - raw_local_irq_enable(); } -EXPORT_SYMBOL(load_fpu_regs); -void save_fpu_regs(void) +void save_fpu_state(struct fpu *state, int flags) { - unsigned long flags, *regs; - struct fpu *state; - - local_irq_save(flags); + __vector128 *vxrs = &state->vxrs[0]; + int mask; - if (test_cpu_flag(CIF_FPU)) - goto out; - - state = &current->thread.fpu; - regs = current->thread.fpu.regs; - - asm volatile("stfpc %0" : "=Q" (state->fpc)); - if (likely(cpu_has_vx())) { - asm volatile("lgr 1,%0\n" - "VSTM 0,15,0,1\n" - "VSTM 16,31,256,1\n" - : - : "d" (regs) - : "1", "cc", "memory"); - } else { - asm volatile("std 0,%0" : "=Q" (regs[0])); - asm volatile("std 1,%0" : "=Q" (regs[1])); - asm volatile("std 2,%0" : "=Q" (regs[2])); - asm volatile("std 3,%0" : "=Q" (regs[3])); - asm volatile("std 4,%0" : "=Q" (regs[4])); - asm volatile("std 5,%0" : "=Q" (regs[5])); - asm volatile("std 6,%0" : "=Q" (regs[6])); - asm volatile("std 7,%0" : "=Q" (regs[7])); - asm volatile("std 8,%0" : "=Q" (regs[8])); - asm volatile("std 9,%0" : "=Q" (regs[9])); - asm volatile("std 10,%0" : "=Q" (regs[10])); - asm volatile("std 11,%0" : "=Q" (regs[11])); - asm volatile("std 12,%0" : "=Q" (regs[12])); - asm volatile("std 13,%0" : "=Q" (regs[13])); - asm volatile("std 14,%0" : "=Q" (regs[14])); - asm volatile("std 15,%0" : "=Q" (regs[15])); + if (flags & KERNEL_FPC) + fpu_stfpc(&state->fpc); + if (!cpu_has_vx()) { + if (flags & KERNEL_VXR_LOW) + save_fp_regs_vx(state->vxrs); + return; + } + mask = flags & KERNEL_VXR; + if (mask == KERNEL_VXR) { + fpu_vstm(0, 15, &vxrs[0]); + fpu_vstm(16, 31, &vxrs[16]); + return; + } + if (mask == KERNEL_VXR_MID) { + fpu_vstm(8,
23, &vxrs[8]); + return; + } + mask = flags & KERNEL_VXR_LOW; + if (mask) { + if (mask == KERNEL_VXR_LOW) + fpu_vstm(0, 15, &vxrs[0]); + else if (mask == KERNEL_VXR_V0V7) + fpu_vstm(0, 7, &vxrs[0]); + else + fpu_vstm(8, 15, &vxrs[8]); + } + mask = flags & KERNEL_VXR_HIGH; + if (mask) { + if (mask == KERNEL_VXR_HIGH) + fpu_vstm(16, 31, &vxrs[16]); + else if (mask == KERNEL_VXR_V16V23) + fpu_vstm(16, 23, &vxrs[16]); + else + fpu_vstm(24, 31, &vxrs[24]); } - set_cpu_flag(CIF_FPU); -out: - local_irq_restore(flags); } -EXPORT_SYMBOL(save_fpu_regs); +EXPORT_SYMBOL(save_fpu_state); diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index c46381ea04..7f6f8c438c 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -296,6 +296,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, struct kprobe *p; int bit; + if (unlikely(kprobe_ftrace_disabled)) + return; + bit = ftrace_test_recursion_trylock(ip, parent_ip); if (bit < 0) return; diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index ba75f6bee7..469e8d3fbf 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -962,8 +962,8 @@ static ssize_t reipl_nvme_scpdata_write(struct file *filp, struct kobject *kobj, scpdata_len += padding; } - reipl_block_nvme->hdr.len = IPL_BP_FCP_LEN + scpdata_len; - reipl_block_nvme->nvme.len = IPL_BP0_FCP_LEN + scpdata_len; + reipl_block_nvme->hdr.len = IPL_BP_NVME_LEN + scpdata_len; + reipl_block_nvme->nvme.len = IPL_BP0_NVME_LEN + scpdata_len; reipl_block_nvme->nvme.scp_data_len = scpdata_len; return count; @@ -1858,9 +1858,9 @@ static int __init dump_nvme_init(void) } dump_block_nvme->hdr.len = IPL_BP_NVME_LEN; dump_block_nvme->hdr.version = IPL_PARM_BLOCK_VERSION; - dump_block_nvme->fcp.len = IPL_BP0_NVME_LEN; - dump_block_nvme->fcp.pbt = IPL_PBT_NVME; - dump_block_nvme->fcp.opt = IPL_PB0_NVME_OPT_DUMP; + dump_block_nvme->nvme.len = IPL_BP0_NVME_LEN; + dump_block_nvme->nvme.pbt = IPL_PBT_NVME; + dump_block_nvme->nvme.opt = IPL_PB0_NVME_OPT_DUMP; dump_capabilities |= DUMP_TYPE_NVME; return 0; } @@ -1941,8 +1941,7 @@ static void dump_reipl_run(struct shutdown_trigger *trigger) reipl_type == IPL_TYPE_UNKNOWN) os_info_flags |= OS_INFO_FLAG_REIPL_CLEAR; os_info_entry_add(OS_INFO_FLAGS_ENTRY, &os_info_flags, sizeof(os_info_flags)); - csum = (__force unsigned int) - csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0); + csum = (__force unsigned int)cksm(reipl_block_actual, reipl_block_actual->hdr.len, 0); abs_lc = get_abs_lowcore(); abs_lc->ipib = __pa(reipl_block_actual); abs_lc->ipib_checksum = csum; diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c index 9da6fa30c4..4d364de437 100644 --- a/arch/s390/kernel/kexec_elf.c +++ b/arch/s390/kernel/kexec_elf.c @@ -40,8 +40,10 @@ static int kexec_file_add_kernel_elf(struct kimage *image, buf.bufsz = phdr->p_filesz; buf.mem = ALIGN(phdr->p_paddr, phdr->p_align); +#ifdef CONFIG_CRASH_DUMP if (image->type == KEXEC_TYPE_CRASH) buf.mem += crashk_res.start; +#endif buf.memsz = phdr->p_memsz; data->memsz = ALIGN(data->memsz, phdr->p_align) + buf.memsz; diff --git a/arch/s390/kernel/kexec_image.c b/arch/s390/kernel/kexec_image.c index af23eff577..a32ce8bea7 100644 --- a/arch/s390/kernel/kexec_image.c +++ b/arch/s390/kernel/kexec_image.c @@ -24,8 +24,10 @@ static int kexec_file_add_kernel_image(struct kimage *image, buf.bufsz = image->kernel_buf_len; buf.mem = 0; +#ifdef CONFIG_CRASH_DUMP if (image->type == KEXEC_TYPE_CRASH) buf.mem += crashk_res.start; +#endif buf.memsz = 
buf.bufsz; data->kernel_buf = image->kernel_buf; diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index aa22ffc16b..3aee98efc3 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -13,8 +13,10 @@ #include <linux/reboot.h> #include <linux/ftrace.h> #include <linux/debug_locks.h> +#include <asm/guarded_storage.h> #include <asm/pfault.h> #include <asm/cio.h> +#include <asm/fpu.h> #include <asm/setup.h> #include <asm/smp.h> #include <asm/ipl.h> @@ -26,7 +28,6 @@ #include <asm/os_info.h> #include <asm/set_memory.h> #include <asm/stacktrace.h> -#include <asm/switch_to.h> #include <asm/nmi.h> #include <asm/sclp.h> @@ -209,21 +210,6 @@ void machine_kexec_cleanup(struct kimage *image) { } -void arch_crash_save_vmcoreinfo(void) -{ - struct lowcore *abs_lc; - - VMCOREINFO_SYMBOL(lowcore_ptr); - VMCOREINFO_SYMBOL(high_memory); - VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); - vmcoreinfo_append_str("SAMODE31=%lx\n", (unsigned long)__samode31); - vmcoreinfo_append_str("EAMODE31=%lx\n", (unsigned long)__eamode31); - vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); - abs_lc = get_abs_lowcore(); - abs_lc->vmcore_info = paddr_vmcoreinfo_note(); - put_abs_lowcore(abs_lc); -} - void machine_shutdown(void) { } diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c index 8d207b82d9..c2bac14dd6 100644 --- a/arch/s390/kernel/machine_kexec_file.c +++ b/arch/s390/kernel/machine_kexec_file.c @@ -105,6 +105,7 @@ static int kexec_file_update_purgatory(struct kimage *image, if (ret) return ret; +#ifdef CONFIG_CRASH_DUMP if (image->type == KEXEC_TYPE_CRASH) { u64 crash_size; @@ -121,6 +122,7 @@ static int kexec_file_update_purgatory(struct kimage *image, sizeof(crash_size), false); } +#endif return ret; } @@ -134,8 +136,10 @@ static int kexec_file_add_purgatory(struct kimage *image, data->memsz = ALIGN(data->memsz, PAGE_SIZE); buf.mem = data->memsz; +#ifdef CONFIG_CRASH_DUMP if (image->type == KEXEC_TYPE_CRASH) buf.mem += crashk_res.start; +#endif ret = kexec_load_purgatory(image, &buf); if (ret) @@ -158,8 +162,10 @@ static int kexec_file_add_initrd(struct kimage *image, data->memsz = ALIGN(data->memsz, PAGE_SIZE); buf.mem = data->memsz; +#ifdef CONFIG_CRASH_DUMP if (image->type == KEXEC_TYPE_CRASH) buf.mem += crashk_res.start; +#endif buf.memsz = buf.bufsz; data->parm->initrd_start = data->memsz; @@ -223,8 +229,10 @@ static int kexec_file_add_ipl_report(struct kimage *image, data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr); *lc_ipl_parmblock_ptr = (__u32)buf.mem; +#ifdef CONFIG_CRASH_DUMP if (image->type == KEXEC_TYPE_CRASH) buf.mem += crashk_res.start; +#endif ret = kexec_add_buffer(&buf); out: @@ -268,10 +276,12 @@ void *kexec_file_add_components(struct kimage *image, memcpy(data.parm->command_line, image->cmdline_buf, image->cmdline_buf_len); +#ifdef CONFIG_CRASH_DUMP if (image->type == KEXEC_TYPE_CRASH) { data.parm->oldmem_base = crashk_res.start; data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1; } +#endif if (image->initrd_buf) { ret = kexec_file_add_initrd(image, &data); diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 9ad44c26d1..c77382a673 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c @@ -23,16 +23,14 @@ #include <linux/export.h> #include <asm/lowcore.h> #include <asm/ctlreg.h> +#include <asm/fpu.h> #include <asm/smp.h> #include <asm/stp.h> #include <asm/cputime.h> #include <asm/nmi.h> #include <asm/crw.h> -#include <asm/switch_to.h> 
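Note: the CONFIG_CRASH_DUMP guard around "buf.mem += crashk_res.start" is now repeated in kexec_elf.c, kexec_image.c and four places in machine_kexec_file.c above. If it spreads further, a small helper could keep the #ifdef in one spot; a hypothetical sketch (crash_base() is not part of this patch):

#include <linux/kexec.h>

#ifdef CONFIG_CRASH_DUMP
/* Crash-kernel buffers are placed relative to the reserved crashk region. */
static inline unsigned long crash_base(const struct kimage *image)
{
	return image->type == KEXEC_TYPE_CRASH ? crashk_res.start : 0;
}
#else
static inline unsigned long crash_base(const struct kimage *image)
{
	return 0;
}
#endif

Callers would then write "buf.mem += crash_base(image);" unconditionally.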
#include <asm/asm-offsets.h> #include <asm/pai.h> -#include <asm/vx-insn.h> -#include <asm/fpu/api.h> struct mcck_struct { unsigned int kill_task : 1; @@ -204,133 +202,63 @@ void s390_handle_mcck(void) } } -/* - * returns 0 if register contents could be validated - * returns 1 otherwise +/** + * nmi_registers_valid - verify if registers are valid + * @mci: machine check interruption code + * + * Inspect a machine check interruption code and verify if all required + * registers are valid. For some registers the corresponding validity bit is + * ignored and the registers are set to the expected value. + * Returns true if all registers are valid, otherwise false. */ -static int notrace s390_validate_registers(union mci mci) +static bool notrace nmi_registers_valid(union mci mci) { - struct mcesa *mcesa; - void *fpt_save_area; union ctlreg2 cr2; - int kill_task; - u64 zero; - - kill_task = 0; - zero = 0; - - if (!mci.gr || !mci.fp) - kill_task = 1; - fpt_save_area = &S390_lowcore.floating_pt_save_area; - if (!mci.fc) { - kill_task = 1; - asm volatile( - " lfpc %0\n" - : - : "Q" (zero)); - } else { - asm volatile( - " lfpc %0\n" - : - : "Q" (S390_lowcore.fpt_creg_save_area)); - } - mcesa = __va(S390_lowcore.mcesad & MCESA_ORIGIN_MASK); - if (!cpu_has_vx()) { - /* Validate floating point registers */ - asm volatile( - " ld 0,0(%0)\n" - " ld 1,8(%0)\n" - " ld 2,16(%0)\n" - " ld 3,24(%0)\n" - " ld 4,32(%0)\n" - " ld 5,40(%0)\n" - " ld 6,48(%0)\n" - " ld 7,56(%0)\n" - " ld 8,64(%0)\n" - " ld 9,72(%0)\n" - " ld 10,80(%0)\n" - " ld 11,88(%0)\n" - " ld 12,96(%0)\n" - " ld 13,104(%0)\n" - " ld 14,112(%0)\n" - " ld 15,120(%0)\n" - : - : "a" (fpt_save_area) - : "memory"); - } else { - /* Validate vector registers */ - union ctlreg0 cr0; - - /* - * The vector validity must only be checked if not running a - * KVM guest. For KVM guests the machine check is forwarded by - * KVM and it is the responsibility of the guest to take - * appropriate actions. The host vector or FPU values have been - * saved by KVM and will be restored by KVM. - */ - if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST)) - kill_task = 1; - cr0.reg = S390_lowcore.cregs_save_area[0]; - cr0.afp = cr0.vx = 1; - local_ctl_load(0, &cr0.reg); - asm volatile( - " la 1,%0\n" - " VLM 0,15,0,1\n" - " VLM 16,31,256,1\n" - : - : "Q" (*(struct vx_array *)mcesa->vector_save_area) - : "1"); - local_ctl_load(0, &S390_lowcore.cregs_save_area[0]); - } - /* Validate access registers */ - asm volatile( - " lam 0,15,0(%0)\n" - : - : "a" (&S390_lowcore.access_regs_save_area) - : "memory"); - if (!mci.ar) - kill_task = 1; - /* Validate guarded storage registers */ - cr2.reg = S390_lowcore.cregs_save_area[2]; - if (cr2.gse) { - if (!mci.gs) { - /* - * 2 cases: - * - machine check in kernel or userspace - * - machine check while running SIE (KVM guest) - * For kernel or userspace the userspace values of - * guarded storage control can not be recreated, the - * process must be terminated. - * For SIE the guest values of guarded storage can not - * be recreated. This is either due to a bug or due to - * GS being disabled in the guest. The guest will be - * notified by KVM code and the guests machine check - * handling must take care of this. The host values - * are saved by KVM and are not affected. 
- */ - if (!test_cpu_flag(CIF_MCCK_GUEST)) - kill_task = 1; - } else { - load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area); - } - } /* - * The getcpu vdso syscall reads CPU number from the programmable + * The getcpu vdso syscall reads the CPU number from the programmable * field of the TOD clock. Disregard the TOD programmable register - * validity bit and load the CPU number into the TOD programmable - * field unconditionally. + * validity bit and load the CPU number into the TOD programmable field + * unconditionally. */ set_tod_programmable_field(raw_smp_processor_id()); - /* Validate clock comparator register */ + /* + * Set the clock comparator register to the next expected value. + */ set_clock_comparator(S390_lowcore.clock_comparator); - + if (!mci.gr || !mci.fp || !mci.fc) + return false; + /* + * The vector validity must only be checked if not running a + * KVM guest. For KVM guests the machine check is forwarded by + * KVM and it is the responsibility of the guest to take + * appropriate actions. The host vector or FPU values have been + * saved by KVM and will be restored by KVM. + */ + if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST)) + return false; + if (!mci.ar) + return false; + /* + * Two cases for guarded storage registers: + * - machine check in kernel or userspace + * - machine check while running SIE (KVM guest) + * For kernel or userspace the userspace values of guarded storage + * control can not be recreated, the process must be terminated. + * For SIE the guest values of guarded storage can not be recreated. + * This is either due to a bug or due to GS being disabled in the + * guest. The guest will be notified by KVM code and the guests machine + * check handling must take care of this. The host values are saved by + * KVM and are not affected. 
+ */ + cr2.reg = S390_lowcore.cregs_save_area[2]; + if (cr2.gse && !mci.gs && !test_cpu_flag(CIF_MCCK_GUEST)) + return false; if (!mci.ms || !mci.pm || !mci.ia) - kill_task = 1; - - return kill_task; + return false; + return true; } -NOKPROBE_SYMBOL(s390_validate_registers); +NOKPROBE_SYMBOL(nmi_registers_valid); /* * Backup the guest's machine check info to its description block @@ -428,7 +356,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) s390_handle_damage(); } } - if (s390_validate_registers(mci)) { + if (!nmi_registers_valid(mci)) { if (!user_mode(regs)) s390_handle_damage(); /* diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c index 6e1824141b..a801e6bd53 100644 --- a/arch/s390/kernel/os_info.c +++ b/arch/s390/kernel/os_info.c @@ -29,7 +29,7 @@ static struct os_info os_info __page_aligned_data; u32 os_info_csum(struct os_info *os_info) { int size = sizeof(*os_info) - offsetof(struct os_info, version_major); - return (__force u32)csum_partial(&os_info->version_major, size, 0); + return (__force u32)cksm(&os_info->version_major, size, 0); } /* @@ -49,7 +49,7 @@ void os_info_entry_add(int nr, void *ptr, u64 size) { os_info.entry[nr].addr = __pa(ptr); os_info.entry[nr].size = size; - os_info.entry[nr].csum = (__force u32)csum_partial(ptr, size, 0); + os_info.entry[nr].csum = (__force u32)cksm(ptr, size, 0); os_info.csum = os_info_csum(&os_info); } @@ -98,7 +98,7 @@ static void os_info_old_alloc(int nr, int align) msg = "copy failed"; goto fail_free; } - csum = (__force u32)csum_partial(buf_align, size, 0); + csum = (__force u32)cksm(buf_align, size, 0); if (csum != os_info_old->entry[nr].csum) { msg = "checksum failed"; goto fail_free; diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index dfa77da2fd..5fff629b1a 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -218,39 +218,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) { - struct stack_frame_user __user *sf; - unsigned long ip, sp; - bool first = true; - - if (is_compat_task()) - return; - perf_callchain_store(entry, instruction_pointer(regs)); - sf = (void __user *)user_stack_pointer(regs); - pagefault_disable(); - while (entry->nr < entry->max_stack) { - if (__get_user(sp, &sf->back_chain)) - break; - if (__get_user(ip, &sf->gprs[8])) - break; - if (ip & 0x1) { - /* - * If the instruction address is invalid, and this - * is the first stack frame, assume r14 has not - * been written to the stack yet. Otherwise exit. - */ - if (first && !(regs->gprs[14] & 0x1)) - ip = regs->gprs[14]; - else - break; - } - perf_callchain_store(entry, ip); - /* Sanity check: ABI requires SP to be aligned 8 bytes. 
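Note: for reference, the deleted loop above implements the s390 user back-chain convention that arch_stack_walk_user_common() now provides centrally: each user frame stores the caller's stack pointer in back_chain and the return address in the gprs[8] slot (r14). A condensed model of that walk (kernel context assumed, struct stack_frame_user from asm/stacktrace.h; the real code also runs under pagefault_disable() and handles the not-yet-stored r14 of the first frame):

static int walk_user_backchain(struct pt_regs *regs, unsigned long *buf, int max)
{
	struct stack_frame_user __user *sf;
	unsigned long sp, ip;
	int n = 0;

	sf = (void __user *)user_stack_pointer(regs);
	while (n < max) {
		if (__get_user(sp, &sf->back_chain) ||
		    __get_user(ip, &sf->gprs[8]))
			break;
		buf[n++] = ip;
		if (!sp || sp & 0x7)	/* ABI: user SP stays 8-byte aligned */
			break;
		sf = (void __user *)sp;
	}
	return n;
}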
*/ - if (!sp || sp & 0x7) - break; - sf = (void __user *)sp; - first = false; - } - pagefault_enable(); + arch_stack_walk_user_common(NULL, NULL, entry, regs, true); } /* Perf definitions for PMU event attributes in sysfs */ diff --git a/arch/s390/kernel/perf_pai_crypto.c b/arch/s390/kernel/perf_pai_crypto.c index 335e3f5d71..4ad472d130 100644 --- a/arch/s390/kernel/perf_pai_crypto.c +++ b/arch/s390/kernel/perf_pai_crypto.c @@ -97,6 +97,7 @@ static void paicrypt_event_destroy(struct perf_event *event) event->attr.config, event->cpu, cpump->active_events, cpump->mode, refcount_read(&cpump->refcnt)); + free_page(PAI_SAVE_AREA(event)); if (refcount_dec_and_test(&cpump->refcnt)) { debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n", __func__, (unsigned long)cpump->page, @@ -259,6 +260,7 @@ static int paicrypt_event_init(struct perf_event *event) { struct perf_event_attr *a = &event->attr; struct paicrypt_map *cpump; + int rc = 0; /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */ if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type) @@ -273,10 +275,21 @@ static int paicrypt_event_init(struct perf_event *event) /* Allow only CRYPTO_ALL for sampling. */ if (a->sample_period && a->config != PAI_CRYPTO_BASE) return -EINVAL; + /* Get a page to store last counter values for sampling */ + if (a->sample_period) { + PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL); + if (!PAI_SAVE_AREA(event)) { + rc = -ENOMEM; + goto out; + } + } cpump = paicrypt_busy(event); - if (IS_ERR(cpump)) - return PTR_ERR(cpump); + if (IS_ERR(cpump)) { + free_page(PAI_SAVE_AREA(event)); + rc = PTR_ERR(cpump); + goto out; + } event->destroy = paicrypt_event_destroy; @@ -292,7 +305,8 @@ static int paicrypt_event_init(struct perf_event *event) } static_branch_inc(&pai_key); - return 0; +out: + return rc; } static void paicrypt_read(struct perf_event *event) @@ -309,20 +323,15 @@ static void paicrypt_read(struct perf_event *event) static void paicrypt_start(struct perf_event *event, int flags) { + struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr); + struct paicrypt_map *cpump = mp->mapptr; u64 sum; - /* Event initialization sets last_tag to 0. When later on the events - * are deleted and re-added, do not reset the event count value to zero. - * Events are added, deleted and re-added when 2 or more events - * are active at the same time. - */ if (!event->attr.sample_period) { /* Counting */ - if (!event->hw.last_tag) { - event->hw.last_tag = 1; - sum = paicrypt_getall(event); /* Get current value */ - local64_set(&event->hw.prev_count, sum); - } + sum = paicrypt_getall(event); /* Get current value */ + local64_set(&event->hw.prev_count, sum); } else { /* Sampling */ + cpump->event = event; perf_sched_cb_inc(event->pmu); } } @@ -338,7 +347,6 @@ static int paicrypt_add(struct perf_event *event, int flags) WRITE_ONCE(S390_lowcore.ccd, ccd); local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT); } - cpump->event = event; if (flags & PERF_EF_START) paicrypt_start(event, PERF_EF_RELOAD); event->hw.state = 0; @@ -371,23 +379,34 @@ static void paicrypt_del(struct perf_event *event, int flags) } } -/* Create raw data and save it in buffer. Returns number of bytes copied. - * Saves only positive counter entries of the form +/* Create raw data and save it in buffer. Calculate the delta for each + * counter between this invocation and the last invocation. + * Returns number of bytes copied. 
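Note: the delta logic added to paicrypt_copy() below (and to paiext_copy() further down) diffs each counter against the page saved at the previous sample and handles wraparound explicitly; for unsigned 64-bit values the else branch is exactly modular subtraction. A self-contained check, illustrative rather than patch code:

#include <stdint.h>
#include <assert.h>

/* Same branch structure as paicrypt_copy()/paiext_copy(). */
static uint64_t pai_delta(uint64_t val, uint64_t val_old)
{
	if (val >= val_old)
		return val - val_old;
	return (~0ULL - val_old) + val + 1;	/* counter wrapped */
}

int main(void)
{
	assert(pai_delta(10, 7) == 3);			/* no wrap */
	assert(pai_delta(1, UINT64_MAX - 1) == 3);	/* wrapped */
	/* equivalent to plain modular subtraction on uint64_t */
	assert(pai_delta(1, UINT64_MAX - 1) == (uint64_t)(1 - (UINT64_MAX - 1)));
	return 0;
}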
+ * Saves only entries with positive counter difference of the form * 2 bytes: Number of counter * 8 bytes: Value of counter */ static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page, - bool exclude_user, bool exclude_kernel) + unsigned long *page_old, bool exclude_user, + bool exclude_kernel) { int i, outidx = 0; for (i = 1; i <= paicrypt_cnt; i++) { - u64 val = 0; + u64 val = 0, val_old = 0; - if (!exclude_kernel) + if (!exclude_kernel) { val += paicrypt_getctr(page, i, true); - if (!exclude_user) + val_old += paicrypt_getctr(page_old, i, true); + } + if (!exclude_user) { val += paicrypt_getctr(page, i, false); + val_old += paicrypt_getctr(page_old, i, false); + } + if (val >= val_old) + val -= val_old; + else + val = (~0ULL - val_old) + val + 1; if (val) { userdata[outidx].num = i; userdata[outidx].value = val; @@ -430,8 +449,8 @@ static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump, overflow = perf_event_overflow(event, &data, &regs); perf_event_update_userpage(event); - /* Clear lowcore page after read */ - memset(cpump->page, 0, PAGE_SIZE); + /* Save crypto counter lowcore page after reading event data. */ + memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE); return overflow; } @@ -447,6 +466,7 @@ static int paicrypt_have_sample(void) if (!event) /* No event active */ return 0; rawsize = paicrypt_copy(cpump->save, cpump->page, + (unsigned long *)PAI_SAVE_AREA(event), cpump->event->attr.exclude_user, cpump->event->attr.exclude_kernel); if (rawsize) /* No incremented counters */ @@ -698,6 +718,12 @@ static int __init attr_event_init_one(struct attribute **attrs, int num) { struct perf_pmu_events_attr *pa; + /* Index larger than array_size, no counter name available */ + if (num >= ARRAY_SIZE(paicrypt_ctrnames)) { + attrs[num] = NULL; + return 0; + } + pa = kzalloc(sizeof(*pa), GFP_KERNEL); if (!pa) return -ENOMEM; @@ -718,11 +744,10 @@ static int __init attr_event_init(void) struct attribute **attrs; int ret, i; - attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs), - GFP_KERNEL); + attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL); if (!attrs) return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) { + for (i = 0; i <= paicrypt_cnt; i++) { ret = attr_event_init_one(attrs, i); if (ret) { attr_event_free(attrs, i); @@ -746,8 +771,10 @@ static int __init paicrypt_init(void) paicrypt_cnt = ib.num_cc; if (paicrypt_cnt == 0) return 0; - if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) - paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1; + if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) { + pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt); + return -E2BIG; + } rc = attr_event_init(); /* Export known PAI crypto events */ if (rc) { diff --git a/arch/s390/kernel/perf_pai_ext.c b/arch/s390/kernel/perf_pai_ext.c index db37c38ddc..a6da7e0cc7 100644 --- a/arch/s390/kernel/perf_pai_ext.c +++ b/arch/s390/kernel/perf_pai_ext.c @@ -120,6 +120,7 @@ static void paiext_event_destroy(struct perf_event *event) struct paiext_mapptr *mp = per_cpu_ptr(paiext_root.mapptr, event->cpu); struct paiext_map *cpump = mp->mapptr; + free_page(PAI_SAVE_AREA(event)); mutex_lock(&paiext_reserve_mutex); if (refcount_dec_and_test(&cpump->refcnt)) /* Last reference gone */ paiext_free(mp); @@ -201,7 +202,6 @@ static int paiext_alloc(struct perf_event_attr *a, struct perf_event *event) } rc = 0; - cpump->event = event; undo: if (rc) { @@ -255,10 +255,18 @@ static int paiext_event_init(struct perf_event *event) /* Prohibit exclude_user event
selection */ if (a->exclude_user) return -EINVAL; + /* Get a page to store last counter values for sampling */ + if (a->sample_period) { + PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL); + if (!PAI_SAVE_AREA(event)) + return -ENOMEM; + } rc = paiext_alloc(a, event); - if (rc) + if (rc) { + free_page(PAI_SAVE_AREA(event)); return rc; + } event->destroy = paiext_event_destroy; if (a->sample_period) { @@ -318,15 +326,15 @@ static void paiext_read(struct perf_event *event) static void paiext_start(struct perf_event *event, int flags) { + struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr); + struct paiext_map *cpump = mp->mapptr; u64 sum; if (!event->attr.sample_period) { /* Counting */ - if (!event->hw.last_tag) { - event->hw.last_tag = 1; - sum = paiext_getall(event); /* Get current value */ - local64_set(&event->hw.prev_count, sum); - } + sum = paiext_getall(event); /* Get current value */ + local64_set(&event->hw.prev_count, sum); } else { /* Sampling */ + cpump->event = event; perf_sched_cb_inc(event->pmu); } } @@ -345,7 +353,6 @@ static int paiext_add(struct perf_event *event, int flags) debug_sprintf_event(paiext_dbg, 4, "%s 1508 %llx acc %llx\n", __func__, S390_lowcore.aicd, pcb->acc); } - cpump->event = event; if (flags & PERF_EF_START) paiext_start(event, PERF_EF_RELOAD); event->hw.state = 0; @@ -388,13 +395,19 @@ static void paiext_del(struct perf_event *event, int flags) * 2 bytes: Number of counter * 8 bytes: Value of counter */ -static size_t paiext_copy(struct pai_userdata *userdata, unsigned long *area) +static size_t paiext_copy(struct pai_userdata *userdata, unsigned long *area, + unsigned long *area_old) { int i, outidx = 0; for (i = 1; i <= paiext_cnt; i++) { u64 val = paiext_getctr(area, i); + u64 val_old = paiext_getctr(area_old, i); + if (val >= val_old) + val -= val_old; + else + val = (~0ULL - val_old) + val + 1; if (val) { userdata[outidx].num = i; userdata[outidx].value = val; @@ -450,8 +463,9 @@ static int paiext_push_sample(size_t rawsize, struct paiext_map *cpump, overflow = perf_event_overflow(event, &data, &regs); perf_event_update_userpage(event); - /* Clear lowcore area after read */ - memset(cpump->area, 0, PAIE1_CTRBLOCK_SZ); + /* Save NNPA lowcore area after read in event */ + memcpy((void *)PAI_SAVE_AREA(event), cpump->area, + PAIE1_CTRBLOCK_SZ); return overflow; } @@ -466,7 +480,8 @@ static int paiext_have_sample(void) if (!event) return 0; - rawsize = paiext_copy(cpump->save, cpump->area); + rawsize = paiext_copy(cpump->save, cpump->area, + (unsigned long *)PAI_SAVE_AREA(event)); if (rawsize) /* Incremented counters */ rc = paiext_push_sample(rawsize, cpump, event); return rc; @@ -588,6 +603,12 @@ static int __init attr_event_init_one(struct attribute **attrs, int num) { struct perf_pmu_events_attr *pa; + /* Index larger than array_size, no counter name available */ + if (num >= ARRAY_SIZE(paiext_ctrnames)) { + attrs[num] = NULL; + return 0; + } + pa = kzalloc(sizeof(*pa), GFP_KERNEL); if (!pa) return -ENOMEM; @@ -608,11 +629,10 @@ static int __init attr_event_init(void) struct attribute **attrs; int ret, i; - attrs = kmalloc_array(ARRAY_SIZE(paiext_ctrnames) + 1, sizeof(*attrs), - GFP_KERNEL); + attrs = kmalloc_array(paiext_cnt + 2, sizeof(*attrs), GFP_KERNEL); if (!attrs) return -ENOMEM; - for (i = 0; i < ARRAY_SIZE(paiext_ctrnames); i++) { + for (i = 0; i <= paiext_cnt; i++) { ret = attr_event_init_one(attrs, i); if (ret) { attr_event_free(attrs, i); diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c index
3d93656bd9..a6b058ee4a 100644 --- a/arch/s390/kernel/perf_regs.c +++ b/arch/s390/kernel/perf_regs.c @@ -5,8 +5,7 @@ #include <linux/errno.h> #include <linux/bug.h> #include <asm/ptrace.h> -#include <asm/fpu/api.h> -#include <asm/fpu/types.h> +#include <asm/fpu.h> u64 perf_reg_value(struct pt_regs *regs, int idx) { @@ -20,10 +19,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return 0; idx -= PERF_REG_S390_FP0; - if (cpu_has_vx()) - fp = *(freg_t *)(current->thread.fpu.vxrs + idx); - else - fp = current->thread.fpu.fprs[idx]; + fp = *(freg_t *)(current->thread.ufpu.vxrs + idx); return fp.ui; } @@ -65,6 +61,6 @@ void perf_get_regs_user(struct perf_regs *regs_user, */ regs_user->regs = task_pt_regs(current); if (user_mode(regs_user->regs)) - save_fpu_regs(); + save_user_fpu_regs(); regs_user->abi = perf_reg_abi(current); } diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 4e3b366589..dd456b4758 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -31,15 +31,19 @@ #include <linux/init_task.h> #include <linux/entry-common.h> #include <linux/io.h> +#include <asm/guarded_storage.h> +#include <asm/access-regs.h> +#include <asm/switch_to.h> #include <asm/cpu_mf.h> #include <asm/processor.h> +#include <asm/ptrace.h> #include <asm/vtimer.h> #include <asm/exec.h> +#include <asm/fpu.h> #include <asm/irq.h> #include <asm/nmi.h> #include <asm/smp.h> #include <asm/stacktrace.h> -#include <asm/switch_to.h> #include <asm/runtime_instr.h> #include <asm/unwind.h> #include "entry.h" @@ -84,13 +88,13 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { /* * Save the floating-point or vector register state of the current - * task and set the CIF_FPU flag to lazy restore the FPU register + * task and set the TIF_FPU flag to lazy restore the FPU register * state when returning to user space. 
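Note: a sketch of the lazy-restore scheme this comment describes, including the rename from the CPU flag CIF_FPU to the task flag TIF_FPU: the saved user FPU state is reloaded only on the way back to user space. load_fpu_state() and the KERNEL_* masks come from the fpu.c hunk earlier; the hook itself is hypothetical, assuming it runs on the exit-to-user path:

/* Hypothetical exit-to-user hook, not part of the patch. */
static void restore_user_fpu_if_needed(void)
{
	if (test_thread_flag(TIF_FPU)) {
		load_fpu_state(&current->thread.ufpu, KERNEL_FPC | KERNEL_VXR);
		clear_thread_flag(TIF_FPU);
	}
}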
*/ - save_fpu_regs(); + save_user_fpu_regs(); *dst = *src; - dst->thread.fpu.regs = dst->thread.fpu.fprs; + dst->thread.kfpu_flags = 0; /* * Don't transfer over the runtime instrumentation or the guarded @@ -186,8 +190,23 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) void execve_tail(void) { - current->thread.fpu.fpc = 0; - asm volatile("sfpc %0" : : "d" (0)); + current->thread.ufpu.fpc = 0; + fpu_sfpc(0); +} + +struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next) +{ + save_user_fpu_regs(); + save_kernel_fpu_regs(&prev->thread); + save_access_regs(&prev->thread.acrs[0]); + save_ri_cb(prev->thread.ri_cb); + save_gs_cb(prev->thread.gs_cb); + update_cr_regs(next); + restore_kernel_fpu_regs(&next->thread); + restore_access_regs(&next->thread.acrs[0]); + restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); + restore_gs_cb(next->thread.gs_cb); + return __switch_to_asm(prev, next); } unsigned long __get_wchan(struct task_struct *p) diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index f1897a8bb2..1cfed8b710 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -24,13 +24,14 @@ #include <linux/seccomp.h> #include <linux/compat.h> #include <trace/syscall.h> +#include <asm/guarded_storage.h> +#include <asm/access-regs.h> #include <asm/page.h> #include <linux/uaccess.h> #include <asm/unistd.h> -#include <asm/switch_to.h> #include <asm/runtime_instr.h> #include <asm/facility.h> -#include <asm/fpu/api.h> +#include <asm/fpu.h> #include "entry.h" @@ -246,22 +247,15 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr) /* * floating point control reg. is in the thread structure */ - tmp = child->thread.fpu.fpc; + tmp = child->thread.ufpu.fpc; tmp <<= BITS_PER_LONG - 32; } else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) { /* - * floating point regs. are either in child->thread.fpu - * or the child->thread.fpu.vxrs array + * floating point regs. are in the child->thread.ufpu.vxrs array */ offset = addr - offsetof(struct user, regs.fp_regs.fprs); - if (cpu_has_vx()) - tmp = *(addr_t *) - ((addr_t) child->thread.fpu.vxrs + 2*offset); - else - tmp = *(addr_t *) - ((addr_t) child->thread.fpu.fprs + offset); - + tmp = *(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset); } else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) { /* * Handle access to the per_info structure. @@ -395,21 +389,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) */ if ((unsigned int)data != 0) return -EINVAL; - child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32); + child->thread.ufpu.fpc = data >> (BITS_PER_LONG - 32); } else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) { /* - * floating point regs. are either in child->thread.fpu - * or the child->thread.fpu.vxrs array + * floating point regs. are in the child->thread.ufpu.vxrs array */ offset = addr - offsetof(struct user, regs.fp_regs.fprs); - if (cpu_has_vx()) - *(addr_t *)((addr_t) - child->thread.fpu.vxrs + 2*offset) = data; - else - *(addr_t *)((addr_t) - child->thread.fpu.fprs + offset) = data; - + *(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = data; } else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) { /* * Handle access to the per_info structure. @@ -622,21 +609,14 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr) /* * floating point control reg. 
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index f1897a8bb2..1cfed8b710 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -24,13 +24,14 @@
 #include <linux/seccomp.h>
 #include <linux/compat.h>
 #include <trace/syscall.h>
+#include <asm/guarded_storage.h>
+#include <asm/access-regs.h>
 #include <asm/page.h>
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
-#include <asm/switch_to.h>
 #include <asm/runtime_instr.h>
 #include <asm/facility.h>
-#include <asm/fpu/api.h>
+#include <asm/fpu.h>
 
 #include "entry.h"
 
@@ -246,22 +247,15 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
 		/*
 		 * floating point control reg. is in the thread structure
 		 */
-		tmp = child->thread.fpu.fpc;
+		tmp = child->thread.ufpu.fpc;
 		tmp <<= BITS_PER_LONG - 32;
 
 	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are in the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
-		if (cpu_has_vx())
-			tmp = *(addr_t *)
-			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
-		else
-			tmp = *(addr_t *)
-			       ((addr_t) child->thread.fpu.fprs + offset);
-
+		tmp = *(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset);
 	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
 		/*
 		 * Handle access to the per_info structure.
@@ -395,21 +389,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
 		 */
 		if ((unsigned int)data != 0)
 			return -EINVAL;
-		child->thread.fpu.fpc = data >> (BITS_PER_LONG - 32);
+		child->thread.ufpu.fpc = data >> (BITS_PER_LONG - 32);
 
 	} else if (addr < offsetof(struct user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are in the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct user, regs.fp_regs.fprs);
-		if (cpu_has_vx())
-			*(addr_t *)((addr_t)
-				child->thread.fpu.vxrs + 2*offset) = data;
-		else
-			*(addr_t *)((addr_t)
-				child->thread.fpu.fprs + offset) = data;
-
+		*(addr_t *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = data;
 	} else if (addr < offsetof(struct user, regs.per_info) + sizeof(per_struct)) {
 		/*
 		 * Handle access to the per_info structure.
@@ -622,21 +609,14 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
 		/*
 		 * floating point control reg. is in the thread structure
 		 */
-		tmp = child->thread.fpu.fpc;
+		tmp = child->thread.ufpu.fpc;
 
 	} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are in the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
-		if (cpu_has_vx())
-			tmp = *(__u32 *)
-			       ((addr_t) child->thread.fpu.vxrs + 2*offset);
-		else
-			tmp = *(__u32 *)
-			       ((addr_t) child->thread.fpu.fprs + offset);
-
+		tmp = *(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset);
 	} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
 		/*
 		 * Handle access to the per_info structure.
@@ -748,21 +728,14 @@ static int __poke_user_compat(struct task_struct *child,
 		/*
 		 * floating point control reg. is in the thread structure
 		 */
-		child->thread.fpu.fpc = data;
+		child->thread.ufpu.fpc = data;
 
 	} else if (addr < offsetof(struct compat_user, regs.fp_regs) + sizeof(s390_fp_regs)) {
 		/*
-		 * floating point regs. are either in child->thread.fpu
-		 * or the child->thread.fpu.vxrs array
+		 * floating point regs. are in the child->thread.ufpu.vxrs array
 		 */
 		offset = addr - offsetof(struct compat_user, regs.fp_regs.fprs);
-		if (cpu_has_vx())
-			*(__u32 *)((addr_t)
-				child->thread.fpu.vxrs + 2*offset) = tmp;
-		else
-			*(__u32 *)((addr_t)
-				child->thread.fpu.fprs + offset) = tmp;
-
+		*(__u32 *)((addr_t)child->thread.ufpu.vxrs + 2 * offset) = tmp;
 	} else if (addr < offsetof(struct compat_user, regs.per_info) + sizeof(struct compat_per_struct_kernel)) {
 		/*
 		 * Handle access to the per_info structure.
@@ -893,10 +866,10 @@ static int s390_fpregs_get(struct task_struct *target,
 	_s390_fp_regs fp_regs;
 
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 
-	fp_regs.fpc = target->thread.fpu.fpc;
-	fpregs_store(&fp_regs, &target->thread.fpu);
+	fp_regs.fpc = target->thread.ufpu.fpc;
+	fpregs_store(&fp_regs, &target->thread.ufpu);
 
 	return membuf_write(&to, &fp_regs, sizeof(fp_regs));
 }
@@ -910,22 +883,17 @@ static int s390_fpregs_set(struct task_struct *target,
 	freg_t fprs[__NUM_FPRS];
 
 	if (target == current)
-		save_fpu_regs();
-
-	if (cpu_has_vx())
-		convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
-	else
-		memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
-
+		save_user_fpu_regs();
+	convert_vx_to_fp(fprs, target->thread.ufpu.vxrs);
 	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
-		u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
+		u32 ufpc[2] = { target->thread.ufpu.fpc, 0 };
 		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					&ufpc, 0, offsetof(s390_fp_regs, fprs));
 		if (rc)
 			return rc;
 		if (ufpc[1] != 0)
 			return -EINVAL;
-		target->thread.fpu.fpc = ufpc[0];
+		target->thread.ufpu.fpc = ufpc[0];
 	}
 
 	if (rc == 0 && count > 0)
@@ -933,12 +901,7 @@ static int s390_fpregs_set(struct task_struct *target,
				fprs, offsetof(s390_fp_regs, fprs), -1);
 	if (rc)
 		return rc;
-
-	if (cpu_has_vx())
-		convert_fp_to_vx(target->thread.fpu.vxrs, fprs);
-	else
-		memcpy(target->thread.fpu.fprs, &fprs, sizeof(fprs));
-
+	convert_fp_to_vx(target->thread.ufpu.vxrs, fprs);
 	return rc;
 }
@@ -988,9 +951,9 @@ static int s390_vxrs_low_get(struct task_struct *target,
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 	for (i = 0; i < __NUM_VXRS_LOW; i++)
-		vxrs[i] = target->thread.fpu.vxrs[i].low;
+		vxrs[i] = target->thread.ufpu.vxrs[i].low;
 	return membuf_write(&to, vxrs, sizeof(vxrs));
 }
@@ -1005,15 +968,15 @@ static int s390_vxrs_low_set(struct task_struct *target,
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 
 	for (i = 0; i < __NUM_VXRS_LOW; i++)
-		vxrs[i] = target->thread.fpu.vxrs[i].low;
+		vxrs[i] = target->thread.ufpu.vxrs[i].low;
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
 	if (rc == 0)
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			target->thread.fpu.vxrs[i].low = vxrs[i];
+			target->thread.ufpu.vxrs[i].low = vxrs[i];
 
 	return rc;
 }
@@ -1025,8 +988,8 @@ static int s390_vxrs_high_get(struct task_struct *target,
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
-	return membuf_write(&to, target->thread.fpu.vxrs + __NUM_VXRS_LOW,
+		save_user_fpu_regs();
+	return membuf_write(&to, target->thread.ufpu.vxrs + __NUM_VXRS_LOW,
			    __NUM_VXRS_HIGH * sizeof(__vector128));
 }
@@ -1040,10 +1003,10 @@ static int s390_vxrs_high_set(struct task_struct *target,
 	if (!cpu_has_vx())
 		return -ENODEV;
 	if (target == current)
-		save_fpu_regs();
+		save_user_fpu_regs();
 
 	rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				target->thread.fpu.vxrs + __NUM_VXRS_LOW, 0, -1);
+				target->thread.ufpu.vxrs + __NUM_VXRS_LOW, 0, -1);
 	return rc;
 }
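All of the ptrace accessors above now go through the vector layout unconditionally. The `2 * offset` scaling maps a byte offset in the 8-byte-per-entry FPR array onto the 16-byte-per-entry vector array. A hedged sketch of the two conversions the regset code relies on, with hypothetical sketch_* names and assuming the big-endian layout where FPR i occupies bits 0-63 of VR i:

/*
 * Sketch of convert_vx_to_fp()/convert_fp_to_vx() style conversions;
 * not copied from the kernel sources.
 */
#include <stdint.h>

#define SKETCH_NUM_FPRS 16

struct sketch_vx { uint64_t high, low; };	/* one 16-byte vector slot */

static void sketch_convert_vx_to_fp(uint64_t fprs[SKETCH_NUM_FPRS],
				    const struct sketch_vx vxrs[32])
{
	for (int i = 0; i < SKETCH_NUM_FPRS; i++)
		fprs[i] = vxrs[i].high;		/* bits 0-63 of VR i */
}

static void sketch_convert_fp_to_vx(struct sketch_vx vxrs[32],
				    const uint64_t fprs[SKETCH_NUM_FPRS])
{
	for (int i = 0; i < SKETCH_NUM_FPRS; i++)
		vxrs[i].high = fprs[i];		/* low halves untouched */
}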
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d1f3b56e7a..7ecd27c62d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -155,7 +155,7 @@ unsigned int __bootdata_preserved(zlib_dfltcc_support);
 EXPORT_SYMBOL(zlib_dfltcc_support);
 u64 __bootdata_preserved(stfle_fac_list[16]);
 EXPORT_SYMBOL(stfle_fac_list);
-u64 __bootdata_preserved(alt_stfle_fac_list[16]);
+u64 alt_stfle_fac_list[16];
 struct oldmem_data __bootdata_preserved(oldmem_data);
 
 unsigned long VMALLOC_START;
@@ -504,12 +504,12 @@ static void __init setup_resources(void)
 	int j;
 	u64 i;
 
-	code_resource.start = (unsigned long) _text;
-	code_resource.end = (unsigned long) _etext - 1;
-	data_resource.start = (unsigned long) _etext;
-	data_resource.end = (unsigned long) _edata - 1;
-	bss_resource.start = (unsigned long) __bss_start;
-	bss_resource.end = (unsigned long) __bss_stop - 1;
+	code_resource.start = __pa_symbol(_text);
+	code_resource.end = __pa_symbol(_etext) - 1;
+	data_resource.start = __pa_symbol(_etext);
+	data_resource.end = __pa_symbol(_edata) - 1;
+	bss_resource.start = __pa_symbol(__bss_start);
+	bss_resource.end = __pa_symbol(__bss_stop) - 1;
 
 	for_each_mem_range(i, &start, &end) {
 		res = memblock_alloc(sizeof(*res), 8);
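The setup_resources() hunk switches the resource bounds from virtual symbol addresses to physical ones: the iomem resource tree describes physical memory, so once the kernel image is no longer identity-mapped the translation has to be explicit. A toy illustration of the linear translation involved; both base addresses below are invented for the example:

/* Sketch only: linear virtual-to-physical mapping of image symbols. */
#include <stdint.h>

static uintptr_t sketch_va_base = 0x1000000000UL;	/* assumed mapping base */
static uintptr_t sketch_pa_base = 0x100000UL;		/* assumed load address */

static uintptr_t sketch_pa_symbol(const void *sym)
{
	return (uintptr_t)sym - sketch_va_base + sketch_pa_base;
}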
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 43e9661cd7..6c2cb34540 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -30,8 +30,8 @@
 #include <linux/compat.h>
 #include <asm/ucontext.h>
 #include <linux/uaccess.h>
+#include <asm/access-regs.h>
 #include <asm/lowcore.h>
-#include <asm/switch_to.h>
 #include <asm/vdso.h>
 #include "entry.h"
 
@@ -109,7 +109,7 @@ struct rt_sigframe
 static void store_sigregs(void)
 {
 	save_access_regs(current->thread.acrs);
-	save_fpu_regs();
+	save_user_fpu_regs();
 }
 
 /* Load registers after signal return */
@@ -131,7 +131,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 	memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
 	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
	       sizeof(user_sregs.regs.acrs));
-	fpregs_store(&user_sregs.fpregs, &current->thread.fpu);
+	fpregs_store(&user_sregs.fpregs, &current->thread.ufpu);
 	if (__copy_to_user(sregs, &user_sregs, sizeof(_sigregs)))
 		return -EFAULT;
 	return 0;
@@ -165,7 +165,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
 	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
	       sizeof(current->thread.acrs));
-	fpregs_load(&user_sregs.fpregs, &current->thread.fpu);
+	fpregs_load(&user_sregs.fpregs, &current->thread.ufpu);
 
 	clear_pt_regs_flag(regs, PIF_SYSCALL); /* No longer in a system call */
 	return 0;
@@ -181,11 +181,11 @@ static int save_sigregs_ext(struct pt_regs *regs,
 	/* Save vector registers to signal stack */
 	if (cpu_has_vx()) {
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			vxrs[i] = current->thread.fpu.vxrs[i].low;
+			vxrs[i] = current->thread.ufpu.vxrs[i].low;
 		if (__copy_to_user(&sregs_ext->vxrs_low, vxrs,
				   sizeof(sregs_ext->vxrs_low)) ||
		    __copy_to_user(&sregs_ext->vxrs_high,
-				   current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+				   current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
				   sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 	}
@@ -202,12 +202,12 @@ static int restore_sigregs_ext(struct pt_regs *regs,
 	if (cpu_has_vx()) {
 		if (__copy_from_user(vxrs, &sregs_ext->vxrs_low,
				     sizeof(sregs_ext->vxrs_low)) ||
-		    __copy_from_user(current->thread.fpu.vxrs + __NUM_VXRS_LOW,
+		    __copy_from_user(current->thread.ufpu.vxrs + __NUM_VXRS_LOW,
				     &sregs_ext->vxrs_high,
				     sizeof(sregs_ext->vxrs_high)))
 			return -EFAULT;
 		for (i = 0; i < __NUM_VXRS_LOW; i++)
-			current->thread.fpu.vxrs[i].low = vxrs[i];
+			current->thread.ufpu.vxrs[i].low = vxrs[i];
 	}
 	return 0;
 }
@@ -222,7 +222,7 @@ SYSCALL_DEFINE0(sigreturn)
 	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
 		goto badframe;
 	set_current_blocked(&set);
-	save_fpu_regs();
+	save_user_fpu_regs();
 	if (restore_sigregs(regs, &frame->sregs))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->sregs_ext))
@@ -246,7 +246,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	set_current_blocked(&set);
 	if (restore_altstack(&frame->uc.uc_stack))
 		goto badframe;
-	save_fpu_regs();
+	save_user_fpu_regs();
 	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
 		goto badframe;
 	if (restore_sigregs_ext(regs, &frame->uc.uc_mcontext_ext))
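The sigframe layout that save_sigregs_ext() fills is easy to misread: the classic fpregs area already transports bits 0-63 of VR0-VR15 (the FPR overlay), so the extension area only needs the remaining low halves plus VR16-VR31 in full. A self-contained sketch of the split, again with hypothetical sketch_* names:

/* Sketch of the vector-register split across the two sigframe areas. */
#include <stdint.h>

#define SKETCH_NUM_VXRS_LOW  16
#define SKETCH_NUM_VXRS_HIGH 16

struct sketch_vx { uint64_t high, low; };

struct sketch_sigregs_ext {
	uint64_t vxrs_low[SKETCH_NUM_VXRS_LOW];		  /* bits 64-127 of VR0-15 */
	struct sketch_vx vxrs_high[SKETCH_NUM_VXRS_HIGH]; /* VR16-31, complete */
};

static void sketch_save_ext(struct sketch_sigregs_ext *ext,
			    const struct sketch_vx vxrs[32])
{
	for (int i = 0; i < SKETCH_NUM_VXRS_LOW; i++)
		ext->vxrs_low[i] = vxrs[i].low;
	for (int i = 0; i < SKETCH_NUM_VXRS_HIGH; i++)
		ext->vxrs_high[i] = vxrs[SKETCH_NUM_VXRS_LOW + i];
}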
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c39d9f0d4b..0324649aae 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -36,12 +36,13 @@
 #include <linux/sched/task_stack.h>
 #include <linux/crash_dump.h>
 #include <linux/kprobes.h>
+#include <asm/access-regs.h>
 #include <asm/asm-offsets.h>
 #include <asm/ctlreg.h>
 #include <asm/pfault.h>
 #include <asm/diag.h>
-#include <asm/switch_to.h>
 #include <asm/facility.h>
+#include <asm/fpu.h>
 #include <asm/ipl.h>
 #include <asm/setup.h>
 #include <asm/irq.h>
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 94f440e383..640363b2a1 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -5,6 +5,7 @@
  * Copyright IBM Corp. 2006
  */
 
+#include <linux/perf_event.h>
 #include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/compat.h>
@@ -62,42 +63,121 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
 	return 0;
 }
 
-void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
-			  const struct pt_regs *regs)
+static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
+			    struct perf_callchain_entry_ctx *entry, bool perf,
+			    unsigned long ip)
+{
+#ifdef CONFIG_PERF_EVENTS
+	if (perf) {
+		if (perf_callchain_store(entry, ip))
+			return false;
+		return true;
+	}
+#endif
+	return consume_entry(cookie, ip);
+}
+
+static inline bool ip_invalid(unsigned long ip)
+{
+	/*
+	 * Perform some basic checks if an instruction address taken
+	 * from unreliable source is invalid.
+	 */
+	if (ip & 1)
+		return true;
+	if (ip < mmap_min_addr)
+		return true;
+	if (ip >= current->mm->context.asce_limit)
+		return true;
+	return false;
+}
+
+static inline bool ip_within_vdso(unsigned long ip)
 {
+	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
+}
+
+void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
+				 struct perf_callchain_entry_ctx *entry,
+				 const struct pt_regs *regs, bool perf)
+{
+	struct stack_frame_vdso_wrapper __user *sf_vdso;
 	struct stack_frame_user __user *sf;
 	unsigned long ip, sp;
 	bool first = true;
 
 	if (is_compat_task())
 		return;
-	if (!consume_entry(cookie, instruction_pointer(regs)))
+	if (!current->mm)
+		return;
+	ip = instruction_pointer(regs);
+	if (!store_ip(consume_entry, cookie, entry, perf, ip))
 		return;
 	sf = (void __user *)user_stack_pointer(regs);
 	pagefault_disable();
 	while (1) {
 		if (__get_user(sp, &sf->back_chain))
 			break;
-		if (__get_user(ip, &sf->gprs[8]))
+		/*
+		 * VDSO entry code has a non-standard stack frame layout.
+		 * See VDSO user wrapper code for details.
+		 */
+		if (!sp && ip_within_vdso(ip)) {
+			sf_vdso = (void __user *)sf;
+			if (__get_user(ip, &sf_vdso->return_address))
+				break;
+			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
+			sf = (void __user *)sp;
+			if (__get_user(sp, &sf->back_chain))
+				break;
+		} else {
+			sf = (void __user *)sp;
+			if (__get_user(ip, &sf->gprs[8]))
+				break;
+		}
+		/* Sanity check: ABI requires SP to be 8 byte aligned. */
+		if (sp & 0x7)
 			break;
-		if (ip & 0x1) {
+		if (ip_invalid(ip)) {
 			/*
 			 * If the instruction address is invalid, and this
 			 * is the first stack frame, assume r14 has not
 			 * been written to the stack yet. Otherwise exit.
 			 */
-			if (first && !(regs->gprs[14] & 0x1))
-				ip = regs->gprs[14];
-			else
+			if (!first)
+				break;
+			ip = regs->gprs[14];
+			if (ip_invalid(ip))
 				break;
 		}
-		if (!consume_entry(cookie, ip))
-			break;
-		/* Sanity check: ABI requires SP to be aligned 8 bytes. */
-		if (!sp || sp & 0x7)
-			break;
-		sf = (void __user *)sp;
+		if (!store_ip(consume_entry, cookie, entry, perf, ip))
+			return;
 		first = false;
 	}
 	pagefault_enable();
 }
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+			  const struct pt_regs *regs)
+{
+	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
+}
+
+unsigned long return_address(unsigned int n)
+{
+	struct unwind_state state;
+	unsigned long addr;
+
+	/* Increment to skip current stack entry */
+	n++;
+
+	unwind_for_each_frame(&state, NULL, NULL, 0) {
+		addr = unwind_get_return_address(&state);
+		if (!addr)
+			break;
+		if (!n--)
+			return addr;
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(return_address);
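The rewritten walker follows the s390 ELF ABI back chain: each frame's first doubleword points to the caller's frame, and the saved r14 sits in the gprs[8] slot of that frame. A zero back chain combined with an instruction address inside the vDSO text signals the wrapper's non-standard frame, whose return address lives at a fixed offset instead (see the vdso_user_wrapper.S hunk further down). A stripped-down user-space model of the loop, without the kernel's fault handling; the layout mirrors struct stack_frame_user, but all names are stand-ins:

/* Sketch of an s390 user backchain walk; not the kernel code. */
#include <stdio.h>

struct sketch_frame_user {
	unsigned long back_chain;	/* 0 terminates the chain */
	unsigned long empty1[5];
	unsigned long gprs[10];		/* gprs[8] holds the saved r14 */
};

static void sketch_walk_user_stack(unsigned long sp, unsigned long start_ip)
{
	const struct sketch_frame_user *sf = (const void *)sp;
	unsigned long chain, ip;

	printf("ip=%#lx\n", start_ip);
	while ((chain = sf->back_chain) != 0) {
		if (chain & 7)			/* ABI: 8-byte alignment */
			break;
		sf = (const void *)chain;	/* hop to the caller's frame */
		ip = sf->gprs[8];		/* caller's return address */
		if (ip & 1)			/* crude validity check */
			break;
		printf("ip=%#lx\n", ip);
	}
}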
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index f6f8f498c9..2be30a9669 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -20,7 +20,7 @@
 #include <asm/sysinfo.h>
 #include <asm/cpcmd.h>
 #include <asm/topology.h>
-#include <asm/fpu/api.h>
+#include <asm/fpu.h>
 
 int topology_max_mnest;
 
@@ -397,7 +397,7 @@ static void service_level_vm_print(struct seq_file *m,
 {
 	char *query_buffer, *str;
 
-	query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA);
+	query_buffer = kmalloc(1024, GFP_KERNEL);
 	if (!query_buffer)
 		return;
 	cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL);
@@ -426,9 +426,9 @@ subsys_initcall(create_proc_service_level);
 */
 void s390_adjust_jiffies(void)
 {
+	DECLARE_KERNEL_FPU_ONSTACK16(fpu);
 	struct sysinfo_1_2_2 *info;
 	unsigned long capability;
-	struct kernel_fpu fpu;
 
 	info = (void *) get_zeroed_page(GFP_KERNEL);
 	if (!info)
@@ -447,21 +447,14 @@ void s390_adjust_jiffies(void)
		 * point division ..
		 */
 		kernel_fpu_begin(&fpu, KERNEL_FPR);
-		asm volatile(
-			"	sfpc	%3\n"
-			"	l	%0,%1\n"
-			"	tmlh	%0,0xff80\n"
-			"	jnz	0f\n"
-			"	cefbr	%%f2,%0\n"
-			"	j	1f\n"
-			"0:	le	%%f2,%1\n"
-			"1:	cefbr	%%f0,%2\n"
-			"	debr	%%f0,%%f2\n"
-			"	cgebr	%0,5,%%f0\n"
-			: "=&d" (capability)
-			: "Q" (info->capability), "d" (10000000), "d" (0)
-			: "cc"
-			);
+		fpu_sfpc(0);
+		if (info->capability & 0xff800000)
+			fpu_ldgr(2, info->capability);
+		else
+			fpu_cefbr(2, info->capability);
+		fpu_cefbr(0, 10000000);
+		fpu_debr(0, 2);
+		capability = fpu_cgebr(0, 5);
 		kernel_fpu_end(&fpu, KERNEL_FPR);
 	} else
 		/*
diff --git a/arch/s390/kernel/text_amode31.S b/arch/s390/kernel/text_amode31.S
index 14c6d25c03..c0a70efa24 100644
--- a/arch/s390/kernel/text_amode31.S
+++ b/arch/s390/kernel/text_amode31.S
@@ -90,7 +90,7 @@ SYM_FUNC_START(_diag26c_amode31)
 SYM_FUNC_END(_diag26c_amode31)
 
 /*
- * void _diag0c_amode31(struct hypfs_diag0c_entry *entry)
+ * void _diag0c_amode31(unsigned long rx)
 */
 SYM_FUNC_START(_diag0c_amode31)
	sam31
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 14abad953c..fb9f31f366 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -251,8 +251,8 @@ static struct clocksource clocksource_tod = {
	.rating		= 400,
	.read		= read_tod_clock,
	.mask		= CLOCKSOURCE_MASK(64),
-	.mult		= 1000,
-	.shift		= 12,
+	.mult		= 4096000,
+	.shift		= 24,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.vdso_clock_mode = VDSO_CLOCKMODE_TOD,
 };
@@ -716,7 +716,7 @@ out_unlock:
 /*
 * STP subsys sysfs interface functions
 */
-static struct bus_type stp_subsys = {
+static const struct bus_type stp_subsys = {
	.name		= "stp",
	.dev_name	= "stp",
 };
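On the clocksource change just above: the core converts a TOD delta to nanoseconds as ns = (cycles * mult) >> shift, and 4096 TOD units equal one microsecond, so both pairs encode the same period of 1000/4096 ns per unit (1000 >> 12 and 4096000 >> 24 are the same ratio); the larger pair presumably just leaves more fractional bits for fine-grained rate steering. A quick self-check, runnable as-is:

/* Verify the old and new mult/shift pairs are equivalent. */
#include <stdio.h>

static unsigned long long cyc2ns(unsigned long long cycles,
				 unsigned int mult, unsigned int shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	unsigned long long one_us = 4096;	/* 4096 TOD units per microsecond */

	printf("old: %llu ns\n", cyc2ns(one_us, 1000, 12));	/* 1000 ns */
	printf("new: %llu ns\n", cyc2ns(one_us, 4096000, 24));	/* 1000 ns */
	return 0;
}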
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 46dac4540c..52578b5cec 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -28,8 +28,8 @@
 #include <linux/cpu.h>
 #include <linux/entry-common.h>
 #include <asm/asm-extable.h>
-#include <asm/fpu/api.h>
 #include <asm/vtime.h>
+#include <asm/fpu.h>
 #include "entry.h"
 
 static inline void __user *get_trap_ip(struct pt_regs *regs)
@@ -201,8 +201,8 @@ static void vector_exception(struct pt_regs *regs)
 	}
 
 	/* get vector interrupt code from fpc */
-	save_fpu_regs();
-	vic = (current->thread.fpu.fpc & 0xf00) >> 8;
+	save_user_fpu_regs();
+	vic = (current->thread.ufpu.fpc & 0xf00) >> 8;
 	switch (vic) {
 	case 1: /* invalid vector operation */
 		si_code = FPE_FLTINV;
@@ -227,9 +227,9 @@ static void vector_exception(struct pt_regs *regs)
 
 static void data_exception(struct pt_regs *regs)
 {
-	save_fpu_regs();
-	if (current->thread.fpu.fpc & FPC_DXC_MASK)
-		do_fp_trap(regs, current->thread.fpu.fpc);
+	save_user_fpu_regs();
+	if (current->thread.ufpu.fpc & FPC_DXC_MASK)
+		do_fp_trap(regs, current->thread.ufpu.fpc);
 	else
 		do_trap(regs, SIGILL, ILL_ILLOPN, "data exception");
 }
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index b88345ef8b..5b0633ea8d 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -12,7 +12,6 @@
 #include <linux/kdebug.h>
 #include <linux/sched/task_stack.h>
 
-#include <asm/switch_to.h>
 #include <asm/facility.h>
 #include <asm/kprobes.h>
 #include <asm/dis.h>
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index bbaefd84f1..2f967ac2b8 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -25,10 +25,7 @@ extern char vdso32_start[], vdso32_end[];
 
 static struct vm_special_mapping vvar_mapping;
 
-static union {
-	struct vdso_data data[CS_BASES];
-	u8 page[PAGE_SIZE];
-} vdso_data_store __page_aligned_data;
+static union vdso_data_store vdso_data_store __page_aligned_data;
 
 struct vdso_data *vdso_data = vdso_data_store.data;
 
@@ -213,17 +210,22 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
 	return addr;
 }
 
-unsigned long vdso_size(void)
+unsigned long vdso_text_size(void)
 {
-	unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+	unsigned long size;
 
 	if (is_compat_task())
-		size += vdso32_end - vdso32_start;
+		size = vdso32_end - vdso32_start;
 	else
-		size += vdso64_end - vdso64_start;
+		size = vdso64_end - vdso64_start;
 	return PAGE_ALIGN(size);
 }
 
+unsigned long vdso_size(void)
+{
+	return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
+}
+
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	unsigned long addr = VDSO_BASE;
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index b12a274cbb..4800d80dec 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -19,8 +19,10 @@ KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
 KBUILD_AFLAGS_32 += -m31 -s
 
 KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_32 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
+KBUILD_CFLAGS_32 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin -fasynchronous-unwind-tables
 
 LDFLAGS_vdso32.so.dbg += -shared -soname=linux-vdso32.so.1 \
	--hash-style=both --build-id=sha1 -melf_s390 -T
diff --git a/arch/s390/kernel/vdso32/vdso32.lds.S b/arch/s390/kernel/vdso32/vdso32.lds.S
index edf5ff1deb..65b9513a5a 100644
--- a/arch/s390/kernel/vdso32/vdso32.lds.S
+++ b/arch/s390/kernel/vdso32/vdso32.lds.S
@@ -9,7 +9,6 @@
 OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
 OUTPUT_ARCH(s390:31-bit)
-ENTRY(_start)
 
 SECTIONS
 {
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index caa4ebff8a..2f2e4e9970 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -24,8 +24,11 @@ KBUILD_AFLAGS_64 := $(filter-out -m64,$(KBUILD_AFLAGS))
 KBUILD_AFLAGS_64 += -m64
 
 KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_64 := $(filter-out -mpacked-stack,$(KBUILD_CFLAGS_64))
 KBUILD_CFLAGS_64 := $(filter-out -mno-pic-data-is-text-relative,$(KBUILD_CFLAGS_64))
-KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin
+KBUILD_CFLAGS_64 := $(filter-out -munaligned-symbols,$(KBUILD_CFLAGS_64))
+KBUILD_CFLAGS_64 := $(filter-out -fno-asynchronous-unwind-tables,$(KBUILD_CFLAGS_64))
+KBUILD_CFLAGS_64 += -m64 -fPIC -fno-common -fno-builtin -fasynchronous-unwind-tables
 
 ldflags-y := -shared -soname=linux-vdso64.so.1 \
	     --hash-style=both --build-id=sha1 -T
diff --git a/arch/s390/kernel/vdso64/vdso64.lds.S b/arch/s390/kernel/vdso64/vdso64.lds.S
index 4461ea151e..37e2a505e8 100644
--- a/arch/s390/kernel/vdso64/vdso64.lds.S
+++ b/arch/s390/kernel/vdso64/vdso64.lds.S
@@ -9,7 +9,6 @@
 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
 OUTPUT_ARCH(s390:64-bit)
-ENTRY(_start)
 
 SECTIONS
 {
diff --git a/arch/s390/kernel/vdso64/vdso_user_wrapper.S b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
index 85247ef5a4..e26e68675c 100644
--- a/arch/s390/kernel/vdso64/vdso_user_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso_user_wrapper.S
@@ -6,8 +6,6 @@
 #include <asm/dwarf.h>
 #include <asm/ptrace.h>
 
-#define WRAPPER_FRAME_SIZE (STACK_FRAME_OVERHEAD+8)
-
 /*
 * Older glibc version called vdso without allocating a stackframe. This wrapper
 * is just used to allocate a stackframe. See
@@ -20,16 +18,17 @@
	__ALIGN
 __kernel_\func:
	CFI_STARTPROC
-	aghi	%r15,-WRAPPER_FRAME_SIZE
-	CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
-	CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
-	stg	%r14,STACK_FRAME_OVERHEAD(%r15)
-	CFI_REL_OFFSET 14, STACK_FRAME_OVERHEAD
+	aghi	%r15,-STACK_FRAME_VDSO_OVERHEAD
+	CFI_DEF_CFA_OFFSET (STACK_FRAME_USER_OVERHEAD + STACK_FRAME_VDSO_OVERHEAD)
+	CFI_VAL_OFFSET 15,-STACK_FRAME_USER_OVERHEAD
+	stg	%r14,__SFVDSO_RETURN_ADDRESS(%r15)
+	CFI_REL_OFFSET 14,__SFVDSO_RETURN_ADDRESS
+	xc	__SFUSER_BACKCHAIN(8,%r15),__SFUSER_BACKCHAIN(%r15)
	brasl	%r14,__s390_vdso_\func
-	lg	%r14,STACK_FRAME_OVERHEAD(%r15)
+	lg	%r14,__SFVDSO_RETURN_ADDRESS(%r15)
	CFI_RESTORE 14
-	aghi	%r15,WRAPPER_FRAME_SIZE
-	CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
+	aghi	%r15,STACK_FRAME_VDSO_OVERHEAD
+	CFI_DEF_CFA_OFFSET STACK_FRAME_USER_OVERHEAD
	CFI_RESTORE 15
	br	%r14
	CFI_ENDPROC
diff --git a/arch/s390/kernel/vmcore_info.c b/arch/s390/kernel/vmcore_info.c
new file mode 100644
index 0000000000..d296dfc221
--- /dev/null
+++ b/arch/s390/kernel/vmcore_info.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/vmcore_info.h>
+#include <asm/abs_lowcore.h>
+#include <linux/mm.h>
+#include <asm/setup.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+	struct lowcore *abs_lc;
+
+	VMCOREINFO_SYMBOL(lowcore_ptr);
+	VMCOREINFO_SYMBOL(high_memory);
+	VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
+	vmcoreinfo_append_str("SAMODE31=%lx\n", (unsigned long)__samode31);
+	vmcoreinfo_append_str("EAMODE31=%lx\n", (unsigned long)__eamode31);
+	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+	abs_lc = get_abs_lowcore();
+	abs_lc->vmcore_info = paddr_vmcoreinfo_note();
+	put_abs_lowcore(abs_lc);
+}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index e32ef446f4..fb9b32f936 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -59,6 +59,14 @@ SECTIONS
 	} :text = 0x0700
 
 	RO_DATA(PAGE_SIZE)
+	.data.rel.ro : {
+		*(.data.rel.ro .data.rel.ro.*)
+	}
+	.got : {
+		__got_start = .;
+		*(.got)
+		__got_end = .;
+	}
 
 	. = ALIGN(PAGE_SIZE);
 	_sdata = .;		/* Start of data section */
@@ -73,6 +81,9 @@ SECTIONS
 	__end_ro_after_init = .;
 
 	RW_DATA(0x100, PAGE_SIZE, THREAD_SIZE)
+	.data.rel : {
+		*(.data.rel*)
+	}
 	BOOT_DATA_PRESERVED
 
 	. = ALIGN(8);
@@ -181,6 +192,7 @@ SECTIONS
 
 	PERCPU_SECTION(0x100)
 
+#ifdef CONFIG_PIE_BUILD
 	.dynsym ALIGN(8) : {
 		__dynsym_start = .;
 		*(.dynsym)
@@ -191,6 +203,19 @@ SECTIONS
 		*(.rela*)
 		__rela_dyn_end = .;
 	}
+	.dynamic ALIGN(8) : {
+		*(.dynamic)
+	}
+	.dynstr ALIGN(8) : {
+		*(.dynstr)
+	}
+	.hash ALIGN(8) : {
+		*(.hash)
+	}
+	.gnu.hash ALIGN(8) : {
+		*(.gnu.hash)
+	}
+#endif
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;		/* freed after init ends here */
@@ -214,9 +239,14 @@ SECTIONS
 	QUAD(__boot_data_preserved_start)	/* bootdata_preserved_off */
 	QUAD(__boot_data_preserved_end - __boot_data_preserved_start)	/* bootdata_preserved_size */
+#ifdef CONFIG_PIE_BUILD
 	QUAD(__dynsym_start)			/* dynsym_start */
 	QUAD(__rela_dyn_start)			/* rela_dyn_start */
 	QUAD(__rela_dyn_end)			/* rela_dyn_end */
+#else
+	QUAD(__got_start)			/* got_start */
+	QUAD(__got_end)				/* got_end */
+#endif
 	QUAD(_eamode31 - _samode31)		/* amode31_size */
 	QUAD(init_mm)
 	QUAD(swapper_pg_dir)
@@ -235,6 +265,30 @@ SECTIONS
 	DWARF_DEBUG
 	ELF_DETAILS
 
+	/*
+	 * Make sure that the .got.plt is either completely empty or it
+	 * contains only the three reserved double words.
+	 */
+	.got.plt : {
+		*(.got.plt)
+	}
+	ASSERT(SIZEOF(.got.plt) == 0 || SIZEOF(.got.plt) == 0x18, "Unexpected GOT/PLT entries detected!")
+
+	/*
+	 * Sections that should stay zero sized, which is safer to
+	 * explicitly check instead of blindly discarding.
+	 */
+	.plt : {
+		*(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt)
+	}
+	ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+#ifndef CONFIG_PIE_BUILD
+	.rela.dyn : {
+		*(.rela.*) *(.rela_*)
+	}
+	ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
+#endif
+
 	/* Sections to be discarded */
 	DISCARDS
	/DISCARD/ : {