Diffstat (limited to 'arch/s390/lib')
-rw-r--r-- | arch/s390/lib/Makefile       |  18
-rw-r--r-- | arch/s390/lib/delay.c        | 130
-rw-r--r-- | arch/s390/lib/error-inject.c |  14
-rw-r--r-- | arch/s390/lib/find.c         |  76
-rw-r--r-- | arch/s390/lib/mem.S          | 191
-rw-r--r-- | arch/s390/lib/probes.c       | 161
-rw-r--r-- | arch/s390/lib/spinlock.c     | 324
-rw-r--r-- | arch/s390/lib/string.c       | 371
-rw-r--r-- | arch/s390/lib/test_unwind.c  | 353
-rw-r--r-- | arch/s390/lib/uaccess.c      | 448
-rw-r--r-- | arch/s390/lib/xor.c          | 136
11 files changed, 2222 insertions, 0 deletions
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile new file mode 100644 index 000000000..678333936 --- /dev/null +++ b/arch/s390/lib/Makefile @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for s390-specific library files.. +# + +lib-y += delay.o string.o uaccess.o find.o spinlock.o +obj-y += mem.o xor.o +lib-$(CONFIG_KPROBES) += probes.o +lib-$(CONFIG_UPROBES) += probes.o + +# Instrumenting memory accesses to __user data (in different address space) +# produce false positives +KASAN_SANITIZE_uaccess.o := n + +obj-$(CONFIG_S390_UNWIND_SELFTEST) += test_unwind.o +CFLAGS_test_unwind.o += -fno-optimize-sibling-calls + +lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c new file mode 100644 index 000000000..8c0c68e77 --- /dev/null +++ b/arch/s390/lib/delay.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Precise Delay Loops for S390 + * + * Copyright IBM Corp. 1999, 2008 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, + * Heiko Carstens <heiko.carstens@de.ibm.com>, + */ + +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/timex.h> +#include <linux/export.h> +#include <linux/irqflags.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <asm/vtimer.h> +#include <asm/div64.h> +#include <asm/idle.h> + +void __delay(unsigned long loops) +{ + /* + * To end the bloody studid and useless discussion about the + * BogoMips number I took the liberty to define the __delay + * function in a way that that resulting BogoMips number will + * yield the megahertz number of the cpu. The important function + * is udelay and that is done using the tod clock. -- martin. + */ + asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); +} +EXPORT_SYMBOL(__delay); + +static void __udelay_disabled(unsigned long long usecs) +{ + unsigned long cr0, cr0_new, psw_mask; + struct s390_idle_data idle; + u64 end; + + end = get_tod_clock() + (usecs << 12); + __ctl_store(cr0, 0, 0); + cr0_new = cr0 & ~CR0_IRQ_SUBCLASS_MASK; + cr0_new |= (1UL << (63 - 52)); /* enable clock comparator irq */ + __ctl_load(cr0_new, 0, 0); + psw_mask = __extract_psw() | PSW_MASK_EXT | PSW_MASK_WAIT; + set_clock_comparator(end); + set_cpu_flag(CIF_IGNORE_IRQ); + psw_idle(&idle, psw_mask); + trace_hardirqs_off(); + clear_cpu_flag(CIF_IGNORE_IRQ); + set_clock_comparator(S390_lowcore.clock_comparator); + __ctl_load(cr0, 0, 0); +} + +static void __udelay_enabled(unsigned long long usecs) +{ + u64 clock_saved, end; + + end = get_tod_clock_fast() + (usecs << 12); + do { + clock_saved = 0; + if (tod_after(S390_lowcore.clock_comparator, end)) { + clock_saved = local_tick_disable(); + set_clock_comparator(end); + } + enabled_wait(); + if (clock_saved) + local_tick_enable(clock_saved); + } while (get_tod_clock_fast() < end); +} + +/* + * Waits for 'usecs' microseconds using the TOD clock comparator. + */ +void __udelay(unsigned long long usecs) +{ + unsigned long flags; + + preempt_disable(); + local_irq_save(flags); + if (in_irq()) { + __udelay_disabled(usecs); + goto out; + } + if (in_softirq()) { + if (raw_irqs_disabled_flags(flags)) + __udelay_disabled(usecs); + else + __udelay_enabled(usecs); + goto out; + } + if (raw_irqs_disabled_flags(flags)) { + local_bh_disable(); + __udelay_disabled(usecs); + _local_bh_enable(); + goto out; + } + __udelay_enabled(usecs); +out: + local_irq_restore(flags); + preempt_enable(); +} +EXPORT_SYMBOL(__udelay); + +/* + * Simple udelay variant. 
To be used on startup and reboot + * when the interrupt handler isn't working. + */ +void udelay_simple(unsigned long long usecs) +{ + u64 end; + + end = get_tod_clock_fast() + (usecs << 12); + while (get_tod_clock_fast() < end) + cpu_relax(); +} + +void __ndelay(unsigned long long nsecs) +{ + u64 end; + + nsecs <<= 9; + do_div(nsecs, 125); + end = get_tod_clock_fast() + nsecs; + if (nsecs & ~0xfffUL) + __udelay(nsecs >> 12); + while (get_tod_clock_fast() < end) + barrier(); +} +EXPORT_SYMBOL(__ndelay); diff --git a/arch/s390/lib/error-inject.c b/arch/s390/lib/error-inject.c new file mode 100644 index 000000000..8c9d4da87 --- /dev/null +++ b/arch/s390/lib/error-inject.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0+ +#include <asm/ptrace.h> +#include <linux/error-injection.h> +#include <linux/kprobes.h> + +void override_function_with_return(struct pt_regs *regs) +{ + /* + * Emulate 'br 14'. 'regs' is captured by kprobes on entry to some + * kernel function. + */ + regs->psw.addr = regs->gprs[14]; +} +NOKPROBE_SYMBOL(override_function_with_return); diff --git a/arch/s390/lib/find.c b/arch/s390/lib/find.c new file mode 100644 index 000000000..96a8a2e2d --- /dev/null +++ b/arch/s390/lib/find.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MSB0 numbered special bitops handling. + * + * The bits are numbered: + * |0..............63|64............127|128...........191|192...........255| + * + * The reason for this bit numbering is the fact that the hardware sets bits + * in a bitmap starting at bit 0 (MSB) and we don't want to scan the bitmap + * from the 'wrong end'. + */ + +#include <linux/compiler.h> +#include <linux/bitops.h> +#include <linux/export.h> + +unsigned long find_first_bit_inv(const unsigned long *addr, unsigned long size) +{ + const unsigned long *p = addr; + unsigned long result = 0; + unsigned long tmp; + + while (size & ~(BITS_PER_LONG - 1)) { + if ((tmp = *(p++))) + goto found; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = (*p) & (~0UL << (BITS_PER_LONG - size)); + if (!tmp) /* Are any bits set? */ + return result + size; /* Nope. */ +found: + return result + (__fls(tmp) ^ (BITS_PER_LONG - 1)); +} +EXPORT_SYMBOL(find_first_bit_inv); + +unsigned long find_next_bit_inv(const unsigned long *addr, unsigned long size, + unsigned long offset) +{ + const unsigned long *p = addr + (offset / BITS_PER_LONG); + unsigned long result = offset & ~(BITS_PER_LONG - 1); + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset %= BITS_PER_LONG; + if (offset) { + tmp = *(p++); + tmp &= (~0UL >> offset); + if (size < BITS_PER_LONG) + goto found_first; + if (tmp) + goto found_middle; + size -= BITS_PER_LONG; + result += BITS_PER_LONG; + } + while (size & ~(BITS_PER_LONG-1)) { + if ((tmp = *(p++))) + goto found_middle; + result += BITS_PER_LONG; + size -= BITS_PER_LONG; + } + if (!size) + return result; + tmp = *p; +found_first: + tmp &= (~0UL << (BITS_PER_LONG - size)); + if (!tmp) /* Are any bits set? */ + return result + size; /* Nope. */ +found_middle: + return result + (__fls(tmp) ^ (BITS_PER_LONG - 1)); +} +EXPORT_SYMBOL(find_next_bit_inv); diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S new file mode 100644 index 000000000..dc0874f2e --- /dev/null +++ b/arch/s390/lib/mem.S @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * String handling functions. + * + * Copyright IBM Corp. 
2012 + */ + +#include <linux/linkage.h> +#include <asm/export.h> +#include <asm/nospec-insn.h> + + GEN_BR_THUNK %r14 + +/* + * void *memmove(void *dest, const void *src, size_t n) + */ +WEAK(memmove) +ENTRY(__memmove) + ltgr %r4,%r4 + lgr %r1,%r2 + jz .Lmemmove_exit + aghi %r4,-1 + clgr %r2,%r3 + jnh .Lmemmove_forward + la %r5,1(%r4,%r3) + clgr %r2,%r5 + jl .Lmemmove_reverse +.Lmemmove_forward: + srlg %r0,%r4,8 + ltgr %r0,%r0 + jz .Lmemmove_forward_remainder +.Lmemmove_forward_loop: + mvc 0(256,%r1),0(%r3) + la %r1,256(%r1) + la %r3,256(%r3) + brctg %r0,.Lmemmove_forward_loop +.Lmemmove_forward_remainder: + larl %r5,.Lmemmove_mvc + ex %r4,0(%r5) +.Lmemmove_exit: + BR_EX %r14 +.Lmemmove_reverse: + ic %r0,0(%r4,%r3) + stc %r0,0(%r4,%r1) + brctg %r4,.Lmemmove_reverse + ic %r0,0(%r4,%r3) + stc %r0,0(%r4,%r1) + BR_EX %r14 +.Lmemmove_mvc: + mvc 0(1,%r1),0(%r3) +ENDPROC(__memmove) +EXPORT_SYMBOL(memmove) + +/* + * memset implementation + * + * This code corresponds to the C construct below. We do distinguish + * between clearing (c == 0) and setting a memory array (c != 0) simply + * because nearly all memset invocations in the kernel clear memory and + * the xc instruction is preferred in such cases. + * + * void *memset(void *s, int c, size_t n) + * { + * if (likely(c == 0)) + * return __builtin_memset(s, 0, n); + * return __builtin_memset(s, c, n); + * } + */ +WEAK(memset) +ENTRY(__memset) + ltgr %r4,%r4 + jz .Lmemset_exit + ltgr %r3,%r3 + jnz .Lmemset_fill + aghi %r4,-1 + srlg %r3,%r4,8 + ltgr %r3,%r3 + lgr %r1,%r2 + jz .Lmemset_clear_remainder +.Lmemset_clear_loop: + xc 0(256,%r1),0(%r1) + la %r1,256(%r1) + brctg %r3,.Lmemset_clear_loop +.Lmemset_clear_remainder: + larl %r3,.Lmemset_xc + ex %r4,0(%r3) +.Lmemset_exit: + BR_EX %r14 +.Lmemset_fill: + cghi %r4,1 + lgr %r1,%r2 + je .Lmemset_fill_exit + aghi %r4,-2 + srlg %r5,%r4,8 + ltgr %r5,%r5 + jz .Lmemset_fill_remainder +.Lmemset_fill_loop: + stc %r3,0(%r1) + mvc 1(255,%r1),0(%r1) + la %r1,256(%r1) + brctg %r5,.Lmemset_fill_loop +.Lmemset_fill_remainder: + stc %r3,0(%r1) + larl %r5,.Lmemset_mvc + ex %r4,0(%r5) + BR_EX %r14 +.Lmemset_fill_exit: + stc %r3,0(%r1) + BR_EX %r14 +.Lmemset_xc: + xc 0(1,%r1),0(%r1) +.Lmemset_mvc: + mvc 1(1,%r1),0(%r1) +ENDPROC(__memset) +EXPORT_SYMBOL(memset) + +/* + * memcpy implementation + * + * void *memcpy(void *dest, const void *src, size_t n) + */ +WEAK(memcpy) +ENTRY(__memcpy) + ltgr %r4,%r4 + jz .Lmemcpy_exit + aghi %r4,-1 + srlg %r5,%r4,8 + ltgr %r5,%r5 + lgr %r1,%r2 + jnz .Lmemcpy_loop +.Lmemcpy_remainder: + larl %r5,.Lmemcpy_mvc + ex %r4,0(%r5) +.Lmemcpy_exit: + BR_EX %r14 +.Lmemcpy_loop: + mvc 0(256,%r1),0(%r3) + la %r1,256(%r1) + la %r3,256(%r3) + brctg %r5,.Lmemcpy_loop + j .Lmemcpy_remainder +.Lmemcpy_mvc: + mvc 0(1,%r1),0(%r3) +ENDPROC(__memcpy) +EXPORT_SYMBOL(memcpy) + +/* + * __memset16/32/64 + * + * void *__memset16(uint16_t *s, uint16_t v, size_t count) + * void *__memset32(uint32_t *s, uint32_t v, size_t count) + * void *__memset64(uint64_t *s, uint64_t v, size_t count) + */ +.macro __MEMSET bits,bytes,insn +ENTRY(__memset\bits) + ltgr %r4,%r4 + jz .L__memset_exit\bits + cghi %r4,\bytes + je .L__memset_store\bits + aghi %r4,-(\bytes+1) + srlg %r5,%r4,8 + ltgr %r5,%r5 + lgr %r1,%r2 + jz .L__memset_remainder\bits +.L__memset_loop\bits: + \insn %r3,0(%r1) + mvc \bytes(256-\bytes,%r1),0(%r1) + la %r1,256(%r1) + brctg %r5,.L__memset_loop\bits +.L__memset_remainder\bits: + \insn %r3,0(%r1) + larl %r5,.L__memset_mvc\bits + ex %r4,0(%r5) + BR_EX %r14 +.L__memset_store\bits: + \insn %r3,0(%r2) 
+.L__memset_exit\bits: + BR_EX %r14 +.L__memset_mvc\bits: + mvc \bytes(1,%r1),0(%r1) +ENDPROC(__memset\bits) +.endm + +__MEMSET 16,2,sth +EXPORT_SYMBOL(__memset16) + +__MEMSET 32,4,st +EXPORT_SYMBOL(__memset32) + +__MEMSET 64,8,stg +EXPORT_SYMBOL(__memset64) diff --git a/arch/s390/lib/probes.c b/arch/s390/lib/probes.c new file mode 100644 index 000000000..1e184a034 --- /dev/null +++ b/arch/s390/lib/probes.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Common helper functions for kprobes and uprobes + * + * Copyright IBM Corp. 2014 + */ + +#include <linux/errno.h> +#include <asm/kprobes.h> +#include <asm/dis.h> + +int probe_is_prohibited_opcode(u16 *insn) +{ + if (!is_known_insn((unsigned char *)insn)) + return -EINVAL; + switch (insn[0] >> 8) { + case 0x0c: /* bassm */ + case 0x0b: /* bsm */ + case 0x83: /* diag */ + case 0x44: /* ex */ + case 0xac: /* stnsm */ + case 0xad: /* stosm */ + return -EINVAL; + case 0xc6: + switch (insn[0] & 0x0f) { + case 0x00: /* exrl */ + return -EINVAL; + } + } + switch (insn[0]) { + case 0x0101: /* pr */ + case 0xb25a: /* bsa */ + case 0xb240: /* bakr */ + case 0xb258: /* bsg */ + case 0xb218: /* pc */ + case 0xb228: /* pt */ + case 0xb98d: /* epsw */ + case 0xe560: /* tbegin */ + case 0xe561: /* tbeginc */ + case 0xb2f8: /* tend */ + return -EINVAL; + } + return 0; +} + +int probe_get_fixup_type(u16 *insn) +{ + /* default fixup method */ + int fixup = FIXUP_PSW_NORMAL; + + switch (insn[0] >> 8) { + case 0x05: /* balr */ + case 0x0d: /* basr */ + fixup = FIXUP_RETURN_REGISTER; + /* if r2 = 0, no branch will be taken */ + if ((insn[0] & 0x0f) == 0) + fixup |= FIXUP_BRANCH_NOT_TAKEN; + break; + case 0x06: /* bctr */ + case 0x07: /* bcr */ + fixup = FIXUP_BRANCH_NOT_TAKEN; + break; + case 0x45: /* bal */ + case 0x4d: /* bas */ + fixup = FIXUP_RETURN_REGISTER; + break; + case 0x47: /* bc */ + case 0x46: /* bct */ + case 0x86: /* bxh */ + case 0x87: /* bxle */ + fixup = FIXUP_BRANCH_NOT_TAKEN; + break; + case 0x82: /* lpsw */ + fixup = FIXUP_NOT_REQUIRED; + break; + case 0xb2: /* lpswe */ + if ((insn[0] & 0xff) == 0xb2) + fixup = FIXUP_NOT_REQUIRED; + break; + case 0xa7: /* bras */ + if ((insn[0] & 0x0f) == 0x05) + fixup |= FIXUP_RETURN_REGISTER; + break; + case 0xc0: + if ((insn[0] & 0x0f) == 0x05) /* brasl */ + fixup |= FIXUP_RETURN_REGISTER; + break; + case 0xeb: + switch (insn[2] & 0xff) { + case 0x44: /* bxhg */ + case 0x45: /* bxleg */ + fixup = FIXUP_BRANCH_NOT_TAKEN; + break; + } + break; + case 0xe3: /* bctg */ + if ((insn[2] & 0xff) == 0x46) + fixup = FIXUP_BRANCH_NOT_TAKEN; + break; + case 0xec: + switch (insn[2] & 0xff) { + case 0xe5: /* clgrb */ + case 0xe6: /* cgrb */ + case 0xf6: /* crb */ + case 0xf7: /* clrb */ + case 0xfc: /* cgib */ + case 0xfd: /* cglib */ + case 0xfe: /* cib */ + case 0xff: /* clib */ + fixup = FIXUP_BRANCH_NOT_TAKEN; + break; + } + break; + } + return fixup; +} + +int probe_is_insn_relative_long(u16 *insn) +{ + /* Check if we have a RIL-b or RIL-c format instruction which + * we need to modify in order to avoid instruction emulation. 
*/ + switch (insn[0] >> 8) { + case 0xc0: + if ((insn[0] & 0x0f) == 0x00) /* larl */ + return true; + break; + case 0xc4: + switch (insn[0] & 0x0f) { + case 0x02: /* llhrl */ + case 0x04: /* lghrl */ + case 0x05: /* lhrl */ + case 0x06: /* llghrl */ + case 0x07: /* sthrl */ + case 0x08: /* lgrl */ + case 0x0b: /* stgrl */ + case 0x0c: /* lgfrl */ + case 0x0d: /* lrl */ + case 0x0e: /* llgfrl */ + case 0x0f: /* strl */ + return true; + } + break; + case 0xc6: + switch (insn[0] & 0x0f) { + case 0x02: /* pfdrl */ + case 0x04: /* cghrl */ + case 0x05: /* chrl */ + case 0x06: /* clghrl */ + case 0x07: /* clhrl */ + case 0x08: /* cgrl */ + case 0x0a: /* clgrl */ + case 0x0c: /* cgfrl */ + case 0x0d: /* crl */ + case 0x0e: /* clgfrl */ + case 0x0f: /* clrl */ + return true; + } + break; + } + return false; +} diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c new file mode 100644 index 000000000..9b2dab5a6 --- /dev/null +++ b/arch/s390/lib/spinlock.c @@ -0,0 +1,324 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Out of line spinlock code. + * + * Copyright IBM Corp. 2004, 2006 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#include <linux/types.h> +#include <linux/export.h> +#include <linux/spinlock.h> +#include <linux/jiffies.h> +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/percpu.h> +#include <asm/alternative.h> +#include <asm/io.h> + +int spin_retry = -1; + +static int __init spin_retry_init(void) +{ + if (spin_retry < 0) + spin_retry = 1000; + return 0; +} +early_initcall(spin_retry_init); + +/** + * spin_retry= parameter + */ +static int __init spin_retry_setup(char *str) +{ + spin_retry = simple_strtoul(str, &str, 0); + return 1; +} +__setup("spin_retry=", spin_retry_setup); + +struct spin_wait { + struct spin_wait *next, *prev; + int node_id; +} __aligned(32); + +static DEFINE_PER_CPU_ALIGNED(struct spin_wait, spin_wait[4]); + +#define _Q_LOCK_CPU_OFFSET 0 +#define _Q_LOCK_STEAL_OFFSET 16 +#define _Q_TAIL_IDX_OFFSET 18 +#define _Q_TAIL_CPU_OFFSET 20 + +#define _Q_LOCK_CPU_MASK 0x0000ffff +#define _Q_LOCK_STEAL_ADD 0x00010000 +#define _Q_LOCK_STEAL_MASK 0x00030000 +#define _Q_TAIL_IDX_MASK 0x000c0000 +#define _Q_TAIL_CPU_MASK 0xfff00000 + +#define _Q_LOCK_MASK (_Q_LOCK_CPU_MASK | _Q_LOCK_STEAL_MASK) +#define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK) + +void arch_spin_lock_setup(int cpu) +{ + struct spin_wait *node; + int ix; + + node = per_cpu_ptr(&spin_wait[0], cpu); + for (ix = 0; ix < 4; ix++, node++) { + memset(node, 0, sizeof(*node)); + node->node_id = ((cpu + 1) << _Q_TAIL_CPU_OFFSET) + + (ix << _Q_TAIL_IDX_OFFSET); + } +} + +static inline int arch_load_niai4(int *lock) +{ + int owner; + + asm_inline volatile( + ALTERNATIVE("", ".long 0xb2fa0040", 49) /* NIAI 4 */ + " l %0,%1\n" + : "=d" (owner) : "Q" (*lock) : "memory"); + return owner; +} + +static inline int arch_cmpxchg_niai8(int *lock, int old, int new) +{ + int expected = old; + + asm_inline volatile( + ALTERNATIVE("", ".long 0xb2fa0080", 49) /* NIAI 8 */ + " cs %0,%3,%1\n" + : "=d" (old), "=Q" (*lock) + : "0" (old), "d" (new), "Q" (*lock) + : "cc", "memory"); + return expected == old; +} + +static inline struct spin_wait *arch_spin_decode_tail(int lock) +{ + int ix, cpu; + + ix = (lock & _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET; + cpu = (lock & _Q_TAIL_CPU_MASK) >> _Q_TAIL_CPU_OFFSET; + return per_cpu_ptr(&spin_wait[ix], cpu - 1); +} + +static inline int arch_spin_yield_target(int lock, struct spin_wait *node) +{ + if (lock & _Q_LOCK_CPU_MASK) + return lock & 
_Q_LOCK_CPU_MASK; + if (node == NULL || node->prev == NULL) + return 0; /* 0 -> no target cpu */ + while (node->prev) + node = node->prev; + return node->node_id >> _Q_TAIL_CPU_OFFSET; +} + +static inline void arch_spin_lock_queued(arch_spinlock_t *lp) +{ + struct spin_wait *node, *next; + int lockval, ix, node_id, tail_id, old, new, owner, count; + + ix = S390_lowcore.spinlock_index++; + barrier(); + lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */ + node = this_cpu_ptr(&spin_wait[ix]); + node->prev = node->next = NULL; + node_id = node->node_id; + + /* Enqueue the node for this CPU in the spinlock wait queue */ + while (1) { + old = READ_ONCE(lp->lock); + if ((old & _Q_LOCK_CPU_MASK) == 0 && + (old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) { + /* + * The lock is free but there may be waiters. + * With no waiters simply take the lock, if there + * are waiters try to steal the lock. The lock may + * be stolen three times before the next queued + * waiter will get the lock. + */ + new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval; + if (__atomic_cmpxchg_bool(&lp->lock, old, new)) + /* Got the lock */ + goto out; + /* lock passing in progress */ + continue; + } + /* Make the node of this CPU the new tail. */ + new = node_id | (old & _Q_LOCK_MASK); + if (__atomic_cmpxchg_bool(&lp->lock, old, new)) + break; + } + /* Set the 'next' pointer of the tail node in the queue */ + tail_id = old & _Q_TAIL_MASK; + if (tail_id != 0) { + node->prev = arch_spin_decode_tail(tail_id); + WRITE_ONCE(node->prev->next, node); + } + + /* Pass the virtual CPU to the lock holder if it is not running */ + owner = arch_spin_yield_target(old, node); + if (owner && arch_vcpu_is_preempted(owner - 1)) + smp_yield_cpu(owner - 1); + + /* Spin on the CPU local node->prev pointer */ + if (tail_id != 0) { + count = spin_retry; + while (READ_ONCE(node->prev) != NULL) { + if (count-- >= 0) + continue; + count = spin_retry; + /* Query running state of lock holder again. */ + owner = arch_spin_yield_target(old, node); + if (owner && arch_vcpu_is_preempted(owner - 1)) + smp_yield_cpu(owner - 1); + } + } + + /* Spin on the lock value in the spinlock_t */ + count = spin_retry; + while (1) { + old = READ_ONCE(lp->lock); + owner = old & _Q_LOCK_CPU_MASK; + if (!owner) { + tail_id = old & _Q_TAIL_MASK; + new = ((tail_id != node_id) ? tail_id : 0) | lockval; + if (__atomic_cmpxchg_bool(&lp->lock, old, new)) + /* Got the lock */ + break; + continue; + } + if (count-- >= 0) + continue; + count = spin_retry; + if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1)) + smp_yield_cpu(owner - 1); + } + + /* Pass lock_spin job to next CPU in the queue */ + if (node_id && tail_id != node_id) { + /* Wait until the next CPU has set up the 'next' pointer */ + while ((next = READ_ONCE(node->next)) == NULL) + ; + next->prev = NULL; + } + + out: + S390_lowcore.spinlock_index--; +} + +static inline void arch_spin_lock_classic(arch_spinlock_t *lp) +{ + int lockval, old, new, owner, count; + + lockval = SPINLOCK_LOCKVAL; /* cpu + 1 */ + + /* Pass the virtual CPU to the lock holder if it is not running */ + owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL); + if (owner && arch_vcpu_is_preempted(owner - 1)) + smp_yield_cpu(owner - 1); + + count = spin_retry; + while (1) { + old = arch_load_niai4(&lp->lock); + owner = old & _Q_LOCK_CPU_MASK; + /* Try to get the lock if it is free. 
*/ + if (!owner) { + new = (old & _Q_TAIL_MASK) | lockval; + if (arch_cmpxchg_niai8(&lp->lock, old, new)) { + /* Got the lock */ + return; + } + continue; + } + if (count-- >= 0) + continue; + count = spin_retry; + if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(owner - 1)) + smp_yield_cpu(owner - 1); + } +} + +void arch_spin_lock_wait(arch_spinlock_t *lp) +{ + if (test_cpu_flag(CIF_DEDICATED_CPU)) + arch_spin_lock_queued(lp); + else + arch_spin_lock_classic(lp); +} +EXPORT_SYMBOL(arch_spin_lock_wait); + +int arch_spin_trylock_retry(arch_spinlock_t *lp) +{ + int cpu = SPINLOCK_LOCKVAL; + int owner, count; + + for (count = spin_retry; count > 0; count--) { + owner = READ_ONCE(lp->lock); + /* Try to get the lock if it is free. */ + if (!owner) { + if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu)) + return 1; + } + } + return 0; +} +EXPORT_SYMBOL(arch_spin_trylock_retry); + +void arch_read_lock_wait(arch_rwlock_t *rw) +{ + if (unlikely(in_interrupt())) { + while (READ_ONCE(rw->cnts) & 0x10000) + barrier(); + return; + } + + /* Remove this reader again to allow recursive read locking */ + __atomic_add_const(-1, &rw->cnts); + /* Put the reader into the wait queue */ + arch_spin_lock(&rw->wait); + /* Now add this reader to the count value again */ + __atomic_add_const(1, &rw->cnts); + /* Loop until the writer is done */ + while (READ_ONCE(rw->cnts) & 0x10000) + barrier(); + arch_spin_unlock(&rw->wait); +} +EXPORT_SYMBOL(arch_read_lock_wait); + +void arch_write_lock_wait(arch_rwlock_t *rw) +{ + int old; + + /* Add this CPU to the write waiters */ + __atomic_add(0x20000, &rw->cnts); + + /* Put the writer into the wait queue */ + arch_spin_lock(&rw->wait); + + while (1) { + old = READ_ONCE(rw->cnts); + if ((old & 0x1ffff) == 0 && + __atomic_cmpxchg_bool(&rw->cnts, old, old | 0x10000)) + /* Got the lock */ + break; + barrier(); + } + + arch_spin_unlock(&rw->wait); +} +EXPORT_SYMBOL(arch_write_lock_wait); + +void arch_spin_relax(arch_spinlock_t *lp) +{ + int cpu; + + cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK; + if (!cpu) + return; + if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(cpu - 1)) + return; + smp_yield_cpu(cpu - 1); +} +EXPORT_SYMBOL(arch_spin_relax); diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c new file mode 100644 index 000000000..db4e539d8 --- /dev/null +++ b/arch/s390/lib/string.c @@ -0,0 +1,371 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Optimized string functions + * + * S390 version + * Copyright IBM Corp. 
2004 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#define IN_ARCH_STRING_C 1 + +#include <linux/types.h> +#include <linux/string.h> +#include <linux/export.h> + +/* + * Helper functions to find the end of a string + */ +static inline char *__strend(const char *s) +{ + register unsigned long r0 asm("0") = 0; + + asm volatile ("0: srst %0,%1\n" + " jo 0b" + : "+d" (r0), "+a" (s) : : "cc", "memory"); + return (char *) r0; +} + +static inline char *__strnend(const char *s, size_t n) +{ + register unsigned long r0 asm("0") = 0; + const char *p = s + n; + + asm volatile ("0: srst %0,%1\n" + " jo 0b" + : "+d" (p), "+a" (s) : "d" (r0) : "cc", "memory"); + return (char *) p; +} + +/** + * strlen - Find the length of a string + * @s: The string to be sized + * + * returns the length of @s + */ +#ifdef __HAVE_ARCH_STRLEN +size_t strlen(const char *s) +{ + return __strend(s) - s; +} +EXPORT_SYMBOL(strlen); +#endif + +/** + * strnlen - Find the length of a length-limited string + * @s: The string to be sized + * @n: The maximum number of bytes to search + * + * returns the minimum of the length of @s and @n + */ +#ifdef __HAVE_ARCH_STRNLEN +size_t strnlen(const char *s, size_t n) +{ + return __strnend(s, n) - s; +} +EXPORT_SYMBOL(strnlen); +#endif + +/** + * strcpy - Copy a %NUL terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + * + * returns a pointer to @dest + */ +#ifdef __HAVE_ARCH_STRCPY +char *strcpy(char *dest, const char *src) +{ + register int r0 asm("0") = 0; + char *ret = dest; + + asm volatile ("0: mvst %0,%1\n" + " jo 0b" + : "+&a" (dest), "+&a" (src) : "d" (r0) + : "cc", "memory" ); + return ret; +} +EXPORT_SYMBOL(strcpy); +#endif + +/** + * strlcpy - Copy a %NUL terminated string into a sized buffer + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @size: size of destination buffer + * + * Compatible with *BSD: the result is always a valid + * NUL-terminated string that fits in the buffer (unless, + * of course, the buffer size is zero). It does not pad + * out the result like strncpy() does. + */ +#ifdef __HAVE_ARCH_STRLCPY +size_t strlcpy(char *dest, const char *src, size_t size) +{ + size_t ret = __strend(src) - src; + + if (size) { + size_t len = (ret >= size) ? size-1 : ret; + dest[len] = '\0'; + memcpy(dest, src, len); + } + return ret; +} +EXPORT_SYMBOL(strlcpy); +#endif + +/** + * strncpy - Copy a length-limited, %NUL-terminated string + * @dest: Where to copy the string to + * @src: Where to copy the string from + * @n: The maximum number of bytes to copy + * + * The result is not %NUL-terminated if the source exceeds + * @n bytes. 
+ */ +#ifdef __HAVE_ARCH_STRNCPY +char *strncpy(char *dest, const char *src, size_t n) +{ + size_t len = __strnend(src, n) - src; + memset(dest + len, 0, n - len); + memcpy(dest, src, len); + return dest; +} +EXPORT_SYMBOL(strncpy); +#endif + +/** + * strcat - Append one %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + * + * returns a pointer to @dest + */ +#ifdef __HAVE_ARCH_STRCAT +char *strcat(char *dest, const char *src) +{ + register int r0 asm("0") = 0; + unsigned long dummy; + char *ret = dest; + + asm volatile ("0: srst %0,%1\n" + " jo 0b\n" + "1: mvst %0,%2\n" + " jo 1b" + : "=&a" (dummy), "+a" (dest), "+a" (src) + : "d" (r0), "0" (0UL) : "cc", "memory" ); + return ret; +} +EXPORT_SYMBOL(strcat); +#endif + +/** + * strlcat - Append a length-limited, %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + * @n: The size of the destination buffer. + */ +#ifdef __HAVE_ARCH_STRLCAT +size_t strlcat(char *dest, const char *src, size_t n) +{ + size_t dsize = __strend(dest) - dest; + size_t len = __strend(src) - src; + size_t res = dsize + len; + + if (dsize < n) { + dest += dsize; + n -= dsize; + if (len >= n) + len = n - 1; + dest[len] = '\0'; + memcpy(dest, src, len); + } + return res; +} +EXPORT_SYMBOL(strlcat); +#endif + +/** + * strncat - Append a length-limited, %NUL-terminated string to another + * @dest: The string to be appended to + * @src: The string to append to it + * @n: The maximum numbers of bytes to copy + * + * returns a pointer to @dest + * + * Note that in contrast to strncpy, strncat ensures the result is + * terminated. + */ +#ifdef __HAVE_ARCH_STRNCAT +char *strncat(char *dest, const char *src, size_t n) +{ + size_t len = __strnend(src, n) - src; + char *p = __strend(dest); + + p[len] = '\0'; + memcpy(p, src, len); + return dest; +} +EXPORT_SYMBOL(strncat); +#endif + +/** + * strcmp - Compare two strings + * @s1: One string + * @s2: Another string + * + * returns 0 if @s1 and @s2 are equal, + * < 0 if @s1 is less than @s2 + * > 0 if @s1 is greater than @s2 + */ +#ifdef __HAVE_ARCH_STRCMP +int strcmp(const char *s1, const char *s2) +{ + register int r0 asm("0") = 0; + int ret = 0; + + asm volatile ("0: clst %2,%3\n" + " jo 0b\n" + " je 1f\n" + " ic %0,0(%2)\n" + " ic %1,0(%3)\n" + " sr %0,%1\n" + "1:" + : "+d" (ret), "+d" (r0), "+a" (s1), "+a" (s2) + : : "cc", "memory"); + return ret; +} +EXPORT_SYMBOL(strcmp); +#endif + +/** + * strrchr - Find the last occurrence of a character in a string + * @s: The string to be searched + * @c: The character to search for + */ +#ifdef __HAVE_ARCH_STRRCHR +char *strrchr(const char *s, int c) +{ + ssize_t len = __strend(s) - s; + + do { + if (s[len] == (char)c) + return (char *)s + len; + } while (--len >= 0); + return NULL; +} +EXPORT_SYMBOL(strrchr); +#endif + +static inline int clcle(const char *s1, unsigned long l1, + const char *s2, unsigned long l2) +{ + register unsigned long r2 asm("2") = (unsigned long) s1; + register unsigned long r3 asm("3") = (unsigned long) l1; + register unsigned long r4 asm("4") = (unsigned long) s2; + register unsigned long r5 asm("5") = (unsigned long) l2; + int cc; + + asm volatile ("0: clcle %1,%3,0\n" + " jo 0b\n" + " ipm %0\n" + " srl %0,28" + : "=&d" (cc), "+a" (r2), "+a" (r3), + "+a" (r4), "+a" (r5) : : "cc", "memory"); + return cc; +} + +/** + * strstr - Find the first substring in a %NUL terminated string + * @s1: The string to be searched + * @s2: The string to search 
for + */ +#ifdef __HAVE_ARCH_STRSTR +char *strstr(const char *s1, const char *s2) +{ + int l1, l2; + + l2 = __strend(s2) - s2; + if (!l2) + return (char *) s1; + l1 = __strend(s1) - s1; + while (l1-- >= l2) { + int cc; + + cc = clcle(s1, l2, s2, l2); + if (!cc) + return (char *) s1; + s1++; + } + return NULL; +} +EXPORT_SYMBOL(strstr); +#endif + +/** + * memchr - Find a character in an area of memory. + * @s: The memory area + * @c: The byte to search for + * @n: The size of the area. + * + * returns the address of the first occurrence of @c, or %NULL + * if @c is not found + */ +#ifdef __HAVE_ARCH_MEMCHR +void *memchr(const void *s, int c, size_t n) +{ + register int r0 asm("0") = (char) c; + const void *ret = s + n; + + asm volatile ("0: srst %0,%1\n" + " jo 0b\n" + " jl 1f\n" + " la %0,0\n" + "1:" + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory"); + return (void *) ret; +} +EXPORT_SYMBOL(memchr); +#endif + +/** + * memcmp - Compare two areas of memory + * @s1: One area of memory + * @s2: Another area of memory + * @n: The size of the area. + */ +#ifdef __HAVE_ARCH_MEMCMP +int memcmp(const void *s1, const void *s2, size_t n) +{ + int ret; + + ret = clcle(s1, n, s2, n); + if (ret) + ret = ret == 1 ? -1 : 1; + return ret; +} +EXPORT_SYMBOL(memcmp); +#endif + +/** + * memscan - Find a character in an area of memory. + * @s: The memory area + * @c: The byte to search for + * @n: The size of the area. + * + * returns the address of the first occurrence of @c, or 1 byte past + * the area if @c is not found + */ +#ifdef __HAVE_ARCH_MEMSCAN +void *memscan(void *s, int c, size_t n) +{ + register int r0 asm("0") = (char) c; + const void *ret = s + n; + + asm volatile ("0: srst %0,%1\n" + " jo 0b\n" + : "+a" (ret), "+&a" (s) : "d" (r0) : "cc", "memory"); + return (void *) ret; +} +EXPORT_SYMBOL(memscan); +#endif diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c new file mode 100644 index 000000000..b0b67e6d1 --- /dev/null +++ b/arch/s390/lib/test_unwind.c @@ -0,0 +1,353 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Test module for unwind_for_each_frame + */ + +#define pr_fmt(fmt) "test_unwind: " fmt +#include <asm/unwind.h> +#include <linux/completion.h> +#include <linux/kallsyms.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/kprobes.h> +#include <linux/wait.h> +#include <asm/irq.h> +#include <asm/delay.h> + +#define BT_BUF_SIZE (PAGE_SIZE * 4) + +/* + * To avoid printk line limit split backtrace by lines + */ +static void print_backtrace(char *bt) +{ + char *p; + + while (true) { + p = strsep(&bt, "\n"); + if (!p) + break; + pr_err("%s\n", p); + } +} + +/* + * Calls unwind_for_each_frame(task, regs, sp) and verifies that the result + * contains unwindme_func2 followed by unwindme_func1. + */ +static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs, + unsigned long sp) +{ + int frame_count, prev_is_func2, seen_func2_func1; + const int max_frames = 128; + struct unwind_state state; + size_t bt_pos = 0; + int ret = 0; + char *bt; + + bt = kmalloc(BT_BUF_SIZE, GFP_ATOMIC); + if (!bt) { + pr_err("failed to allocate backtrace buffer\n"); + return -ENOMEM; + } + /* Unwind. 
*/ + frame_count = 0; + prev_is_func2 = 0; + seen_func2_func1 = 0; + unwind_for_each_frame(&state, task, regs, sp) { + unsigned long addr = unwind_get_return_address(&state); + char sym[KSYM_SYMBOL_LEN]; + + if (frame_count++ == max_frames) + break; + if (state.reliable && !addr) { + pr_err("unwind state reliable but addr is 0\n"); + kfree(bt); + return -EINVAL; + } + sprint_symbol(sym, addr); + if (bt_pos < BT_BUF_SIZE) { + bt_pos += snprintf(bt + bt_pos, BT_BUF_SIZE - bt_pos, + state.reliable ? " [%-7s%px] %pSR\n" : + "([%-7s%px] %pSR)\n", + stack_type_name(state.stack_info.type), + (void *)state.sp, (void *)state.ip); + if (bt_pos >= BT_BUF_SIZE) + pr_err("backtrace buffer is too small\n"); + } + frame_count += 1; + if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1")) + seen_func2_func1 = 1; + prev_is_func2 = str_has_prefix(sym, "unwindme_func2"); + } + + /* Check the results. */ + if (unwind_error(&state)) { + pr_err("unwind error\n"); + ret = -EINVAL; + } + if (!seen_func2_func1) { + pr_err("unwindme_func2 and unwindme_func1 not found\n"); + ret = -EINVAL; + } + if (frame_count == max_frames) { + pr_err("Maximum number of frames exceeded\n"); + ret = -EINVAL; + } + if (ret) + print_backtrace(bt); + kfree(bt); + return ret; +} + +/* State of the task being unwound. */ +struct unwindme { + int flags; + int ret; + struct task_struct *task; + struct completion task_ready; + wait_queue_head_t task_wq; + unsigned long sp; +}; + +static struct unwindme *unwindme; + +/* Values of unwindme.flags. */ +#define UWM_DEFAULT 0x0 +#define UWM_THREAD 0x1 /* Unwind a separate task. */ +#define UWM_REGS 0x2 /* Pass regs to test_unwind(). */ +#define UWM_SP 0x4 /* Pass sp to test_unwind(). */ +#define UWM_CALLER 0x8 /* Unwind starting from caller. */ +#define UWM_SWITCH_STACK 0x10 /* Use CALL_ON_STACK. */ +#define UWM_IRQ 0x20 /* Unwind from irq context. */ +#define UWM_PGM 0x40 /* Unwind from program check handler. */ + +static __always_inline unsigned long get_psw_addr(void) +{ + unsigned long psw_addr; + + asm volatile( + "basr %[psw_addr],0\n" + : [psw_addr] "=d" (psw_addr)); + return psw_addr; +} + +#ifdef CONFIG_KPROBES +static int pgm_pre_handler(struct kprobe *p, struct pt_regs *regs) +{ + struct unwindme *u = unwindme; + + u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL, + (u->flags & UWM_SP) ? u->sp : 0); + return 0; +} +#endif + +/* This function may or may not appear in the backtrace. */ +static noinline int unwindme_func4(struct unwindme *u) +{ + if (!(u->flags & UWM_CALLER)) + u->sp = current_frame_address(); + if (u->flags & UWM_THREAD) { + complete(&u->task_ready); + wait_event(u->task_wq, kthread_should_park()); + kthread_parkme(); + return 0; +#ifdef CONFIG_KPROBES + } else if (u->flags & UWM_PGM) { + struct kprobe kp; + int ret; + + unwindme = u; + memset(&kp, 0, sizeof(kp)); + kp.symbol_name = "do_report_trap"; + kp.pre_handler = pgm_pre_handler; + ret = register_kprobe(&kp); + if (ret < 0) { + pr_err("register_kprobe failed %d\n", ret); + return -EINVAL; + } + + /* + * Trigger operation exception; use insn notation to bypass + * llvm's integrated assembler sanity checks. + */ + asm volatile( + " .insn e,0x0000\n" /* illegal opcode */ + "0: nopr %%r7\n" + EX_TABLE(0b, 0b) + :); + + unregister_kprobe(&kp); + unwindme = NULL; + return u->ret; +#endif + } else { + struct pt_regs regs; + + memset(®s, 0, sizeof(regs)); + regs.psw.addr = get_psw_addr(); + regs.gprs[15] = current_stack_pointer(); + return test_unwind(NULL, + (u->flags & UWM_REGS) ? 
®s : NULL, + (u->flags & UWM_SP) ? u->sp : 0); + } +} + +/* This function may or may not appear in the backtrace. */ +static noinline int unwindme_func3(struct unwindme *u) +{ + u->sp = current_frame_address(); + return unwindme_func4(u); +} + +/* This function must appear in the backtrace. */ +static noinline int unwindme_func2(struct unwindme *u) +{ + unsigned long flags; + int rc; + + if (u->flags & UWM_SWITCH_STACK) { + local_irq_save(flags); + local_mcck_disable(); + rc = CALL_ON_STACK(unwindme_func3, S390_lowcore.nodat_stack, 1, u); + local_mcck_enable(); + local_irq_restore(flags); + return rc; + } else { + return unwindme_func3(u); + } +} + +/* This function must follow unwindme_func2 in the backtrace. */ +static noinline int unwindme_func1(void *u) +{ + return unwindme_func2((struct unwindme *)u); +} + +static void unwindme_irq_handler(struct ext_code ext_code, + unsigned int param32, + unsigned long param64) +{ + struct unwindme *u = READ_ONCE(unwindme); + + if (u && u->task == current) { + unwindme = NULL; + u->task = NULL; + u->ret = unwindme_func1(u); + } +} + +static int test_unwind_irq(struct unwindme *u) +{ + preempt_disable(); + if (register_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler)) { + pr_info("Couldn't register external interrupt handler"); + return -1; + } + u->task = current; + unwindme = u; + udelay(1); + unregister_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler); + preempt_enable(); + return u->ret; +} + +/* Spawns a task and passes it to test_unwind(). */ +static int test_unwind_task(struct unwindme *u) +{ + struct task_struct *task; + int ret; + + /* Initialize thread-related fields. */ + init_completion(&u->task_ready); + init_waitqueue_head(&u->task_wq); + + /* + * Start the task and wait until it reaches unwindme_func4() and sleeps + * in (task_ready, unwind_done] range. + */ + task = kthread_run(unwindme_func1, u, "%s", __func__); + if (IS_ERR(task)) { + pr_err("kthread_run() failed\n"); + return PTR_ERR(task); + } + /* + * Make sure task reaches unwindme_func4 before parking it, + * we might park it before kthread function has been executed otherwise + */ + wait_for_completion(&u->task_ready); + kthread_park(task); + /* Unwind. */ + ret = test_unwind(task, NULL, (u->flags & UWM_SP) ? 
u->sp : 0); + kthread_stop(task); + return ret; +} + +static int test_unwind_flags(int flags) +{ + struct unwindme u; + + u.flags = flags; + if (u.flags & UWM_THREAD) + return test_unwind_task(&u); + else if (u.flags & UWM_IRQ) + return test_unwind_irq(&u); + else + return unwindme_func1(&u); +} + +static int test_unwind_init(void) +{ + int ret = 0; + +#define TEST(flags) \ +do { \ + pr_info("[ RUN ] " #flags "\n"); \ + if (!test_unwind_flags((flags))) { \ + pr_info("[ OK ] " #flags "\n"); \ + } else { \ + pr_err("[ FAILED ] " #flags "\n"); \ + ret = -EINVAL; \ + } \ +} while (0) + + TEST(UWM_DEFAULT); + TEST(UWM_SP); + TEST(UWM_REGS); + TEST(UWM_SWITCH_STACK); + TEST(UWM_SP | UWM_REGS); + TEST(UWM_CALLER | UWM_SP); + TEST(UWM_CALLER | UWM_SP | UWM_REGS); + TEST(UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK); + TEST(UWM_THREAD); + TEST(UWM_THREAD | UWM_SP); + TEST(UWM_THREAD | UWM_CALLER | UWM_SP); + TEST(UWM_IRQ); + TEST(UWM_IRQ | UWM_SWITCH_STACK); + TEST(UWM_IRQ | UWM_SP); + TEST(UWM_IRQ | UWM_REGS); + TEST(UWM_IRQ | UWM_SP | UWM_REGS); + TEST(UWM_IRQ | UWM_CALLER | UWM_SP); + TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS); + TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK); +#ifdef CONFIG_KPROBES + TEST(UWM_PGM); + TEST(UWM_PGM | UWM_SP); + TEST(UWM_PGM | UWM_REGS); + TEST(UWM_PGM | UWM_SP | UWM_REGS); +#endif +#undef TEST + + return ret; +} + +static void test_unwind_exit(void) +{ +} + +module_init(test_unwind_init); +module_exit(test_unwind_exit); +MODULE_LICENSE("GPL"); diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c new file mode 100644 index 000000000..fcfd78f99 --- /dev/null +++ b/arch/s390/lib/uaccess.c @@ -0,0 +1,448 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Standard user space access functions based on mvcp/mvcs and doing + * interesting things in the secondary space mode. + * + * Copyright IBM Corp. 
2006,2014 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), + * Gerald Schaefer (gerald.schaefer@de.ibm.com) + */ + +#include <linux/jump_label.h> +#include <linux/uaccess.h> +#include <linux/export.h> +#include <linux/errno.h> +#include <linux/mm.h> +#include <asm/mmu_context.h> +#include <asm/facility.h> + +#ifndef CONFIG_HAVE_MARCH_Z10_FEATURES +static DEFINE_STATIC_KEY_FALSE(have_mvcos); + +static int __init uaccess_init(void) +{ + if (test_facility(27)) + static_branch_enable(&have_mvcos); + return 0; +} +early_initcall(uaccess_init); + +static inline int copy_with_mvcos(void) +{ + if (static_branch_likely(&have_mvcos)) + return 1; + return 0; +} +#else +static inline int copy_with_mvcos(void) +{ + return 1; +} +#endif + +void set_fs(mm_segment_t fs) +{ + current->thread.mm_segment = fs; + if (fs == USER_DS) { + __ctl_load(S390_lowcore.user_asce, 1, 1); + clear_cpu_flag(CIF_ASCE_PRIMARY); + } else { + __ctl_load(S390_lowcore.kernel_asce, 1, 1); + set_cpu_flag(CIF_ASCE_PRIMARY); + } + if (fs & 1) { + if (fs == USER_DS_SACF) + __ctl_load(S390_lowcore.user_asce, 7, 7); + else + __ctl_load(S390_lowcore.kernel_asce, 7, 7); + set_cpu_flag(CIF_ASCE_SECONDARY); + } +} +EXPORT_SYMBOL(set_fs); + +mm_segment_t enable_sacf_uaccess(void) +{ + mm_segment_t old_fs; + unsigned long asce, cr; + unsigned long flags; + + old_fs = current->thread.mm_segment; + if (old_fs & 1) + return old_fs; + /* protect against a concurrent page table upgrade */ + local_irq_save(flags); + current->thread.mm_segment |= 1; + asce = S390_lowcore.kernel_asce; + if (likely(old_fs == USER_DS)) { + __ctl_store(cr, 1, 1); + if (cr != S390_lowcore.kernel_asce) { + __ctl_load(S390_lowcore.kernel_asce, 1, 1); + set_cpu_flag(CIF_ASCE_PRIMARY); + } + asce = S390_lowcore.user_asce; + } + __ctl_store(cr, 7, 7); + if (cr != asce) { + __ctl_load(asce, 7, 7); + set_cpu_flag(CIF_ASCE_SECONDARY); + } + local_irq_restore(flags); + return old_fs; +} +EXPORT_SYMBOL(enable_sacf_uaccess); + +void disable_sacf_uaccess(mm_segment_t old_fs) +{ + current->thread.mm_segment = old_fs; + if (old_fs == USER_DS && test_facility(27)) { + __ctl_load(S390_lowcore.user_asce, 1, 1); + clear_cpu_flag(CIF_ASCE_PRIMARY); + } +} +EXPORT_SYMBOL(disable_sacf_uaccess); + +static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr, + unsigned long size) +{ + register unsigned long reg0 asm("0") = 0x01UL; + unsigned long tmp1, tmp2; + + tmp1 = -4096UL; + asm volatile( + "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" + "6: jz 4f\n" + "1: algr %0,%3\n" + " slgr %1,%3\n" + " slgr %2,%3\n" + " j 0b\n" + "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ + " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ + " slgr %4,%1\n" + " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ + " jnh 5f\n" + "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" + "7: slgr %0,%4\n" + " j 5f\n" + "4: slgr %0,%0\n" + "5:\n" + EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) + : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) + : "d" (reg0) : "cc", "memory"); + return size; +} + +static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr, + unsigned long size) +{ + unsigned long tmp1, tmp2; + mm_segment_t old_fs; + + old_fs = enable_sacf_uaccess(); + tmp1 = -256UL; + asm volatile( + " sacf 0\n" + "0: mvcp 0(%0,%2),0(%1),%3\n" + "7: jz 5f\n" + "1: algr %0,%3\n" + " la %1,256(%1)\n" + " la %2,256(%2)\n" + "2: mvcp 0(%0,%2),0(%1),%3\n" + "8: jnz 1b\n" + " j 5f\n" + "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ + " lghi %3,-4096\n" + " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ + " slgr %4,%1\n" + " clgr %0,%4\n" /* copy crosses next page boundary? */ + " jnh 6f\n" + "4: mvcp 0(%4,%2),0(%1),%3\n" + "9: slgr %0,%4\n" + " j 6f\n" + "5: slgr %0,%0\n" + "6: sacf 768\n" + EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) + EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) + : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) + : : "cc", "memory"); + disable_sacf_uaccess(old_fs); + return size; +} + +unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if (copy_with_mvcos()) + return copy_from_user_mvcos(to, from, n); + return copy_from_user_mvcp(to, from, n); +} +EXPORT_SYMBOL(raw_copy_from_user); + +static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x, + unsigned long size) +{ + register unsigned long reg0 asm("0") = 0x010000UL; + unsigned long tmp1, tmp2; + + tmp1 = -4096UL; + asm volatile( + "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" + "6: jz 4f\n" + "1: algr %0,%3\n" + " slgr %1,%3\n" + " slgr %2,%3\n" + " j 0b\n" + "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */ + " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ + " slgr %4,%1\n" + " clgr %0,%4\n" /* copy crosses next page boundary? */ + " jnh 5f\n" + "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n" + "7: slgr %0,%4\n" + " j 5f\n" + "4: slgr %0,%0\n" + "5:\n" + EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) + : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) + : "d" (reg0) : "cc", "memory"); + return size; +} + +static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x, + unsigned long size) +{ + unsigned long tmp1, tmp2; + mm_segment_t old_fs; + + old_fs = enable_sacf_uaccess(); + tmp1 = -256UL; + asm volatile( + " sacf 0\n" + "0: mvcs 0(%0,%1),0(%2),%3\n" + "7: jz 5f\n" + "1: algr %0,%3\n" + " la %1,256(%1)\n" + " la %2,256(%2)\n" + "2: mvcs 0(%0,%1),0(%2),%3\n" + "8: jnz 1b\n" + " j 5f\n" + "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ + " lghi %3,-4096\n" + " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ + " slgr %4,%1\n" + " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ + " jnh 6f\n" + "4: mvcs 0(%4,%1),0(%2),%3\n" + "9: slgr %0,%4\n" + " j 6f\n" + "5: slgr %0,%0\n" + "6: sacf 768\n" + EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) + EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) + : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) + : : "cc", "memory"); + disable_sacf_uaccess(old_fs); + return size; +} + +unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + if (copy_with_mvcos()) + return copy_to_user_mvcos(to, from, n); + return copy_to_user_mvcs(to, from, n); +} +EXPORT_SYMBOL(raw_copy_to_user); + +static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from, + unsigned long size) +{ + register unsigned long reg0 asm("0") = 0x010001UL; + unsigned long tmp1, tmp2; + + tmp1 = -4096UL; + /* FIXME: copy with reduced length. */ + asm volatile( + "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n" + " jz 2f\n" + "1: algr %0,%3\n" + " slgr %1,%3\n" + " slgr %2,%3\n" + " j 0b\n" + "2:slgr %0,%0\n" + "3: \n" + EX_TABLE(0b,3b) + : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2) + : "d" (reg0) : "cc", "memory"); + return size; +} + +static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from, + unsigned long size) +{ + mm_segment_t old_fs; + unsigned long tmp1; + + old_fs = enable_sacf_uaccess(); + asm volatile( + " sacf 256\n" + " aghi %0,-1\n" + " jo 5f\n" + " bras %3,3f\n" + "0: aghi %0,257\n" + "1: mvc 0(1,%1),0(%2)\n" + " la %1,1(%1)\n" + " la %2,1(%2)\n" + " aghi %0,-1\n" + " jnz 1b\n" + " j 5f\n" + "2: mvc 0(256,%1),0(%2)\n" + " la %1,256(%1)\n" + " la %2,256(%2)\n" + "3: aghi %0,-256\n" + " jnm 2b\n" + "4: ex %0,1b-0b(%3)\n" + "5: slgr %0,%0\n" + "6: sacf 768\n" + EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) + : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1) + : : "cc", "memory"); + disable_sacf_uaccess(old_fs); + return size; +} + +unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) +{ + if (copy_with_mvcos()) + return copy_in_user_mvcos(to, from, n); + return copy_in_user_mvc(to, from, n); +} +EXPORT_SYMBOL(raw_copy_in_user); + +static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size) +{ + register unsigned long reg0 asm("0") = 0x010000UL; + unsigned long tmp1, tmp2; + + tmp1 = -4096UL; + asm volatile( + "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n" + " jz 4f\n" + "1: algr %0,%2\n" + " slgr %1,%2\n" + " j 0b\n" + "2: la %3,4095(%1)\n"/* %4 = to + 4095 */ + " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */ + " slgr %3,%1\n" + " clgr %0,%3\n" /* copy crosses next page boundary? */ + " jnh 5f\n" + "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n" + " slgr %0,%3\n" + " j 5f\n" + "4: slgr %0,%0\n" + "5:\n" + EX_TABLE(0b,2b) EX_TABLE(3b,5b) + : "+&a" (size), "+&a" (to), "+a" (tmp1), "=&a" (tmp2) + : "a" (empty_zero_page), "d" (reg0) : "cc", "memory"); + return size; +} + +static inline unsigned long clear_user_xc(void __user *to, unsigned long size) +{ + mm_segment_t old_fs; + unsigned long tmp1, tmp2; + + old_fs = enable_sacf_uaccess(); + asm volatile( + " sacf 256\n" + " aghi %0,-1\n" + " jo 5f\n" + " bras %3,3f\n" + " xc 0(1,%1),0(%1)\n" + "0: aghi %0,257\n" + " la %2,255(%1)\n" /* %2 = ptr + 255 */ + " srl %2,12\n" + " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */ + " slgr %2,%1\n" + " clgr %0,%2\n" /* clear crosses next page boundary? 
*/ + " jnh 5f\n" + " aghi %2,-1\n" + "1: ex %2,0(%3)\n" + " aghi %2,1\n" + " slgr %0,%2\n" + " j 5f\n" + "2: xc 0(256,%1),0(%1)\n" + " la %1,256(%1)\n" + "3: aghi %0,-256\n" + " jnm 2b\n" + "4: ex %0,0(%3)\n" + "5: slgr %0,%0\n" + "6: sacf 768\n" + EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b) + : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2) + : : "cc", "memory"); + disable_sacf_uaccess(old_fs); + return size; +} + +unsigned long __clear_user(void __user *to, unsigned long size) +{ + if (copy_with_mvcos()) + return clear_user_mvcos(to, size); + return clear_user_xc(to, size); +} +EXPORT_SYMBOL(__clear_user); + +static inline unsigned long strnlen_user_srst(const char __user *src, + unsigned long size) +{ + register unsigned long reg0 asm("0") = 0; + unsigned long tmp1, tmp2; + + asm volatile( + " la %2,0(%1)\n" + " la %3,0(%0,%1)\n" + " slgr %0,%0\n" + " sacf 256\n" + "0: srst %3,%2\n" + " jo 0b\n" + " la %0,1(%3)\n" /* strnlen_user results includes \0 */ + " slgr %0,%1\n" + "1: sacf 768\n" + EX_TABLE(0b,1b) + : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2) + : "d" (reg0) : "cc", "memory"); + return size; +} + +unsigned long __strnlen_user(const char __user *src, unsigned long size) +{ + mm_segment_t old_fs; + unsigned long len; + + if (unlikely(!size)) + return 0; + old_fs = enable_sacf_uaccess(); + len = strnlen_user_srst(src, size); + disable_sacf_uaccess(old_fs); + return len; +} +EXPORT_SYMBOL(__strnlen_user); + +long __strncpy_from_user(char *dst, const char __user *src, long size) +{ + size_t done, len, offset, len_str; + + if (unlikely(size <= 0)) + return 0; + done = 0; + do { + offset = (size_t)src & (L1_CACHE_BYTES - 1); + len = min(size - done, L1_CACHE_BYTES - offset); + if (copy_from_user(dst, src, len)) + return -EFAULT; + len_str = strnlen(dst, len); + done += len_str; + src += len_str; + dst += len_str; + } while ((len_str == len) && (done < size)); + return done; +} +EXPORT_SYMBOL(__strncpy_from_user); diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c new file mode 100644 index 000000000..29d9470db --- /dev/null +++ b/arch/s390/lib/xor.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Optimized xor_block operation for RAID4/5 + * + * Copyright IBM Corp. 
2016 + * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#include <linux/types.h> +#include <linux/export.h> +#include <linux/raid/xor.h> +#include <asm/xor.h> + +static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) +{ + asm volatile( + " larl 1,2f\n" + " aghi %0,-1\n" + " jm 3f\n" + " srlg 0,%0,8\n" + " ltgr 0,0\n" + " jz 1f\n" + "0: xc 0(256,%1),0(%2)\n" + " la %1,256(%1)\n" + " la %2,256(%2)\n" + " brctg 0,0b\n" + "1: ex %0,0(1)\n" + " j 3f\n" + "2: xc 0(1,%1),0(%2)\n" + "3:\n" + : : "d" (bytes), "a" (p1), "a" (p2) + : "0", "1", "cc", "memory"); +} + +static void xor_xc_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3) +{ + asm volatile( + " larl 1,2f\n" + " aghi %0,-1\n" + " jm 3f\n" + " srlg 0,%0,8\n" + " ltgr 0,0\n" + " jz 1f\n" + "0: xc 0(256,%1),0(%2)\n" + " xc 0(256,%1),0(%3)\n" + " la %1,256(%1)\n" + " la %2,256(%2)\n" + " la %3,256(%3)\n" + " brctg 0,0b\n" + "1: ex %0,0(1)\n" + " ex %0,6(1)\n" + " j 3f\n" + "2: xc 0(1,%1),0(%2)\n" + " xc 0(1,%1),0(%3)\n" + "3:\n" + : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3) + : : "0", "1", "cc", "memory"); +} + +static void xor_xc_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4) +{ + asm volatile( + " larl 1,2f\n" + " aghi %0,-1\n" + " jm 3f\n" + " srlg 0,%0,8\n" + " ltgr 0,0\n" + " jz 1f\n" + "0: xc 0(256,%1),0(%2)\n" + " xc 0(256,%1),0(%3)\n" + " xc 0(256,%1),0(%4)\n" + " la %1,256(%1)\n" + " la %2,256(%2)\n" + " la %3,256(%3)\n" + " la %4,256(%4)\n" + " brctg 0,0b\n" + "1: ex %0,0(1)\n" + " ex %0,6(1)\n" + " ex %0,12(1)\n" + " j 3f\n" + "2: xc 0(1,%1),0(%2)\n" + " xc 0(1,%1),0(%3)\n" + " xc 0(1,%1),0(%4)\n" + "3:\n" + : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4) + : : "0", "1", "cc", "memory"); +} + +static void xor_xc_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, + unsigned long *p3, unsigned long *p4, unsigned long *p5) +{ + /* Get around a gcc oddity */ + register unsigned long *reg7 asm ("7") = p5; + + asm volatile( + " larl 1,2f\n" + " aghi %0,-1\n" + " jm 3f\n" + " srlg 0,%0,8\n" + " ltgr 0,0\n" + " jz 1f\n" + "0: xc 0(256,%1),0(%2)\n" + " xc 0(256,%1),0(%3)\n" + " xc 0(256,%1),0(%4)\n" + " xc 0(256,%1),0(%5)\n" + " la %1,256(%1)\n" + " la %2,256(%2)\n" + " la %3,256(%3)\n" + " la %4,256(%4)\n" + " la %5,256(%5)\n" + " brctg 0,0b\n" + "1: ex %0,0(1)\n" + " ex %0,6(1)\n" + " ex %0,12(1)\n" + " ex %0,18(1)\n" + " j 3f\n" + "2: xc 0(1,%1),0(%2)\n" + " xc 0(1,%1),0(%3)\n" + " xc 0(1,%1),0(%4)\n" + " xc 0(1,%1),0(%5)\n" + "3:\n" + : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4), + "+a" (reg7) + : : "0", "1", "cc", "memory"); +} + +struct xor_block_template xor_block_xc = { + .name = "xc", + .do_2 = xor_xc_2, + .do_3 = xor_xc_3, + .do_4 = xor_xc_4, + .do_5 = xor_xc_5, +}; +EXPORT_SYMBOL(xor_block_xc); |
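A note on the conversion arithmetic in delay.c above: the s390 TOD clock advances 4096 units per microsecond (one unit is 2**-12 microseconds), so __udelay() converts with "usecs << 12", and __ndelay() converts nanoseconds with "(nsecs << 9) / 125", which is nsecs * 512 / 125 = nsecs * 4096 / 1000. Once converted, any portion of at least one microsecond (4096 TOD units) is delegated to __udelay(). The following is a minimal user-space check of that arithmetic, not kernel code; ns_to_tod() is an illustrative name that does not appear in the patch.

#include <stdint.h>
#include <assert.h>

/*
 * The TOD clock advances 4096 units per microsecond, so:
 *   usecs -> TOD units:  usecs << 12          (usecs * 4096)
 *   nsecs -> TOD units: (nsecs << 9) / 125    (nsecs * 512 / 125 = nsecs * 4096 / 1000)
 */
static uint64_t ns_to_tod(uint64_t ns)
{
	return (ns << 9) / 125;
}

int main(void)
{
	assert(ns_to_tod(1000) == 4096);	/* 1 us = 4096 TOD units */
	assert(ns_to_tod(125) == 512);		/* exact: 125 ns = 512 TOD units */
	return 0;
}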
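Similarly, the MSB0 bit numbering documented at the top of find.c (bit 0 is the most significant bit of word 0, bit 63 its least significant bit, bit 64 the MSB of word 1, and so on) can be modeled in plain C. The sketch below assumes a 64-bit unsigned long, as on s390; ref_find_first_bit_inv() is a hypothetical reference implementation for illustration, not part of the patch, and trades the patch's word-at-a-time scan for an obvious bit-by-bit loop with the same result.

#include <stdio.h>

#define BITS_PER_LONG 64

/*
 * Reference model of find_first_bit_inv(): scan MSB0-numbered bits
 * from index 0 upward and return the index of the first set bit,
 * or 'size' if no bit below 'size' is set.
 */
static unsigned long ref_find_first_bit_inv(const unsigned long *addr,
					    unsigned long size)
{
	unsigned long i;

	for (i = 0; i < size; i++) {
		unsigned long word = addr[i / BITS_PER_LONG];
		unsigned long bit = i % BITS_PER_LONG;

		/* MSB0 bit 'bit' is conventional bit (63 - bit). */
		if (word & (1UL << (BITS_PER_LONG - 1 - bit)))
			return i;
	}
	return size;
}

int main(void)
{
	unsigned long map[2] = { 0, 1UL << 61 };	/* MSB0 bit 66 set */

	printf("%lu\n", ref_find_first_bit_inv(map, 128));	/* prints 66 */
	return 0;
}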