author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000
commit    | 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      | 848558de17fb3008cdf4d861b01ac7781903ce39 /tools/testing/selftests/powerpc/include
parent    | Initial commit. (diff)
download  | linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
          | linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/testing/selftests/powerpc/include')
-rw-r--r-- | tools/testing/selftests/powerpc/include/basic_asm.h    | 105
-rw-r--r-- | tools/testing/selftests/powerpc/include/fpu_asm.h      |  76
-rw-r--r-- | tools/testing/selftests/powerpc/include/gpr_asm.h      |  92
-rw-r--r-- | tools/testing/selftests/powerpc/include/instructions.h | 146
-rw-r--r-- | tools/testing/selftests/powerpc/include/pkeys.h        | 136
-rw-r--r-- | tools/testing/selftests/powerpc/include/reg.h          | 170
-rw-r--r-- | tools/testing/selftests/powerpc/include/subunit.h      |  52
-rw-r--r-- | tools/testing/selftests/powerpc/include/utils.h        | 163
-rw-r--r-- | tools/testing/selftests/powerpc/include/vmx_asm.h      |  92
-rw-r--r-- | tools/testing/selftests/powerpc/include/vsx_asm.h      |  67
10 files changed, 1099 insertions, 0 deletions
diff --git a/tools/testing/selftests/powerpc/include/basic_asm.h b/tools/testing/selftests/powerpc/include/basic_asm.h
new file mode 100644
index 000000000..26cde8ea1
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/basic_asm.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SELFTESTS_POWERPC_BASIC_ASM_H
+#define _SELFTESTS_POWERPC_BASIC_ASM_H
+
+#include <ppc-asm.h>
+#include <asm/unistd.h>
+
+#ifdef __powerpc64__
+#define PPC_LL ld
+#define PPC_STL std
+#define PPC_STLU stdu
+#else
+#define PPC_LL lwz
+#define PPC_STL stw
+#define PPC_STLU stwu
+#endif
+
+#define LOAD_REG_IMMEDIATE(reg, expr) \
+ lis reg, (expr)@highest; \
+ ori reg, reg, (expr)@higher; \
+ rldicr reg, reg, 32, 31; \
+ oris reg, reg, (expr)@high; \
+ ori reg, reg, (expr)@l;
+
+/*
+ * Note: These macros assume that variables being stored on the stack are
+ * sizeof(long), while this is usually the case it may not always be the
+ * case for each use case.
+ */
+#ifdef __powerpc64__
+
+// ABIv2
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define STACK_FRAME_MIN_SIZE 32
+#define STACK_FRAME_TOC_POS 24
+#define __STACK_FRAME_PARAM(_param) (32 + ((_param)*8))
+#define __STACK_FRAME_LOCAL(_num_params, _var_num) \
+ ((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*8))
+
+#else // ABIv1 below
+#define STACK_FRAME_MIN_SIZE 112
+#define STACK_FRAME_TOC_POS 40
+#define __STACK_FRAME_PARAM(i) (48 + ((i)*8))
+
+/*
+ * Caveat: if a function passed more than 8 doublewords, the caller will have
+ * made more space... which would render the 112 incorrect.
+ */
+#define __STACK_FRAME_LOCAL(_num_params, _var_num) \
+ (112 + ((_var_num)*8))
+
+
+#endif // ABIv2
+
+// Common 64-bit
+#define STACK_FRAME_LR_POS 16
+#define STACK_FRAME_CR_POS 8
+
+#else // 32-bit below
+
+#define STACK_FRAME_MIN_SIZE 16
+#define STACK_FRAME_LR_POS 4
+
+#define __STACK_FRAME_PARAM(_param) (STACK_FRAME_MIN_SIZE + ((_param)*4))
+#define __STACK_FRAME_LOCAL(_num_params, _var_num) \
+ ((STACK_FRAME_PARAM(_num_params)) + ((_var_num)*4))
+
+#endif // __powerpc64__
+
+/* Parameter x saved to the stack */
+#define STACK_FRAME_PARAM(var) __STACK_FRAME_PARAM(var)
+
+/* Local variable x saved to the stack after x parameters */
+#define STACK_FRAME_LOCAL(num_params, var) \
+ __STACK_FRAME_LOCAL(num_params, var)
+
+/*
+ * It is very important to note here that _extra is the extra amount of
+ * stack space needed. This space can be accessed using STACK_FRAME_PARAM()
+ * or STACK_FRAME_LOCAL() macros.
+ *
+ * r1 and r2 are not defined in ppc-asm.h (instead they are defined as sp
+ * and toc). Kernel programmers tend to prefer rX even for r1 and r2, hence
+ * %1 and %r2. r0 is defined in ppc-asm.h and therefore %r0 gets
+ * preprocessed incorrectly, hence r0.
+ */
+#define PUSH_BASIC_STACK(_extra) \
+ mflr r0; \
+ PPC_STL r0, STACK_FRAME_LR_POS(%r1); \
+ PPC_STLU %r1, -(((_extra + 15) & ~15) + STACK_FRAME_MIN_SIZE)(%r1);
+
+#define POP_BASIC_STACK(_extra) \
+ addi %r1, %r1, (((_extra + 15) & ~15) + STACK_FRAME_MIN_SIZE); \
+ PPC_LL r0, STACK_FRAME_LR_POS(%r1); \
+ mtlr r0;
+
+.macro OP_REGS op, reg_width, start_reg, end_reg, base_reg, base_reg_offset=0, skip=0
+ .set i, \start_reg
+ .rept (\end_reg - \start_reg + 1)
+ \op i, (\reg_width * (i - \skip) + \base_reg_offset)(\base_reg)
+ .set i, i + 1
+ .endr
+.endm
+
+#endif /* _SELFTESTS_POWERPC_BASIC_ASM_H */
diff --git a/tools/testing/selftests/powerpc/include/fpu_asm.h b/tools/testing/selftests/powerpc/include/fpu_asm.h
new file mode 100644
index 000000000..58ac2ce33
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/fpu_asm.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016, Cyril Bur, IBM Corp.
+ */
+
+#ifndef _SELFTESTS_POWERPC_FPU_ASM_H
+#define _SELFTESTS_POWERPC_FPU_ASM_H
+#include "basic_asm.h"
+
+#define PUSH_FPU(stack_size) \
+ stfd f31,(stack_size + STACK_FRAME_MIN_SIZE)(%r1); \
+ stfd f30,(stack_size + STACK_FRAME_MIN_SIZE - 8)(%r1); \
+ stfd f29,(stack_size + STACK_FRAME_MIN_SIZE - 16)(%r1); \
+ stfd f28,(stack_size + STACK_FRAME_MIN_SIZE - 24)(%r1); \
+ stfd f27,(stack_size + STACK_FRAME_MIN_SIZE - 32)(%r1); \
+ stfd f26,(stack_size + STACK_FRAME_MIN_SIZE - 40)(%r1); \
+ stfd f25,(stack_size + STACK_FRAME_MIN_SIZE - 48)(%r1); \
+ stfd f24,(stack_size + STACK_FRAME_MIN_SIZE - 56)(%r1); \
+ stfd f23,(stack_size + STACK_FRAME_MIN_SIZE - 64)(%r1); \
+ stfd f22,(stack_size + STACK_FRAME_MIN_SIZE - 72)(%r1); \
+ stfd f21,(stack_size + STACK_FRAME_MIN_SIZE - 80)(%r1); \
+ stfd f20,(stack_size + STACK_FRAME_MIN_SIZE - 88)(%r1); \
+ stfd f19,(stack_size + STACK_FRAME_MIN_SIZE - 96)(%r1); \
+ stfd f18,(stack_size + STACK_FRAME_MIN_SIZE - 104)(%r1); \
+ stfd f17,(stack_size + STACK_FRAME_MIN_SIZE - 112)(%r1); \
+ stfd f16,(stack_size + STACK_FRAME_MIN_SIZE - 120)(%r1); \
+ stfd f15,(stack_size + STACK_FRAME_MIN_SIZE - 128)(%r1); \
+ stfd f14,(stack_size + STACK_FRAME_MIN_SIZE - 136)(%r1);
+
+#define POP_FPU(stack_size) \
+ lfd f31,(stack_size + STACK_FRAME_MIN_SIZE)(%r1); \
+ lfd f30,(stack_size + STACK_FRAME_MIN_SIZE - 8)(%r1); \
+ lfd f29,(stack_size + STACK_FRAME_MIN_SIZE - 16)(%r1); \
+ lfd f28,(stack_size + STACK_FRAME_MIN_SIZE - 24)(%r1); \
+ lfd f27,(stack_size + STACK_FRAME_MIN_SIZE - 32)(%r1); \
+ lfd f26,(stack_size + STACK_FRAME_MIN_SIZE - 40)(%r1); \
+ lfd f25,(stack_size + STACK_FRAME_MIN_SIZE - 48)(%r1); \
+ lfd f24,(stack_size + STACK_FRAME_MIN_SIZE - 56)(%r1); \
+ lfd f23,(stack_size + STACK_FRAME_MIN_SIZE - 64)(%r1); \
+ lfd f22,(stack_size + STACK_FRAME_MIN_SIZE - 72)(%r1); \
+ lfd f21,(stack_size + STACK_FRAME_MIN_SIZE - 80)(%r1); \
+ lfd f20,(stack_size + STACK_FRAME_MIN_SIZE - 88)(%r1); \
+ lfd f19,(stack_size + STACK_FRAME_MIN_SIZE - 96)(%r1); \
+ lfd f18,(stack_size + STACK_FRAME_MIN_SIZE - 104)(%r1); \
+ lfd f17,(stack_size + STACK_FRAME_MIN_SIZE - 112)(%r1); \
+ lfd f16,(stack_size + STACK_FRAME_MIN_SIZE - 120)(%r1); \
+ lfd f15,(stack_size + STACK_FRAME_MIN_SIZE - 128)(%r1); \
+ lfd f14,(stack_size + STACK_FRAME_MIN_SIZE - 136)(%r1);
+
+/*
+ * Careful calling this, it will 'clobber' fpu (by design)
+ * Don't call this from C
+ */
+FUNC_START(load_fpu)
+ lfd f14,0(r3)
+ lfd f15,8(r3)
+ lfd f16,16(r3)
+ lfd f17,24(r3)
+ lfd f18,32(r3)
+ lfd f19,40(r3)
+ lfd f20,48(r3)
+ lfd f21,56(r3)
+ lfd f22,64(r3)
+ lfd f23,72(r3)
+ lfd f24,80(r3)
+ lfd f25,88(r3)
+ lfd f26,96(r3)
+ lfd f27,104(r3)
+ lfd f28,112(r3)
+ lfd f29,120(r3)
+ lfd f30,128(r3)
+ lfd f31,136(r3)
+ blr
+FUNC_END(load_fpu)
+
+#endif /* _SELFTESTS_POWERPC_FPU_ASM_H */
diff --git a/tools/testing/selftests/powerpc/include/gpr_asm.h b/tools/testing/selftests/powerpc/include/gpr_asm.h
new file mode 100644
index 000000000..5db74f5c6
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/gpr_asm.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016, Cyril Bur, IBM Corp.
+ */
+
+#ifndef _SELFTESTS_POWERPC_GPR_ASM_H
+#define _SELFTESTS_POWERPC_GPR_ASM_H
+
+#include "basic_asm.h"
+
+#define __PUSH_NVREGS(top_pos); \
+ std r31,(top_pos)(%r1); \
+ std r30,(top_pos - 8)(%r1); \
+ std r29,(top_pos - 16)(%r1); \
+ std r28,(top_pos - 24)(%r1); \
+ std r27,(top_pos - 32)(%r1); \
+ std r26,(top_pos - 40)(%r1); \
+ std r25,(top_pos - 48)(%r1); \
+ std r24,(top_pos - 56)(%r1); \
+ std r23,(top_pos - 64)(%r1); \
+ std r22,(top_pos - 72)(%r1); \
+ std r21,(top_pos - 80)(%r1); \
+ std r20,(top_pos - 88)(%r1); \
+ std r19,(top_pos - 96)(%r1); \
+ std r18,(top_pos - 104)(%r1); \
+ std r17,(top_pos - 112)(%r1); \
+ std r16,(top_pos - 120)(%r1); \
+ std r15,(top_pos - 128)(%r1); \
+ std r14,(top_pos - 136)(%r1)
+
+#define __POP_NVREGS(top_pos); \
+ ld r31,(top_pos)(%r1); \
+ ld r30,(top_pos - 8)(%r1); \
+ ld r29,(top_pos - 16)(%r1); \
+ ld r28,(top_pos - 24)(%r1); \
+ ld r27,(top_pos - 32)(%r1); \
+ ld r26,(top_pos - 40)(%r1); \
+ ld r25,(top_pos - 48)(%r1); \
+ ld r24,(top_pos - 56)(%r1); \
+ ld r23,(top_pos - 64)(%r1); \
+ ld r22,(top_pos - 72)(%r1); \
+ ld r21,(top_pos - 80)(%r1); \
+ ld r20,(top_pos - 88)(%r1); \
+ ld r19,(top_pos - 96)(%r1); \
+ ld r18,(top_pos - 104)(%r1); \
+ ld r17,(top_pos - 112)(%r1); \
+ ld r16,(top_pos - 120)(%r1); \
+ ld r15,(top_pos - 128)(%r1); \
+ ld r14,(top_pos - 136)(%r1)
+
+#define PUSH_NVREGS(stack_size) \
+ __PUSH_NVREGS(stack_size + STACK_FRAME_MIN_SIZE)
+
+/* 18 NV FPU REGS */
+#define PUSH_NVREGS_BELOW_FPU(stack_size) \
+ __PUSH_NVREGS(stack_size + STACK_FRAME_MIN_SIZE - (18 * 8))
+
+#define POP_NVREGS(stack_size) \
+ __POP_NVREGS(stack_size + STACK_FRAME_MIN_SIZE)
+
+/* 18 NV FPU REGS */
+#define POP_NVREGS_BELOW_FPU(stack_size) \
+ __POP_NVREGS(stack_size + STACK_FRAME_MIN_SIZE - (18 * 8))
+
+/*
+ * Careful calling this, it will 'clobber' NVGPRs (by design)
+ * Don't call this from C
+ */
+FUNC_START(load_gpr)
+ ld r14,0(r3)
+ ld r15,8(r3)
+ ld r16,16(r3)
+ ld r17,24(r3)
+ ld r18,32(r3)
+ ld r19,40(r3)
+ ld r20,48(r3)
+ ld r21,56(r3)
+ ld r22,64(r3)
+ ld r23,72(r3)
+ ld r24,80(r3)
+ ld r25,88(r3)
+ ld r26,96(r3)
+ ld r27,104(r3)
+ ld r28,112(r3)
+ ld r29,120(r3)
+ ld r30,128(r3)
+ ld r31,136(r3)
+ blr
+FUNC_END(load_gpr)
+
+
+#endif /* _SELFTESTS_POWERPC_GPR_ASM_H */
diff --git a/tools/testing/selftests/powerpc/include/instructions.h b/tools/testing/selftests/powerpc/include/instructions.h
new file mode 100644
index 000000000..4efa6314b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/instructions.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SELFTESTS_POWERPC_INSTRUCTIONS_H
+#define _SELFTESTS_POWERPC_INSTRUCTIONS_H
+
+#include <stdio.h>
+#include <stdlib.h>
+
+/* This defines the "copy" instruction from Power ISA 3.0 Book II, section 4.4. */
+#define __COPY(RA, RB, L) \
+ (0x7c00060c | (RA) << (31-15) | (RB) << (31-20) | (L) << (31-10))
+#define COPY(RA, RB, L) \
+ .long __COPY((RA), (RB), (L))
+
+static inline void copy(void *i)
+{
+ asm volatile(str(COPY(0, %0, 0))";"
+ :
+ : "b" (i)
+ : "memory"
+ );
+}
+
+static inline void copy_first(void *i)
+{
+ asm volatile(str(COPY(0, %0, 1))";"
+ :
+ : "b" (i)
+ : "memory"
+ );
+}
+
+/* This defines the "paste" instruction from Power ISA 3.0 Book II, section 4.4. */
+#define __PASTE(RA, RB, L, RC) \
+ (0x7c00070c | (RA) << (31-15) | (RB) << (31-20) | (L) << (31-10) | (RC) << (31-31))
+#define PASTE(RA, RB, L, RC) \
+ .long __PASTE((RA), (RB), (L), (RC))
+
+static inline int paste(void *i)
+{
+ int cr;
+
+ asm volatile(str(PASTE(0, %1, 0, 0))";"
+ "mfcr %0;"
+ : "=r" (cr)
+ : "b" (i)
+ : "memory"
+ );
+ return cr;
+}
+
+static inline int paste_last(void *i)
+{
+ int cr;
+
+ asm volatile(str(PASTE(0, %1, 1, 1))";"
+ "mfcr %0;"
+ : "=r" (cr)
+ : "b" (i)
+ : "memory"
+ );
+ return cr;
+}
+
+#define PPC_INST_COPY __COPY(0, 0, 0)
+#define PPC_INST_COPY_FIRST __COPY(0, 0, 1)
+#define PPC_INST_PASTE __PASTE(0, 0, 0, 0)
+#define PPC_INST_PASTE_LAST __PASTE(0, 0, 1, 1)
+
+/* This defines the prefixed load/store instructions */
+#ifdef __ASSEMBLY__
+# define stringify_in_c(...) __VA_ARGS__
+#else
+# define __stringify_in_c(...) #__VA_ARGS__
+# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+#endif
+
+#define __PPC_RA(a) (((a) & 0x1f) << 16)
+#define __PPC_RS(s) (((s) & 0x1f) << 21)
+#define __PPC_RT(t) __PPC_RS(t)
+#define __PPC_PREFIX_R(r) (((r) & 0x1) << 20)
+
+#define PPC_PREFIX_MLS 0x06000000
+#define PPC_PREFIX_8LS 0x04000000
+
+#define PPC_INST_LBZ 0x88000000
+#define PPC_INST_LHZ 0xa0000000
+#define PPC_INST_LHA 0xa8000000
+#define PPC_INST_LWZ 0x80000000
+#define PPC_INST_STB 0x98000000
+#define PPC_INST_STH 0xb0000000
+#define PPC_INST_STW 0x90000000
+#define PPC_INST_STD 0xf8000000
+#define PPC_INST_LFS 0xc0000000
+#define PPC_INST_LFD 0xc8000000
+#define PPC_INST_STFS 0xd0000000
+#define PPC_INST_STFD 0xd8000000
+
+#define PREFIX_MLS(instr, t, a, r, d) stringify_in_c(.balign 64, , 4;) \
+ stringify_in_c(.long PPC_PREFIX_MLS | \
+ __PPC_PREFIX_R(r) | \
+ (((d) >> 16) & 0x3ffff);) \
+ stringify_in_c(.long (instr) | \
+ __PPC_RT(t) | \
+ __PPC_RA(a) | \
+ ((d) & 0xffff);\n)
+
+#define PREFIX_8LS(instr, t, a, r, d) stringify_in_c(.balign 64, , 4;) \
+ stringify_in_c(.long PPC_PREFIX_8LS | \
+ __PPC_PREFIX_R(r) | \
+ (((d) >> 16) & 0x3ffff);) \
+ stringify_in_c(.long (instr) | \
+ __PPC_RT(t) | \
+ __PPC_RA(a) | \
+ ((d) & 0xffff);\n)
+
+/* Prefixed Integer Load/Store instructions */
+#define PLBZ(t, a, r, d) PREFIX_MLS(PPC_INST_LBZ, t, a, r, d)
+#define PLHZ(t, a, r, d) PREFIX_MLS(PPC_INST_LHZ, t, a, r, d)
+#define PLHA(t, a, r, d) PREFIX_MLS(PPC_INST_LHA, t, a, r, d)
+#define PLWZ(t, a, r, d) PREFIX_MLS(PPC_INST_LWZ, t, a, r, d)
+#define PLWA(t, a, r, d) PREFIX_8LS(0xa4000000, t, a, r, d)
+#define PLD(t, a, r, d) PREFIX_8LS(0xe4000000, t, a, r, d)
+#define PLQ(t, a, r, d) PREFIX_8LS(0xe0000000, t, a, r, d)
+#define PSTB(s, a, r, d) PREFIX_MLS(PPC_INST_STB, s, a, r, d)
+#define PSTH(s, a, r, d) PREFIX_MLS(PPC_INST_STH, s, a, r, d)
+#define PSTW(s, a, r, d) PREFIX_MLS(PPC_INST_STW, s, a, r, d)
+#define PSTD(s, a, r, d) PREFIX_8LS(0xf4000000, s, a, r, d)
+#define PSTQ(s, a, r, d) PREFIX_8LS(0xf0000000, s, a, r, d)
+
+/* Prefixed Floating-Point Load/Store Instructions */
+#define PLFS(frt, a, r, d) PREFIX_MLS(PPC_INST_LFS, frt, a, r, d)
+#define PLFD(frt, a, r, d) PREFIX_MLS(PPC_INST_LFD, frt, a, r, d)
+#define PSTFS(frs, a, r, d) PREFIX_MLS(PPC_INST_STFS, frs, a, r, d)
+#define PSTFD(frs, a, r, d) PREFIX_MLS(PPC_INST_STFD, frs, a, r, d)
+
+/* Prefixed VSX Load/Store Instructions */
+#define PLXSD(vrt, a, r, d) PREFIX_8LS(0xa8000000, vrt, a, r, d)
+#define PLXSSP(vrt, a, r, d) PREFIX_8LS(0xac000000, vrt, a, r, d)
+#define PLXV0(s, a, r, d) PREFIX_8LS(0xc8000000, s, a, r, d)
+#define PLXV1(s, a, r, d) PREFIX_8LS(0xcc000000, s, a, r, d)
+#define PSTXSD(vrs, a, r, d) PREFIX_8LS(0xb8000000, vrs, a, r, d)
+#define PSTXSSP(vrs, a, r, d) PREFIX_8LS(0xbc000000, vrs, a, r, d)
+#define PSTXV0(s, a, r, d) PREFIX_8LS(0xd8000000, s, a, r, d)
+#define PSTXV1(s, a, r, d) PREFIX_8LS(0xdc000000, s, a, r, d)
+
+#endif /* _SELFTESTS_POWERPC_INSTRUCTIONS_H */
diff --git a/tools/testing/selftests/powerpc/include/pkeys.h b/tools/testing/selftests/powerpc/include/pkeys.h
new file mode 100644
index 000000000..3312cb1b0
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/pkeys.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020, Sandipan Das, IBM Corp.
+ */
+
+#ifndef _SELFTESTS_POWERPC_PKEYS_H
+#define _SELFTESTS_POWERPC_PKEYS_H
+
+#include <sys/mman.h>
+
+#include "reg.h"
+#include "utils.h"
+
+/*
+ * Older versions of libc use the Intel-specific access rights.
+ * Hence, override the definitions as they might be incorrect.
+ */
+#undef PKEY_DISABLE_ACCESS
+#define PKEY_DISABLE_ACCESS 0x3
+
+#undef PKEY_DISABLE_WRITE
+#define PKEY_DISABLE_WRITE 0x2
+
+#undef PKEY_DISABLE_EXECUTE
+#define PKEY_DISABLE_EXECUTE 0x4
+
+/* Older versions of libc do not not define this */
+#ifndef SEGV_PKUERR
+#define SEGV_PKUERR 4
+#endif
+
+#define SI_PKEY_OFFSET 0x20
+
+#define __NR_pkey_mprotect 386
+#define __NR_pkey_alloc 384
+#define __NR_pkey_free 385
+
+#define PKEY_BITS_PER_PKEY 2
+#define NR_PKEYS 32
+#define PKEY_BITS_MASK ((1UL << PKEY_BITS_PER_PKEY) - 1)
+
+inline unsigned long pkeyreg_get(void)
+{
+ return mfspr(SPRN_AMR);
+}
+
+inline void pkeyreg_set(unsigned long amr)
+{
+ set_amr(amr);
+}
+
+void pkey_set_rights(int pkey, unsigned long rights)
+{
+ unsigned long amr, shift;
+
+ shift = (NR_PKEYS - pkey - 1) * PKEY_BITS_PER_PKEY;
+ amr = pkeyreg_get();
+ amr &= ~(PKEY_BITS_MASK << shift);
+ amr |= (rights & PKEY_BITS_MASK) << shift;
+ pkeyreg_set(amr);
+}
+
+int sys_pkey_mprotect(void *addr, size_t len, int prot, int pkey)
+{
+ return syscall(__NR_pkey_mprotect, addr, len, prot, pkey);
+}
+
+int sys_pkey_alloc(unsigned long flags, unsigned long rights)
+{
+ return syscall(__NR_pkey_alloc, flags, rights);
+}
+
+int sys_pkey_free(int pkey)
+{
+ return syscall(__NR_pkey_free, pkey);
+}
+
+int pkeys_unsupported(void)
+{
+ bool hash_mmu = false;
+ int pkey;
+
+ /* Protection keys are currently supported on Hash MMU only */
+ FAIL_IF(using_hash_mmu(&hash_mmu));
+ SKIP_IF(!hash_mmu);
+
+ /* Check if the system call is supported */
+ pkey = sys_pkey_alloc(0, 0);
+ SKIP_IF(pkey < 0);
+ sys_pkey_free(pkey);
+
+ return 0;
+}
+
+int siginfo_pkey(siginfo_t *si)
+{
+ /*
+  * In older versions of libc, siginfo_t does not have si_pkey as
+  * a member.
+  */
+#ifdef si_pkey
+ return si->si_pkey;
+#else
+ return *((int *)(((char *) si) + SI_PKEY_OFFSET));
+#endif
+}
+
+#define pkey_rights(r) ({ \
+ static char buf[4] = "rwx"; \
+ unsigned int amr_bits; \
+ if ((r) & PKEY_DISABLE_EXECUTE) \
+ buf[2] = '-'; \
+ amr_bits = (r) & PKEY_BITS_MASK; \
+ if (amr_bits & PKEY_DISABLE_WRITE) \
+ buf[1] = '-'; \
+ if (amr_bits & PKEY_DISABLE_ACCESS & ~PKEY_DISABLE_WRITE) \
+ buf[0] = '-'; \
+ buf; \
+})
+
+unsigned long next_pkey_rights(unsigned long rights)
+{
+ if (rights == PKEY_DISABLE_ACCESS)
+ return PKEY_DISABLE_EXECUTE;
+ else if (rights == (PKEY_DISABLE_ACCESS | PKEY_DISABLE_EXECUTE))
+ return 0;
+
+ if ((rights & PKEY_BITS_MASK) == 0)
+ rights |= PKEY_DISABLE_WRITE;
+ else if ((rights & PKEY_BITS_MASK) == PKEY_DISABLE_WRITE)
+ rights |= PKEY_DISABLE_ACCESS;
+
+ return rights;
+}
+
+#endif /* _SELFTESTS_POWERPC_PKEYS_H */
diff --git a/tools/testing/selftests/powerpc/include/reg.h b/tools/testing/selftests/powerpc/include/reg.h
new file mode 100644
index 000000000..d5a547f72
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/reg.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2014, Michael Ellerman, IBM Corp.
+ */
+
+#ifndef _SELFTESTS_POWERPC_REG_H
+#define _SELFTESTS_POWERPC_REG_H
+
+#define __stringify_1(x) #x
+#define __stringify(x) __stringify_1(x)
+
+#define mfspr(rn) ({unsigned long rval; \
+ asm volatile("mfspr %0," _str(rn) \
+ : "=r" (rval)); rval; })
+#define mtspr(rn, v) asm volatile("mtspr " _str(rn) ",%0" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
+
+#define mb() asm volatile("sync" : : : "memory");
+#define barrier() asm volatile("" : : : "memory");
+
+#define SPRN_MMCR2 769
+#define SPRN_MMCRA 770
+#define SPRN_MMCR0 779
+#define MMCR0_PMAO 0x00000080
+#define MMCR0_PMAE 0x04000000
+#define MMCR0_FC 0x80000000
+#define SPRN_EBBHR 804
+#define SPRN_EBBRR 805
+#define SPRN_BESCR 806 /* Branch event status & control register */
+#define SPRN_BESCRS 800 /* Branch event status & control set (1 bits set to 1) */
+#define SPRN_BESCRSU 801 /* Branch event status & control set upper */
+#define SPRN_BESCRR 802 /* Branch event status & control REset (1 bits set to 0) */
+#define SPRN_BESCRRU 803 /* Branch event status & control REset upper */
+
+#define BESCR_PMEO 0x1 /* PMU Event-based exception Occurred */
+#define BESCR_PME (0x1ul << 32) /* PMU Event-based exception Enable */
+
+#define SPRN_PMC1 771
+#define SPRN_PMC2 772
+#define SPRN_PMC3 773
+#define SPRN_PMC4 774
+#define SPRN_PMC5 775
+#define SPRN_PMC6 776
+
+#define SPRN_SIAR 780
+#define SPRN_SDAR 781
+#define SPRN_SIER 768
+
+#define SPRN_TEXASR 0x82 /* Transaction Exception and Status Register */
+#define SPRN_TFIAR 0x81 /* Transaction Failure Inst Addr */
+#define SPRN_TFHAR 0x80 /* Transaction Failure Handler Addr */
+#define SPRN_TAR 0x32f /* Target Address Register */
+
+#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF)
+#define SPRN_PVR 0x11F
+
+#define PVR_CFG(pvr) (((pvr) >> 8) & 0xF) /* Configuration field */
+#define PVR_MAJ(pvr) (((pvr) >> 4) & 0xF) /* Major revision field */
+#define PVR_MIN(pvr) (((pvr) >> 0) & 0xF) /* Minor revision field */
+
+#define SPRN_DSCR_PRIV 0x11 /* Privilege State DSCR */
+#define SPRN_DSCR 0x03 /* Data Stream Control Register */
+#define SPRN_PPR 896 /* Program Priority Register */
+#define SPRN_AMR 13 /* Authority Mask Register - problem state */
+
+#define set_amr(v) asm volatile("isync;" \
+ "mtspr " __stringify(SPRN_AMR) ",%0;" \
+ "isync" : \
+ : "r" ((unsigned long)(v)) \
+ : "memory")
+
+/* TEXASR register bits */
+#define TEXASR_FC 0xFE00000000000000
+#define TEXASR_FP 0x0100000000000000
+#define TEXASR_DA 0x0080000000000000
+#define TEXASR_NO 0x0040000000000000
+#define TEXASR_FO 0x0020000000000000
+#define TEXASR_SIC 0x0010000000000000
+#define TEXASR_NTC 0x0008000000000000
+#define TEXASR_TC 0x0004000000000000
+#define TEXASR_TIC 0x0002000000000000
+#define TEXASR_IC 0x0001000000000000
+#define TEXASR_IFC 0x0000800000000000
+#define TEXASR_ABT 0x0000000100000000
+#define TEXASR_SPD 0x0000000080000000
+#define TEXASR_HV 0x0000000020000000
+#define TEXASR_PR 0x0000000010000000
+#define TEXASR_FS 0x0000000008000000
+#define TEXASR_TE 0x0000000004000000
+#define TEXASR_ROT 0x0000000002000000
+
+/* MSR register bits */
+#define MSR_HV (1ul << 60) /* Hypervisor state */
+#define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */
+#define MSR_TS_T_LG 34 /* Trans Mem state: Active */
+
+#define __MASK(X) (1UL<<(X))
+
+/* macro to check TM MSR bits */
+#define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */
+#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
+
+/* Vector Instructions */
+#define VSX_XX1(xs, ra, rb) (((xs) & 0x1f) << 21 | ((ra) << 16) | \
+ ((rb) << 11) | (((xs) >> 5)))
+#define STXVD2X(xs, ra, rb) .long (0x7c000798 | VSX_XX1((xs), (ra), (rb)))
+#define LXVD2X(xs, ra, rb) .long (0x7c000698 | VSX_XX1((xs), (ra), (rb)))
+
+#define ASM_LOAD_GPR_IMMED(_asm_symbol_name_immed) \
+ "li 14, %[" #_asm_symbol_name_immed "];" \
+ "li 15, %[" #_asm_symbol_name_immed "];" \
+ "li 16, %[" #_asm_symbol_name_immed "];" \
+ "li 17, %[" #_asm_symbol_name_immed "];" \
+ "li 18, %[" #_asm_symbol_name_immed "];" \
+ "li 19, %[" #_asm_symbol_name_immed "];" \
+ "li 20, %[" #_asm_symbol_name_immed "];" \
+ "li 21, %[" #_asm_symbol_name_immed "];" \
+ "li 22, %[" #_asm_symbol_name_immed "];" \
+ "li 23, %[" #_asm_symbol_name_immed "];" \
+ "li 24, %[" #_asm_symbol_name_immed "];" \
+ "li 25, %[" #_asm_symbol_name_immed "];" \
+ "li 26, %[" #_asm_symbol_name_immed "];" \
+ "li 27, %[" #_asm_symbol_name_immed "];" \
+ "li 28, %[" #_asm_symbol_name_immed "];" \
+ "li 29, %[" #_asm_symbol_name_immed "];" \
+ "li 30, %[" #_asm_symbol_name_immed "];" \
+ "li 31, %[" #_asm_symbol_name_immed "];"
+
+#define ASM_LOAD_FPR(_asm_symbol_name_addr) \
+ "lfd 0, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 1, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 2, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 3, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 4, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 5, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 6, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 7, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 8, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 9, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 10, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 11, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 12, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 13, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 14, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 15, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 16, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 17, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 18, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 19, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 20, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 21, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 22, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 23, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 24, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 25, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 26, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 27, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 28, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 29, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 30, 0(%[" #_asm_symbol_name_addr "]);" \
+ "lfd 31, 0(%[" #_asm_symbol_name_addr "]);"
+
+#ifndef __ASSEMBLER__
+void store_gpr(unsigned long *addr);
+void load_gpr(unsigned long *addr);
+void store_fpr(double *addr);
+#endif /* end of __ASSEMBLER__ */
+
+#endif /* _SELFTESTS_POWERPC_REG_H */
diff --git a/tools/testing/selftests/powerpc/include/subunit.h b/tools/testing/selftests/powerpc/include/subunit.h
new file mode 100644
index 000000000..068d55fdf
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/subunit.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corp.
+ */
+
+#ifndef _SELFTESTS_POWERPC_SUBUNIT_H
+#define _SELFTESTS_POWERPC_SUBUNIT_H
+
+static inline void test_start(char *name)
+{
+ printf("test: %s\n", name);
+}
+
+static inline void test_failure_detail(char *name, char *detail)
+{
+ printf("failure: %s [%s]\n", name, detail);
+}
+
+static inline void test_failure(char *name)
+{
+ printf("failure: %s\n", name);
+}
+
+static inline void test_error(char *name)
+{
+ printf("error: %s\n", name);
+}
+
+static inline void test_skip(char *name)
+{
+ printf("skip: %s\n", name);
+}
+
+static inline void test_success(char *name)
+{
+ printf("success: %s\n", name);
+}
+
+static inline void test_finish(char *name, int status)
+{
+ if (status)
+ test_failure(name);
+ else
+ test_success(name);
+}
+
+static inline void test_set_git_version(char *value)
+{
+ printf("tags: git_version:%s\n", value);
+}
+
+#endif /* _SELFTESTS_POWERPC_SUBUNIT_H */
diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h
new file mode 100644
index 000000000..e222a5858
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/utils.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2013, Michael Ellerman, IBM Corp.
+ */
+
+#ifndef _SELFTESTS_POWERPC_UTILS_H
+#define _SELFTESTS_POWERPC_UTILS_H
+
+#define __cacheline_aligned __attribute__((aligned(128)))
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <linux/auxvec.h>
+#include <linux/perf_event.h>
+#include <asm/cputable.h>
+#include "reg.h"
+
+/* Avoid headaches with PRI?64 - just use %ll? always */
+typedef unsigned long long u64;
+typedef signed long long s64;
+
+/* Just for familiarity */
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+
+void test_harness_set_timeout(uint64_t time);
+int test_harness(int (test_function)(void), char *name);
+
+int read_auxv(char *buf, ssize_t buf_size);
+void *find_auxv_entry(int type, char *auxv);
+void *get_auxv_entry(int type);
+
+int pick_online_cpu(void);
+
+int read_debugfs_file(char *debugfs_file, int *result);
+int write_debugfs_file(char *debugfs_file, int result);
+int read_sysfs_file(char *debugfs_file, char *result, size_t result_size);
+int perf_event_open_counter(unsigned int type,
+ unsigned long config, int group_fd);
+int perf_event_enable(int fd);
+int perf_event_disable(int fd);
+int perf_event_reset(int fd);
+
+struct perf_event_read {
+ __u64 nr;
+ __u64 l1d_misses;
+};
+
+#if !defined(__GLIBC_PREREQ) || !__GLIBC_PREREQ(2, 30)
+#include <unistd.h>
+#include <sys/syscall.h>
+
+static inline pid_t gettid(void)
+{
+ return syscall(SYS_gettid);
+}
+#endif
+
+static inline bool have_hwcap(unsigned long ftr)
+{
+ return ((unsigned long)get_auxv_entry(AT_HWCAP) & ftr) == ftr;
+}
+
+#ifdef AT_HWCAP2
+static inline bool have_hwcap2(unsigned long ftr2)
+{
+ return ((unsigned long)get_auxv_entry(AT_HWCAP2) & ftr2) == ftr2;
+}
+#else
+static inline bool have_hwcap2(unsigned long ftr2)
+{
+ return false;
+}
+#endif
+
+static inline char *auxv_base_platform(void)
+{
+ return ((char *)get_auxv_entry(AT_BASE_PLATFORM));
+}
+
+static inline char *auxv_platform(void)
+{
+ return ((char *)get_auxv_entry(AT_PLATFORM));
+}
+
+bool is_ppc64le(void);
+int using_hash_mmu(bool *using_hash);
+
+/* Yes, this is evil */
+#define FAIL_IF(x) \
+do { \
+ if ((x)) { \
+ fprintf(stderr, \
+ "[FAIL] Test FAILED on line %d\n", __LINE__); \
+ return 1; \
+ } \
+} while (0)
+
+#define FAIL_IF_EXIT(x) \
+do { \
+ if ((x)) { \
+ fprintf(stderr, \
+ "[FAIL] Test FAILED on line %d\n", __LINE__); \
+ _exit(1); \
+ } \
+} while (0)
+
+/* The test harness uses this, yes it's gross */
+#define MAGIC_SKIP_RETURN_VALUE 99
+
+#define SKIP_IF(x) \
+do { \
+ if ((x)) { \
+ fprintf(stderr, \
+ "[SKIP] Test skipped on line %d\n", __LINE__); \
+ return MAGIC_SKIP_RETURN_VALUE; \
+ } \
+} while (0)
+
+#define SKIP_IF_MSG(x, msg) \
+do { \
+ if ((x)) { \
+ fprintf(stderr, \
+ "[SKIP] Test skipped on line %d: %s\n", \
+ __LINE__, msg); \
+ return MAGIC_SKIP_RETURN_VALUE; \
+ } \
+} while (0)
+
+#define _str(s) #s
+#define str(s) _str(s)
+
+#define sigsafe_err(msg) ({ \
+ ssize_t nbytes __attribute__((unused)); \
+ nbytes = write(STDERR_FILENO, msg, strlen(msg)); })
+
+/* POWER9 feature */
+#ifndef PPC_FEATURE2_ARCH_3_00
+#define PPC_FEATURE2_ARCH_3_00 0x00800000
+#endif
+
+/* POWER10 feature */
+#ifndef PPC_FEATURE2_ARCH_3_1
+#define PPC_FEATURE2_ARCH_3_1 0x00040000
+#endif
+
+/* POWER10 features */
+#ifndef PPC_FEATURE2_MMA
+#define PPC_FEATURE2_MMA 0x00020000
+#endif
+
+#if defined(__powerpc64__)
+#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP]
+#define UCONTEXT_MSR(UC) (UC)->uc_mcontext.gp_regs[PT_MSR]
+#elif defined(__powerpc__)
+#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_NIP]
+#define UCONTEXT_MSR(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_MSR]
+#else
+#error implement UCONTEXT_NIA
+#endif
+
+#endif /* _SELFTESTS_POWERPC_UTILS_H */
diff --git a/tools/testing/selftests/powerpc/include/vmx_asm.h b/tools/testing/selftests/powerpc/include/vmx_asm.h
new file mode 100644
index 000000000..ad9fb1b40
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/vmx_asm.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2015, Cyril Bur, IBM Corp.
+ */
+
+#include "basic_asm.h"
+
+/* POS MUST BE 16 ALIGNED! */
+#define PUSH_VMX(pos,reg) \
+ li reg,pos; \
+ stvx v20,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v21,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v22,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v23,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v24,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v25,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v26,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v27,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v28,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v29,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v30,reg,%r1; \
+ addi reg,reg,16; \
+ stvx v31,reg,%r1;
+
+/* POS MUST BE 16 ALIGNED! */
+#define POP_VMX(pos,reg) \
+ li reg,pos; \
+ lvx v20,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v21,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v22,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v23,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v24,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v25,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v26,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v27,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v28,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v29,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v30,reg,%r1; \
+ addi reg,reg,16; \
+ lvx v31,reg,%r1;
+
+/*
+ * Careful this will 'clobber' vmx (by design)
+ * Don't call this from C
+ */
+FUNC_START(load_vmx)
+ li r5,0
+ lvx v20,r5,r3
+ addi r5,r5,16
+ lvx v21,r5,r3
+ addi r5,r5,16
+ lvx v22,r5,r3
+ addi r5,r5,16
+ lvx v23,r5,r3
+ addi r5,r5,16
+ lvx v24,r5,r3
+ addi r5,r5,16
+ lvx v25,r5,r3
+ addi r5,r5,16
+ lvx v26,r5,r3
+ addi r5,r5,16
+ lvx v27,r5,r3
+ addi r5,r5,16
+ lvx v28,r5,r3
+ addi r5,r5,16
+ lvx v29,r5,r3
+ addi r5,r5,16
+ lvx v30,r5,r3
+ addi r5,r5,16
+ lvx v31,r5,r3
+ blr
+FUNC_END(load_vmx)
diff --git a/tools/testing/selftests/powerpc/include/vsx_asm.h b/tools/testing/selftests/powerpc/include/vsx_asm.h
new file mode 100644
index 000000000..434ca2f9b
--- /dev/null
+++ b/tools/testing/selftests/powerpc/include/vsx_asm.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2015, Cyril Bur, IBM Corp.
+ */
+
+#include "basic_asm.h"
+
+/*
+ * Careful this will 'clobber' vsx (by design), VSX are always
+ * volatile though so unlike vmx this isn't so much of an issue
+ * Still should avoid calling from C
+ */
+FUNC_START(load_vsx)
+ li r5,0
+ lxvd2x vs20,r5,r3
+ addi r5,r5,16
+ lxvd2x vs21,r5,r3
+ addi r5,r5,16
+ lxvd2x vs22,r5,r3
+ addi r5,r5,16
+ lxvd2x vs23,r5,r3
+ addi r5,r5,16
+ lxvd2x vs24,r5,r3
+ addi r5,r5,16
+ lxvd2x vs25,r5,r3
+ addi r5,r5,16
+ lxvd2x vs26,r5,r3
+ addi r5,r5,16
+ lxvd2x vs27,r5,r3
+ addi r5,r5,16
+ lxvd2x vs28,r5,r3
+ addi r5,r5,16
+ lxvd2x vs29,r5,r3
+ addi r5,r5,16
+ lxvd2x vs30,r5,r3
+ addi r5,r5,16
+ lxvd2x vs31,r5,r3
+ blr
+FUNC_END(load_vsx)
+
+FUNC_START(store_vsx)
+ li r5,0
+ stxvd2x vs20,r5,r3
+ addi r5,r5,16
+ stxvd2x vs21,r5,r3
+ addi r5,r5,16
+ stxvd2x vs22,r5,r3
+ addi r5,r5,16
+ stxvd2x vs23,r5,r3
+ addi r5,r5,16
+ stxvd2x vs24,r5,r3
+ addi r5,r5,16
+ stxvd2x vs25,r5,r3
+ addi r5,r5,16
+ stxvd2x vs26,r5,r3
+ addi r5,r5,16
+ stxvd2x vs27,r5,r3
+ addi r5,r5,16
+ stxvd2x vs28,r5,r3
+ addi r5,r5,16
+ stxvd2x vs29,r5,r3
+ addi r5,r5,16
+ stxvd2x vs30,r5,r3
+ addi r5,r5,16
+ stxvd2x vs31,r5,r3
+ blr
+FUNC_END(store_vsx)
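Note on usage: utils.h and subunit.h together form the selftest harness in this commit. test_harness() runs the test function under the harness timeout and prints the subunit "test:/success:/failure:" lines from subunit.h, while FAIL_IF() and SKIP_IF() turn a condition into the harness's failure (return 1) or skip (return 99) protocol. The following minimal sketch is not part of the diff above; the file and function names are made up for illustration, but it only calls helpers declared in the headers added by this commit.

/* check_arch_3_00.c -- hypothetical usage sketch, not part of the commit above. */
#include <stdio.h>
#include "utils.h"

static int check_arch_3_00(void)
{
        /* Skip (exit code 99) rather than fail on pre-POWER9 hardware. */
        SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00));

        /* FAIL_IF() reports the failing line number and makes the test return 1. */
        FAIL_IF(pick_online_cpu() < 0);

        return 0;
}

int main(void)
{
        /* Fork the test, enforce the timeout and emit subunit output. */
        return test_harness(check_arch_3_00, "check_arch_3_00");
}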
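Similarly, pkeys.h wraps the pkey_alloc/pkey_free/pkey_mprotect syscalls and the AMR accessors into small helpers. The sketch below is hypothetical and not part of the diff; it assumes a hash-MMU system where pkeys_unsupported() passes and uses 4096 as an illustrative page size.

/* pkey_demo.c -- hypothetical usage sketch, not part of the commit above. */
#include <stdio.h>
#include <sys/mman.h>
#include "pkeys.h"

static int pkey_demo(void)
{
        size_t size = 4096;     /* assumed page size for this sketch */
        void *map;
        int pkey;

        SKIP_IF(pkeys_unsupported());

        pkey = sys_pkey_alloc(0, 0);
        FAIL_IF(pkey < 0);

        map = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        FAIL_IF(map == MAP_FAILED);

        /* Tag the mapping with the key, then drop write access via the AMR. */
        FAIL_IF(sys_pkey_mprotect(map, size, PROT_READ | PROT_WRITE, pkey));
        pkey_set_rights(pkey, PKEY_DISABLE_WRITE);
        printf("pkey %d rights: %s\n", pkey, pkey_rights(PKEY_DISABLE_WRITE));

        pkey_set_rights(pkey, 0);       /* restore access before cleanup */
        FAIL_IF(munmap(map, size));
        sys_pkey_free(pkey);

        return 0;
}

int main(void)
{
        return test_harness(pkey_demo, "pkey_demo");
}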