Diffstat (limited to 'tools/testing/selftests/powerpc/math')
-rw-r--r--  tools/testing/selftests/powerpc/math/.gitignore      |  10
-rw-r--r--  tools/testing/selftests/powerpc/math/Makefile        |  21
-rw-r--r--  tools/testing/selftests/powerpc/math/fpu_asm.S       | 131
-rw-r--r--  tools/testing/selftests/powerpc/math/fpu_denormal.c  |  38
-rw-r--r--  tools/testing/selftests/powerpc/math/fpu_preempt.c   | 110
-rw-r--r--  tools/testing/selftests/powerpc/math/fpu_signal.c    | 131
-rw-r--r--  tools/testing/selftests/powerpc/math/fpu_syscall.c   |  86
-rw-r--r--  tools/testing/selftests/powerpc/math/mma.S           |  36
-rw-r--r--  tools/testing/selftests/powerpc/math/mma.c           |  48
-rw-r--r--  tools/testing/selftests/powerpc/math/vmx_asm.S       | 148
-rw-r--r--  tools/testing/selftests/powerpc/math/vmx_preempt.c   | 113
-rw-r--r--  tools/testing/selftests/powerpc/math/vmx_signal.c    | 155
-rw-r--r--  tools/testing/selftests/powerpc/math/vmx_syscall.c   |  92
-rw-r--r--  tools/testing/selftests/powerpc/math/vsx_asm.S       |  57
-rw-r--r--  tools/testing/selftests/powerpc/math/vsx_preempt.c   | 145
15 files changed, 1321 insertions, 0 deletions
diff --git a/tools/testing/selftests/powerpc/math/.gitignore b/tools/testing/selftests/powerpc/math/.gitignore new file mode 100644 index 000000000..07b4893ef --- /dev/null +++ b/tools/testing/selftests/powerpc/math/.gitignore @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +fpu_syscall +vmx_syscall +fpu_preempt +vmx_preempt +fpu_signal +vmx_signal +vsx_preempt +fpu_denormal +mma diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile new file mode 100644 index 000000000..3948f7c51 --- /dev/null +++ b/tools/testing/selftests/powerpc/math/Makefile @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 +TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal fpu_denormal vmx_syscall vmx_preempt vmx_signal vsx_preempt mma + +top_srcdir = ../../../../.. +include ../../lib.mk + +$(TEST_GEN_PROGS): ../harness.c +$(TEST_GEN_PROGS): CFLAGS += -O2 -g -pthread -m64 -maltivec + +$(OUTPUT)/fpu_syscall: fpu_asm.S +$(OUTPUT)/fpu_preempt: fpu_asm.S +$(OUTPUT)/fpu_signal: fpu_asm.S + +$(OUTPUT)/vmx_syscall: vmx_asm.S ../utils.c +$(OUTPUT)/vmx_preempt: vmx_asm.S ../utils.c +$(OUTPUT)/vmx_signal: vmx_asm.S ../utils.c + +$(OUTPUT)/vsx_preempt: CFLAGS += -mvsx +$(OUTPUT)/vsx_preempt: vsx_asm.S ../utils.c + +$(OUTPUT)/mma: mma.c mma.S ../utils.c diff --git a/tools/testing/selftests/powerpc/math/fpu_asm.S b/tools/testing/selftests/powerpc/math/fpu_asm.S new file mode 100644 index 000000000..9dc0c158f --- /dev/null +++ b/tools/testing/selftests/powerpc/math/fpu_asm.S @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2015, Cyril Bur, IBM Corp. + */ + +#include "basic_asm.h" +#include "fpu_asm.h" + +FUNC_START(check_fpu) + mr r4,r3 + li r3,1 # assume a bad result + lfd f0,0(r4) + fcmpu cr1,f0,f14 + bne cr1,1f + lfd f0,8(r4) + fcmpu cr1,f0,f15 + bne cr1,1f + lfd f0,16(r4) + fcmpu cr1,f0,f16 + bne cr1,1f + lfd f0,24(r4) + fcmpu cr1,f0,f17 + bne cr1,1f + lfd f0,32(r4) + fcmpu cr1,f0,f18 + bne cr1,1f + lfd f0,40(r4) + fcmpu cr1,f0,f19 + bne cr1,1f + lfd f0,48(r4) + fcmpu cr1,f0,f20 + bne cr1,1f + lfd f0,56(r4) + fcmpu cr1,f0,f21 + bne cr1,1f + lfd f0,64(r4) + fcmpu cr1,f0,f22 + bne cr1,1f + lfd f0,72(r4) + fcmpu cr1,f0,f23 + bne cr1,1f + lfd f0,80(r4) + fcmpu cr1,f0,f24 + bne cr1,1f + lfd f0,88(r4) + fcmpu cr1,f0,f25 + bne cr1,1f + lfd f0,96(r4) + fcmpu cr1,f0,f26 + bne cr1,1f + lfd f0,104(r4) + fcmpu cr1,f0,f27 + bne cr1,1f + lfd f0,112(r4) + fcmpu cr1,f0,f28 + bne cr1,1f + lfd f0,120(r4) + fcmpu cr1,f0,f29 + bne cr1,1f + lfd f0,128(r4) + fcmpu cr1,f0,f30 + bne cr1,1f + lfd f0,136(r4) + fcmpu cr1,f0,f31 + bne cr1,1f + li r3,0 # Success!!! +1: blr + +FUNC_START(test_fpu) + # r3 holds pointer to where to put the result of fork + # r4 holds pointer to the pid + # f14-f31 are non volatiles + PUSH_BASIC_STACK(256) + PUSH_FPU(256) + std r3,STACK_FRAME_PARAM(0)(sp) # Address of darray + std r4,STACK_FRAME_PARAM(1)(sp) # Address of pid + + bl load_fpu + nop + li r0,__NR_fork + sc + + # pass the result of the fork to the caller + ld r9,STACK_FRAME_PARAM(1)(sp) + std r3,0(r9) + + ld r3,STACK_FRAME_PARAM(0)(sp) + bl check_fpu + nop + + POP_FPU(256) + POP_BASIC_STACK(256) + blr +FUNC_END(test_fpu) + +# int preempt_fpu(double *darray, int *threads_running, int *running) +# On starting will (atomically) decrement not_ready as a signal that the FPU +# has been loaded with darray. Will proceed to check the validity of the FPU +# registers while running is not zero. 
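As a reading aid only (not part of the patch), the loop preempt_fpu implements below corresponds roughly to this C sketch, assuming a C prototype for the check_fpu routine defined above:

/*
 * Sketch: signal "FPU loaded" by atomically decrementing threads_starting,
 * then keep re-checking the non-volatile FPRs against darray until the
 * main thread clears *running. Non-zero return means a mismatch was seen.
 */
extern int check_fpu(double *darray);	/* asm routine above */

static int preempt_fpu_model(double *darray, int *threads_starting, int *running)
{
	__atomic_sub_fetch(threads_starting, 1, __ATOMIC_SEQ_CST);

	while (__atomic_load_n(running, __ATOMIC_SEQ_CST)) {
		if (check_fpu(darray))
			return 1;
	}

	return 0;
}

The real assembly keeps the reference values live in f14-f31 and only reloads darray for comparison, which is the point of the test.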
+FUNC_START(preempt_fpu) + PUSH_BASIC_STACK(256) + PUSH_FPU(256) + std r3,STACK_FRAME_PARAM(0)(sp) # double *darray + std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting + std r5,STACK_FRAME_PARAM(2)(sp) # int *running + + bl load_fpu + nop + + sync + # Atomic DEC + ld r3,STACK_FRAME_PARAM(1)(sp) +1: lwarx r4,0,r3 + addi r4,r4,-1 + stwcx. r4,0,r3 + bne- 1b + +2: ld r3,STACK_FRAME_PARAM(0)(sp) + bl check_fpu + nop + cmpdi r3,0 + bne 3f + ld r4,STACK_FRAME_PARAM(2)(sp) + ld r5,0(r4) + cmpwi r5,0 + bne 2b + +3: POP_FPU(256) + POP_BASIC_STACK(256) + blr +FUNC_END(preempt_fpu) diff --git a/tools/testing/selftests/powerpc/math/fpu_denormal.c b/tools/testing/selftests/powerpc/math/fpu_denormal.c new file mode 100644 index 000000000..5f96682ab --- /dev/null +++ b/tools/testing/selftests/powerpc/math/fpu_denormal.c @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright IBM Corp. 2020 + * + * This test attempts to cause a FP denormal exception on POWER8 CPUs. Unfortunately + * if the denormal handler is not configured or working properly, this can cause a bad + * crash in kernel mode when the kernel tries to save FP registers when the process + * exits. + */ + +#include <stdio.h> +#include <string.h> + +#include "utils.h" + +static int test_denormal_fpu(void) +{ + unsigned int m32; + unsigned long m64; + volatile float f; + volatile double d; + + /* try to induce lfs <denormal> ; stfd */ + + m32 = 0x00715fcf; /* random denormal */ + memcpy((float *)&f, &m32, sizeof(f)); + d = f; + memcpy(&m64, (double *)&d, sizeof(d)); + + FAIL_IF((long)(m64 != 0x380c57f3c0000000)); /* renormalised value */ + + return 0; +} + +int main(int argc, char *argv[]) +{ + return test_harness(test_denormal_fpu, "fpu_denormal"); +} diff --git a/tools/testing/selftests/powerpc/math/fpu_preempt.c b/tools/testing/selftests/powerpc/math/fpu_preempt.c new file mode 100644 index 000000000..3e5b5663d --- /dev/null +++ b/tools/testing/selftests/powerpc/math/fpu_preempt.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2015, Cyril Bur, IBM Corp. + * + * This test attempts to see if the FPU registers change across preemption. + * Two things should be noted here a) The check_fpu function in asm only checks + * the non volatile registers as it is reused from the syscall test b) There is + * no way to be sure preemption happened so this test just uses many threads + * and a long wait. As such, a successful test doesn't mean much but a failure + * is bad. 
+ */ + +#include <stdio.h> +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <stdlib.h> +#include <pthread.h> + +#include "utils.h" + +/* Time to wait for workers to get preempted (seconds) */ +#define PREEMPT_TIME 20 +/* + * Factor by which to multiply number of online CPUs for total number of + * worker threads + */ +#define THREAD_FACTOR 8 + + +__thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, + 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, + 2.1}; + +int threads_starting; +int running; + +extern int preempt_fpu(double *darray, int *threads_starting, int *running); + +void *preempt_fpu_c(void *p) +{ + long rc; + int i; + + srand(pthread_self()); + for (i = 0; i < 21; i++) + darray[i] = rand(); + + rc = preempt_fpu(darray, &threads_starting, &running); + + return (void *)rc; +} + +int test_preempt_fpu(void) +{ + int i, rc, threads; + pthread_t *tids; + + threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR; + tids = malloc((threads) * sizeof(pthread_t)); + FAIL_IF(!tids); + + running = true; + threads_starting = threads; + for (i = 0; i < threads; i++) { + rc = pthread_create(&tids[i], NULL, preempt_fpu_c, NULL); + FAIL_IF(rc); + } + + setbuf(stdout, NULL); + /* Not really necessary but nice to wait for every thread to start */ + printf("\tWaiting for all workers to start..."); + while(threads_starting) + asm volatile("": : :"memory"); + printf("done\n"); + + printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME); + sleep(PREEMPT_TIME); + printf("done\n"); + + printf("\tStopping workers..."); + /* + * Working are checking this value every loop. In preempt_fpu 'cmpwi r5,0; bne 2b'. + * r5 will have loaded the value of running. + */ + running = 0; + for (i = 0; i < threads; i++) { + void *rc_p; + pthread_join(tids[i], &rc_p); + + /* + * Harness will say the fail was here, look at why preempt_fpu + * returned + */ + if ((long) rc_p) + printf("oops\n"); + FAIL_IF((long) rc_p); + } + printf("done\n"); + + free(tids); + return 0; +} + +int main(int argc, char *argv[]) +{ + return test_harness(test_preempt_fpu, "fpu_preempt"); +} diff --git a/tools/testing/selftests/powerpc/math/fpu_signal.c b/tools/testing/selftests/powerpc/math/fpu_signal.c new file mode 100644 index 000000000..7b1addd50 --- /dev/null +++ b/tools/testing/selftests/powerpc/math/fpu_signal.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2015, Cyril Bur, IBM Corp. + * + * This test attempts to see if the FPU registers are correctly reported in a + * signal context. Each worker just spins checking its FPU registers, at some + * point a signal will interrupt it and C code will check the signal context + * ensuring it is also the same. 
+ */ + +#include <stdio.h> +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <stdlib.h> +#include <pthread.h> + +#include "utils.h" + +/* Number of times each thread should receive the signal */ +#define ITERATIONS 10 +/* + * Factor by which to multiply number of online CPUs for total number of + * worker threads + */ +#define THREAD_FACTOR 8 + +__thread double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, + 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, + 2.1}; + +bool bad_context; +int threads_starting; +int running; + +extern long preempt_fpu(double *darray, int *threads_starting, int *running); + +void signal_fpu_sig(int sig, siginfo_t *info, void *context) +{ + int i; + ucontext_t *uc = context; + mcontext_t *mc = &uc->uc_mcontext; + + /* Only the non volatiles were loaded up */ + for (i = 14; i < 32; i++) { + if (mc->fp_regs[i] != darray[i - 14]) { + bad_context = true; + break; + } + } +} + +void *signal_fpu_c(void *p) +{ + int i; + long rc; + struct sigaction act; + act.sa_sigaction = signal_fpu_sig; + act.sa_flags = SA_SIGINFO; + rc = sigaction(SIGUSR1, &act, NULL); + if (rc) + return p; + + srand(pthread_self()); + for (i = 0; i < 21; i++) + darray[i] = rand(); + + rc = preempt_fpu(darray, &threads_starting, &running); + + return (void *) rc; +} + +int test_signal_fpu(void) +{ + int i, j, rc, threads; + void *rc_p; + pthread_t *tids; + + threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR; + tids = malloc(threads * sizeof(pthread_t)); + FAIL_IF(!tids); + + running = true; + threads_starting = threads; + for (i = 0; i < threads; i++) { + rc = pthread_create(&tids[i], NULL, signal_fpu_c, NULL); + FAIL_IF(rc); + } + + setbuf(stdout, NULL); + printf("\tWaiting for all workers to start..."); + while (threads_starting) + asm volatile("": : :"memory"); + printf("done\n"); + + printf("\tSending signals to all threads %d times...", ITERATIONS); + for (i = 0; i < ITERATIONS; i++) { + for (j = 0; j < threads; j++) { + pthread_kill(tids[j], SIGUSR1); + } + sleep(1); + } + printf("done\n"); + + printf("\tStopping workers..."); + running = 0; + for (i = 0; i < threads; i++) { + pthread_join(tids[i], &rc_p); + + /* + * Harness will say the fail was here, look at why signal_fpu + * returned + */ + if ((long) rc_p || bad_context) + printf("oops\n"); + if (bad_context) + fprintf(stderr, "\t!! bad_context is true\n"); + FAIL_IF((long) rc_p || bad_context); + } + printf("done\n"); + + free(tids); + return 0; +} + +int main(int argc, char *argv[]) +{ + return test_harness(test_signal_fpu, "fpu_signal"); +} diff --git a/tools/testing/selftests/powerpc/math/fpu_syscall.c b/tools/testing/selftests/powerpc/math/fpu_syscall.c new file mode 100644 index 000000000..694f225c7 --- /dev/null +++ b/tools/testing/selftests/powerpc/math/fpu_syscall.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2015, Cyril Bur, IBM Corp. + * + * This test attempts to see if the FPU registers change across a syscall (fork). 
+ */ + +#include <stdio.h> +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <stdlib.h> + +#include "utils.h" + +extern int test_fpu(double *darray, pid_t *pid); + +double darray[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, + 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, + 2.1}; + +int syscall_fpu(void) +{ + pid_t fork_pid; + int i; + int ret; + int child_ret; + for (i = 0; i < 1000; i++) { + /* test_fpu will fork() */ + ret = test_fpu(darray, &fork_pid); + if (fork_pid == -1) + return -1; + if (fork_pid == 0) + exit(ret); + waitpid(fork_pid, &child_ret, 0); + if (ret || child_ret) + return 1; + } + + return 0; +} + +int test_syscall_fpu(void) +{ + /* + * Setup an environment with much context switching + */ + pid_t pid2; + pid_t pid = fork(); + int ret; + int child_ret; + FAIL_IF(pid == -1); + + pid2 = fork(); + /* Can't FAIL_IF(pid2 == -1); because already forked once */ + if (pid2 == -1) { + /* + * Couldn't fork, ensure test is a fail + */ + child_ret = ret = 1; + } else { + ret = syscall_fpu(); + if (pid2) + waitpid(pid2, &child_ret, 0); + else + exit(ret); + } + + ret |= child_ret; + + if (pid) + waitpid(pid, &child_ret, 0); + else + exit(ret); + + FAIL_IF(ret || child_ret); + return 0; +} + +int main(int argc, char *argv[]) +{ + return test_harness(test_syscall_fpu, "syscall_fpu"); + +} diff --git a/tools/testing/selftests/powerpc/math/mma.S b/tools/testing/selftests/powerpc/math/mma.S new file mode 100644 index 000000000..61cc88b1b --- /dev/null +++ b/tools/testing/selftests/powerpc/math/mma.S @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * + * Test basic matrix multiply assist (MMA) functionality if available. + * + * Copyright 2020, Alistair Popple, IBM Corp. + */ + .global test_mma +test_mma: + /* Load accumulator via VSX registers from image passed in r3 */ + lxvh8x 4,0,3 + lxvh8x 5,0,4 + + /* Clear and prime the accumulator (xxsetaccz) */ + .long 0x7c030162 + + /* Prime the accumulator with MMA VSX move to accumulator + * X-form (xxmtacc) (not needed due to above zeroing) */ + //.long 0x7c010162 + + /* xvi16ger2s */ + .long 0xec042958 + + /* Deprime the accumulator - xxmfacc 0 */ + .long 0x7c000162 + + /* Store result in image passed in r5 */ + stxvw4x 0,0,5 + addi 5,5,16 + stxvw4x 1,0,5 + addi 5,5,16 + stxvw4x 2,0,5 + addi 5,5,16 + stxvw4x 3,0,5 + addi 5,5,16 + + blr diff --git a/tools/testing/selftests/powerpc/math/mma.c b/tools/testing/selftests/powerpc/math/mma.c new file mode 100644 index 000000000..3a71808c9 --- /dev/null +++ b/tools/testing/selftests/powerpc/math/mma.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Test basic matrix multiply assist (MMA) functionality if available. + * + * Copyright 2020, Alistair Popple, IBM Corp. 
+ */ +#include <stdio.h> +#include <stdint.h> + +#include "utils.h" + +extern void test_mma(uint16_t (*)[8], uint16_t (*)[8], uint32_t (*)[4*4]); + +static int mma(void) +{ + int i; + int rc = 0; + uint16_t x[] = {1, 0, 2, 0, 3, 0, 4, 0}; + uint16_t y[] = {1, 0, 2, 0, 3, 0, 4, 0}; + uint32_t z[4*4]; + uint32_t exp[4*4] = {1, 2, 3, 4, + 2, 4, 6, 8, + 3, 6, 9, 12, + 4, 8, 12, 16}; + + SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_ARCH_3_1), "Need ISAv3.1"); + SKIP_IF_MSG(!have_hwcap2(PPC_FEATURE2_MMA), "Need MMA"); + + test_mma(&x, &y, &z); + + for (i = 0; i < 16; i++) { + printf("MMA[%d] = %d ", i, z[i]); + + if (z[i] == exp[i]) { + printf(" (Correct)\n"); + } else { + printf(" (Incorrect)\n"); + rc = 1; + } + } + + return rc; +} + +int main(int argc, char *argv[]) +{ + return test_harness(mma, "mma"); +} diff --git a/tools/testing/selftests/powerpc/math/vmx_asm.S b/tools/testing/selftests/powerpc/math/vmx_asm.S new file mode 100644 index 000000000..11b0704c5 --- /dev/null +++ b/tools/testing/selftests/powerpc/math/vmx_asm.S @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2015, Cyril Bur, IBM Corp. + */ + +#include "basic_asm.h" +#include "vmx_asm.h" + +# Should be safe from C, only touches r4, r5 and v0,v1,v2 +FUNC_START(check_vmx) + PUSH_BASIC_STACK(32) + mr r4,r3 + li r3,1 # assume a bad result + li r5,0 + lvx v0,r5,r4 + vcmpequd. v1,v0,v20 + vmr v2,v1 + + addi r5,r5,16 + lvx v0,r5,r4 + vcmpequd. v1,v0,v21 + vand v2,v2,v1 + + addi r5,r5,16 + lvx v0,r5,r4 + vcmpequd. v1,v0,v22 + vand v2,v2,v1 + + addi r5,r5,16 + lvx v0,r5,r4 + vcmpequd. v1,v0,v23 + vand v2,v2,v1 + + addi r5,r5,16 + lvx v0,r5,r4 + vcmpequd. v1,v0,v24 + vand v2,v2,v1 + + addi r5,r5,16 + lvx v0,r5,r4 + vcmpequd. v1,v0,v25 + vand v2,v2,v1 + + addi r5,r5,16 + lvx v0,r5,r4 + vcmpequd. v1,v0,v26 + vand v2,v2,v1 + + addi r5,r5,16 + lvx v0,r5,r4 + vcmpequd. v1,v0,v27 + vand v2,v2,v1 + + addi r5,r5,16 + lvx v0,r5,r4 + vcmpequd. v1,v0,v28 + vand v2,v2,v1 + + addi r5,r5,16 + lvx v0,r5,r4 + vcmpequd. v1,v0,v29 + vand v2,v2,v1 + + addi r5,r5,16 + lvx v0,r5,r4 + vcmpequd. v1,v0,v30 + vand v2,v2,v1 + + addi r5,r5,16 + lvx v0,r5,r4 + vcmpequd. v1,v0,v31 + vand v2,v2,v1 + + li r5,STACK_FRAME_LOCAL(0,0) + stvx v2,r5,sp + ldx r0,r5,sp + cmpdi r0,0xffffffffffffffff + bne 1f + li r3,0 +1: POP_BASIC_STACK(32) + blr +FUNC_END(check_vmx) + +# Safe from C +FUNC_START(test_vmx) + # r3 holds pointer to where to put the result of fork + # r4 holds pointer to the pid + # v20-v31 are non-volatile + PUSH_BASIC_STACK(512) + std r3,STACK_FRAME_PARAM(0)(sp) # Address of varray + std r4,STACK_FRAME_PARAM(1)(sp) # address of pid + PUSH_VMX(STACK_FRAME_LOCAL(2,0),r4) + + bl load_vmx + nop + + li r0,__NR_fork + sc + # Pass the result of fork back to the caller + ld r9,STACK_FRAME_PARAM(1)(sp) + std r3,0(r9) + + ld r3,STACK_FRAME_PARAM(0)(sp) + bl check_vmx + nop + + POP_VMX(STACK_FRAME_LOCAL(2,0),r4) + POP_BASIC_STACK(512) + blr +FUNC_END(test_vmx) + +# int preempt_vmx(vector int *varray, int *threads_starting, int *running) +# On starting will (atomically) decrement threads_starting as a signal that +# the VMX have been loaded with varray. Will proceed to check the validity of +# the VMX registers while running is not zero. 
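The "# Atomic DEC" lwarx/stwcx. retry loop used below (and in preempt_fpu above) is the PowerPC load-reserve/store-conditional form of an atomic decrement; a rough, illustrative C equivalent using GCC's atomic builtins:

/*
 * Sketch of the lwarx/addi/stwcx. sequence: read the counter, compute the
 * decremented value, and retry if the conditional store fails because
 * another thread modified the word in the meantime.
 */
static void atomic_dec(int *counter)
{
	int old = __atomic_load_n(counter, __ATOMIC_RELAXED);

	while (!__atomic_compare_exchange_n(counter, &old, old - 1,
					    0 /* strong */,
					    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED))
		;	/* 'old' was refreshed by the failed exchange; retry */
}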
+FUNC_START(preempt_vmx) + PUSH_BASIC_STACK(512) + std r3,STACK_FRAME_PARAM(0)(sp) # vector int *varray + std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting + std r5,STACK_FRAME_PARAM(2)(sp) # int *running + # VMX need to write to 16 byte aligned addresses, skip STACK_FRAME_LOCAL(3,0) + PUSH_VMX(STACK_FRAME_LOCAL(4,0),r4) + + bl load_vmx + nop + + sync + # Atomic DEC + ld r3,STACK_FRAME_PARAM(1)(sp) +1: lwarx r4,0,r3 + addi r4,r4,-1 + stwcx. r4,0,r3 + bne- 1b + +2: ld r3,STACK_FRAME_PARAM(0)(sp) + bl check_vmx + nop + cmpdi r3,0 + bne 3f + ld r4,STACK_FRAME_PARAM(2)(sp) + ld r5,0(r4) + cmpwi r5,0 + bne 2b + +3: POP_VMX(STACK_FRAME_LOCAL(4,0),r4) + POP_BASIC_STACK(512) + blr +FUNC_END(preempt_vmx) diff --git a/tools/testing/selftests/powerpc/math/vmx_preempt.c b/tools/testing/selftests/powerpc/math/vmx_preempt.c new file mode 100644 index 000000000..6f7cf400c --- /dev/null +++ b/tools/testing/selftests/powerpc/math/vmx_preempt.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2015, Cyril Bur, IBM Corp. + * + * This test attempts to see if the VMX registers change across preemption. + * Two things should be noted here a) The check_vmx function in asm only checks + * the non volatile registers as it is reused from the syscall test b) There is + * no way to be sure preemption happened so this test just uses many threads + * and a long wait. As such, a successful test doesn't mean much but a failure + * is bad. + */ + +#include <stdio.h> +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <stdlib.h> +#include <pthread.h> + +#include "utils.h" + +/* Time to wait for workers to get preempted (seconds) */ +#define PREEMPT_TIME 20 +/* + * Factor by which to multiply number of online CPUs for total number of + * worker threads + */ +#define THREAD_FACTOR 8 + +__thread vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12}, + {13,14,15,16},{17,18,19,20},{21,22,23,24}, + {25,26,27,28},{29,30,31,32},{33,34,35,36}, + {37,38,39,40},{41,42,43,44},{45,46,47,48}}; + +int threads_starting; +int running; + +extern int preempt_vmx(vector int *varray, int *threads_starting, int *running); + +void *preempt_vmx_c(void *p) +{ + int i, j; + long rc; + + srand(pthread_self()); + for (i = 0; i < 12; i++) + for (j = 0; j < 4; j++) + varray[i][j] = rand(); + + rc = preempt_vmx(varray, &threads_starting, &running); + + return (void *)rc; +} + +int test_preempt_vmx(void) +{ + int i, rc, threads; + pthread_t *tids; + + // vcmpequd used in vmx_asm.S is v2.07 + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07)); + + threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR; + tids = malloc(threads * sizeof(pthread_t)); + FAIL_IF(!tids); + + running = true; + threads_starting = threads; + for (i = 0; i < threads; i++) { + rc = pthread_create(&tids[i], NULL, preempt_vmx_c, NULL); + FAIL_IF(rc); + } + + setbuf(stdout, NULL); + /* Not really nessesary but nice to wait for every thread to start */ + printf("\tWaiting for all workers to start..."); + while(threads_starting) + asm volatile("": : :"memory"); + printf("done\n"); + + printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME); + sleep(PREEMPT_TIME); + printf("done\n"); + + printf("\tStopping workers..."); + /* + * Working are checking this value every loop. In preempt_vmx 'cmpwi r5,0; bne 2b'. + * r5 will have loaded the value of running. 
+ */ + running = 0; + for (i = 0; i < threads; i++) { + void *rc_p; + pthread_join(tids[i], &rc_p); + + /* + * Harness will say the fail was here, look at why preempt_vmx + * returned + */ + if ((long) rc_p) + printf("oops\n"); + FAIL_IF((long) rc_p); + } + printf("done\n"); + + return 0; +} + +int main(int argc, char *argv[]) +{ + return test_harness(test_preempt_vmx, "vmx_preempt"); +} diff --git a/tools/testing/selftests/powerpc/math/vmx_signal.c b/tools/testing/selftests/powerpc/math/vmx_signal.c new file mode 100644 index 000000000..b340a5c4e --- /dev/null +++ b/tools/testing/selftests/powerpc/math/vmx_signal.c @@ -0,0 +1,155 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2015, Cyril Bur, IBM Corp. + * + * This test attempts to see if the VMX registers are correctly reported in a + * signal context. Each worker just spins checking its VMX registers, at some + * point a signal will interrupt it and C code will check the signal context + * ensuring it is also the same. + */ + +#include <stdio.h> +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <stdlib.h> +#include <string.h> +#include <pthread.h> +#include <altivec.h> + +#include "utils.h" + +/* Number of times each thread should receive the signal */ +#define ITERATIONS 10 +/* + * Factor by which to multiply number of online CPUs for total number of + * worker threads + */ +#define THREAD_FACTOR 8 + +__thread vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12}, + {13,14,15,16},{17,18,19,20},{21,22,23,24}, + {25,26,27,28},{29,30,31,32},{33,34,35,36}, + {37,38,39,40},{41,42,43,44},{45,46,47,48}}; + +bool bad_context; +int running; +int threads_starting; + +extern int preempt_vmx(vector int *varray, int *threads_starting, int *sentinal); + +void signal_vmx_sig(int sig, siginfo_t *info, void *context) +{ + int i; + ucontext_t *uc = context; + mcontext_t *mc = &uc->uc_mcontext; + + /* Only the non volatiles were loaded up */ + for (i = 20; i < 32; i++) { + if (memcmp(mc->v_regs->vrregs[i], &varray[i - 20], 16)) { + int j; + /* + * Shouldn't printf() in a signal handler, however, this is a + * test and we've detected failure. Understanding what failed + * is paramount. All that happens after this is tests exit with + * failure. 
+ */ + printf("VMX mismatch at reg %d!\n", i); + printf("Reg | Actual | Expected\n"); + for (j = 20; j < 32; j++) { + printf("%d | 0x%04x%04x%04x%04x | 0x%04x%04x%04x%04x\n", j, mc->v_regs->vrregs[j][0], + mc->v_regs->vrregs[j][1], mc->v_regs->vrregs[j][2], mc->v_regs->vrregs[j][3], + varray[j - 20][0], varray[j - 20][1], varray[j - 20][2], varray[j - 20][3]); + } + bad_context = true; + break; + } + } +} + +void *signal_vmx_c(void *p) +{ + int i, j; + long rc; + struct sigaction act; + act.sa_sigaction = signal_vmx_sig; + act.sa_flags = SA_SIGINFO; + rc = sigaction(SIGUSR1, &act, NULL); + if (rc) + return p; + + srand(pthread_self()); + for (i = 0; i < 12; i++) + for (j = 0; j < 4; j++) + varray[i][j] = rand(); + + rc = preempt_vmx(varray, &threads_starting, &running); + + return (void *) rc; +} + +int test_signal_vmx(void) +{ + int i, j, rc, threads; + void *rc_p; + pthread_t *tids; + + // vcmpequd used in vmx_asm.S is v2.07 + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07)); + + threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR; + tids = malloc(threads * sizeof(pthread_t)); + FAIL_IF(!tids); + + running = true; + threads_starting = threads; + for (i = 0; i < threads; i++) { + rc = pthread_create(&tids[i], NULL, signal_vmx_c, NULL); + FAIL_IF(rc); + } + + setbuf(stdout, NULL); + printf("\tWaiting for %d workers to start... %d", threads, threads_starting); + while (threads_starting) { + asm volatile("": : :"memory"); + usleep(1000); + printf(", %d", threads_starting); + } + printf(" ...done\n"); + + printf("\tSending signals to all threads %d times...", ITERATIONS); + for (i = 0; i < ITERATIONS; i++) { + for (j = 0; j < threads; j++) { + pthread_kill(tids[j], SIGUSR1); + } + sleep(1); + } + printf("done\n"); + + printf("\tKilling workers..."); + running = 0; + for (i = 0; i < threads; i++) { + pthread_join(tids[i], &rc_p); + + /* + * Harness will say the fail was here, look at why signal_vmx + * returned + */ + if ((long) rc_p || bad_context) + printf("oops\n"); + if (bad_context) + fprintf(stderr, "\t!! bad_context is true\n"); + FAIL_IF((long) rc_p || bad_context); + } + printf("done\n"); + + free(tids); + return 0; +} + +int main(int argc, char *argv[]) +{ + return test_harness(test_signal_vmx, "vmx_signal"); +} diff --git a/tools/testing/selftests/powerpc/math/vmx_syscall.c b/tools/testing/selftests/powerpc/math/vmx_syscall.c new file mode 100644 index 000000000..03c78dfe3 --- /dev/null +++ b/tools/testing/selftests/powerpc/math/vmx_syscall.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2015, Cyril Bur, IBM Corp. + * + * This test attempts to see if the VMX registers change across a syscall (fork). 
+ */ + +#include <altivec.h> +#include <stdio.h> +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <stdlib.h> +#include <sys/types.h> +#include <sys/wait.h> +#include "utils.h" + +vector int varray[] = {{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10,11,12}, + {13,14,15,16},{17,18,19,20},{21,22,23,24}, + {25,26,27,28},{29,30,31,32},{33,34,35,36}, + {37,38,39,40},{41,42,43,44},{45,46,47,48}}; + +extern int test_vmx(vector int *varray, pid_t *pid); + +int vmx_syscall(void) +{ + pid_t fork_pid; + int i; + int ret; + int child_ret; + for (i = 0; i < 1000; i++) { + /* test_vmx will fork() */ + ret = test_vmx(varray, &fork_pid); + if (fork_pid == -1) + return -1; + if (fork_pid == 0) + exit(ret); + waitpid(fork_pid, &child_ret, 0); + if (ret || child_ret) + return 1; + } + + return 0; +} + +int test_vmx_syscall(void) +{ + /* + * Setup an environment with much context switching + */ + pid_t pid2; + pid_t pid; + int ret; + int child_ret; + + // vcmpequd used in vmx_asm.S is v2.07 + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_2_07)); + + pid = fork(); + FAIL_IF(pid == -1); + + pid2 = fork(); + ret = vmx_syscall(); + /* Can't FAIL_IF(pid2 == -1); because we've already forked */ + if (pid2 == -1) { + /* + * Couldn't fork, ensure child_ret is set and is a fail + */ + ret = child_ret = 1; + } else { + if (pid2) + waitpid(pid2, &child_ret, 0); + else + exit(ret); + } + + ret |= child_ret; + + if (pid) + waitpid(pid, &child_ret, 0); + else + exit(ret); + + FAIL_IF(ret || child_ret); + return 0; +} + +int main(int argc, char *argv[]) +{ + return test_harness(test_vmx_syscall, "vmx_syscall"); + +} diff --git a/tools/testing/selftests/powerpc/math/vsx_asm.S b/tools/testing/selftests/powerpc/math/vsx_asm.S new file mode 100644 index 000000000..ffc165d98 --- /dev/null +++ b/tools/testing/selftests/powerpc/math/vsx_asm.S @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright 2015, Cyril Bur, IBM Corp. + */ + +#include "basic_asm.h" +#include "vsx_asm.h" + +#long check_vsx(vector int *r3); +#This function wraps storeing VSX regs to the end of an array and a +#call to a comparison function in C which boils down to a memcmp() +FUNC_START(check_vsx) + PUSH_BASIC_STACK(32) + std r3,STACK_FRAME_PARAM(0)(sp) + addi r3, r3, 16 * 12 #Second half of array + bl store_vsx + ld r3,STACK_FRAME_PARAM(0)(sp) + bl vsx_memcmp + POP_BASIC_STACK(32) + blr +FUNC_END(check_vsx) + +# int preempt_vmx(vector int *varray, int *threads_starting, +# int *running); +# On starting will (atomically) decrement threads_starting as a signal +# that the VMX have been loaded with varray. Will proceed to check the +# validity of the VMX registers while running is not zero. +FUNC_START(preempt_vsx) + PUSH_BASIC_STACK(512) + std r3,STACK_FRAME_PARAM(0)(sp) # vector int *varray + std r4,STACK_FRAME_PARAM(1)(sp) # int *threads_starting + std r5,STACK_FRAME_PARAM(2)(sp) # int *running + + bl load_vsx + nop + + sync + # Atomic DEC + ld r3,STACK_FRAME_PARAM(1)(sp) +1: lwarx r4,0,r3 + addi r4,r4,-1 + stwcx. 
r4,0,r3 + bne- 1b + +2: ld r3,STACK_FRAME_PARAM(0)(sp) + bl check_vsx + nop + cmpdi r3,0 + bne 3f + ld r4,STACK_FRAME_PARAM(2)(sp) + ld r5,0(r4) + cmpwi r5,0 + bne 2b + +3: POP_BASIC_STACK(512) + blr +FUNC_END(preempt_vsx) diff --git a/tools/testing/selftests/powerpc/math/vsx_preempt.c b/tools/testing/selftests/powerpc/math/vsx_preempt.c new file mode 100644 index 000000000..d1601bb88 --- /dev/null +++ b/tools/testing/selftests/powerpc/math/vsx_preempt.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2015, Cyril Bur, IBM Corp. + * + * This test attempts to see if the VSX registers change across preemption. + * There is no way to be sure preemption happened so this test just + * uses many threads and a long wait. As such, a successful test + * doesn't mean much but a failure is bad. + */ + +#include <stdio.h> +#include <string.h> +#include <unistd.h> +#include <sys/syscall.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <stdlib.h> +#include <pthread.h> + +#include "utils.h" + +/* Time to wait for workers to get preempted (seconds) */ +#define PREEMPT_TIME 20 +/* + * Factor by which to multiply number of online CPUs for total number of + * worker threads + */ +#define THREAD_FACTOR 8 + +/* + * Ensure there is twice the number of non-volatile VMX regs! + * check_vmx() is going to use the other half as space to put the live + * registers before calling vsx_memcmp() + */ +__thread vector int varray[24] = { + {1, 2, 3, 4 }, {5, 6, 7, 8 }, {9, 10,11,12}, + {13,14,15,16}, {17,18,19,20}, {21,22,23,24}, + {25,26,27,28}, {29,30,31,32}, {33,34,35,36}, + {37,38,39,40}, {41,42,43,44}, {45,46,47,48} +}; + +int threads_starting; +int running; + +extern long preempt_vsx(vector int *varray, int *threads_starting, int *running); + +long vsx_memcmp(vector int *a) { + vector int zero = {0, 0, 0, 0}; + int i; + + FAIL_IF(a != varray); + + for(i = 0; i < 12; i++) { + if (memcmp(&a[i + 12], &zero, sizeof(vector int)) == 0) { + fprintf(stderr, "Detected zero from the VSX reg %d\n", i + 12); + return 2; + } + } + + if (memcmp(a, &a[12], 12 * sizeof(vector int))) { + long *p = (long *)a; + fprintf(stderr, "VSX mismatch\n"); + for (i = 0; i < 24; i=i+2) + fprintf(stderr, "%d: 0x%08lx%08lx | 0x%08lx%08lx\n", + i/2 + i%2 + 20, p[i], p[i + 1], p[i + 24], p[i + 25]); + return 1; + } + return 0; +} + +void *preempt_vsx_c(void *p) +{ + int i, j; + long rc; + srand(pthread_self()); + for (i = 0; i < 12; i++) + for (j = 0; j < 4; j++) { + varray[i][j] = rand(); + /* Don't want zero because it hides kernel problems */ + if (varray[i][j] == 0) + j--; + } + rc = preempt_vsx(varray, &threads_starting, &running); + if (rc == 2) + fprintf(stderr, "Caught zeros in VSX compares\n"); + return (void *)rc; +} + +int test_preempt_vsx(void) +{ + int i, rc, threads; + pthread_t *tids; + + SKIP_IF(!have_hwcap(PPC_FEATURE_HAS_VSX)); + + threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR; + tids = malloc(threads * sizeof(pthread_t)); + FAIL_IF(!tids); + + running = true; + threads_starting = threads; + for (i = 0; i < threads; i++) { + rc = pthread_create(&tids[i], NULL, preempt_vsx_c, NULL); + FAIL_IF(rc); + } + + setbuf(stdout, NULL); + /* Not really nessesary but nice to wait for every thread to start */ + printf("\tWaiting for %d workers to start...", threads_starting); + while(threads_starting) + asm volatile("": : :"memory"); + printf("done\n"); + + printf("\tWaiting for %d seconds to let some workers get preempted...", PREEMPT_TIME); + sleep(PREEMPT_TIME); + 
printf("done\n"); + + printf("\tStopping workers..."); + /* + * Working are checking this value every loop. In preempt_vsx 'cmpwi r5,0; bne 2b'. + * r5 will have loaded the value of running. + */ + running = 0; + for (i = 0; i < threads; i++) { + void *rc_p; + pthread_join(tids[i], &rc_p); + + /* + * Harness will say the fail was here, look at why preempt_vsx + * returned + */ + if ((long) rc_p) + printf("oops\n"); + FAIL_IF((long) rc_p); + } + printf("done\n"); + + return 0; +} + +int main(int argc, char *argv[]) +{ + return test_harness(test_preempt_vsx, "vsx_preempt"); +} |