author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit:    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree:      b2d64bc10158fdd5497876388cd68142ca374ed3 /tools/testing/selftests/powerpc/mm
parent:    Initial commit. (diff)
download:  linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/testing/selftests/powerpc/mm')
17 files changed, 3012 insertions, 0 deletions
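All of the C tests added below share the powerpc selftests harness: each file defines a test function built out of FAIL_IF()/SKIP_IF() checks and hands it to test_harness(), which the Makefile links in from ../harness.c and ../utils.c. The following is a minimal, self-contained sketch of that shape; the stub macros and the trivial test_harness() are illustrative stand-ins, not the real implementations from utils.h and harness.c (the real harness also forks the test, applies a timeout, and reports TAP-style results).

/* Illustrative sketch of the harness pattern shared by the tests below.
 * The real FAIL_IF/SKIP_IF/test_harness come from the selftests' utils.h
 * and harness.c; these stand-ins only mimic the control flow. */
#include <stdio.h>

#define FAIL_IF(x) do { \
	if (x) { fprintf(stderr, "[FAIL] %s\n", #x); return 1; } \
} while (0)

#define SKIP_IF(x) do { \
	if (x) { fprintf(stderr, "[SKIP] %s\n", #x); return 0; } \
} while (0)

static int test_harness(int (*fn)(void), const char *name)
{
	int rc = fn();

	printf("%s: %s\n", rc ? "failure" : "success", name);
	return rc;
}

static int test(void)
{
	SKIP_IF(0);		/* e.g. unsupported MMU, page size or hwcap */
	FAIL_IF(1 + 1 != 2);	/* the real assertions go here */
	return 0;
}

int main(void)
{
	return test_harness(test, "example");
}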
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore new file mode 100644 index 0000000000..0df1a3afc5 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/.gitignore @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0-only +bad_accesses +exec_prot +hugetlb_vs_thp_test +large_vm_fork_separation +large_vm_gpr_corruption +pkey_exec_prot +pkey_siginfo +prot_sao +segv_errors +stack_expansion_ldst +stack_expansion_signal +subpage_prot +tempfile +tlbie_test +wild_bctr diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile new file mode 100644 index 0000000000..4a6608beef --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/Makefile @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0 +noarg: + $(MAKE) -C ../ + +TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \ + large_vm_fork_separation bad_accesses exec_prot pkey_exec_prot \ + pkey_siginfo stack_expansion_signal stack_expansion_ldst \ + large_vm_gpr_corruption +TEST_PROGS := stress_code_patching.sh + +TEST_GEN_PROGS_EXTENDED := tlbie_test +TEST_GEN_FILES := tempfile + +top_srcdir = ../../../../.. +include ../../lib.mk + +$(TEST_GEN_PROGS): ../harness.c ../utils.c + +$(OUTPUT)/prot_sao: ../utils.c + +$(OUTPUT)/wild_bctr: CFLAGS += -m64 +$(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64 +$(OUTPUT)/large_vm_gpr_corruption: CFLAGS += -m64 +$(OUTPUT)/bad_accesses: CFLAGS += -m64 +$(OUTPUT)/exec_prot: CFLAGS += -m64 +$(OUTPUT)/pkey_exec_prot: CFLAGS += -m64 +$(OUTPUT)/pkey_siginfo: CFLAGS += -m64 + +$(OUTPUT)/stack_expansion_signal: ../utils.c ../pmu/lib.c + +$(OUTPUT)/stack_expansion_ldst: CFLAGS += -fno-stack-protector +$(OUTPUT)/stack_expansion_ldst: ../utils.c + +$(OUTPUT)/tempfile: + dd if=/dev/zero of=$@ bs=64k count=1 status=none + +$(OUTPUT)/tlbie_test: LDLIBS += -lpthread +$(OUTPUT)/pkey_siginfo: LDLIBS += -lpthread diff --git a/tools/testing/selftests/powerpc/mm/bad_accesses.c b/tools/testing/selftests/powerpc/mm/bad_accesses.c new file mode 100644 index 0000000000..65d2148b05 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/bad_accesses.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Copyright 2019, Michael Ellerman, IBM Corp. +// +// Test that out-of-bounds reads/writes behave as expected. + +#include <setjmp.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "utils.h" + +// Old distros (Ubuntu 16.04 at least) don't define this +#ifndef SEGV_BNDERR +#define SEGV_BNDERR 3 +#endif + +// 64-bit kernel is always here +#define PAGE_OFFSET (0xcul << 60) + +static unsigned long kernel_virt_end; + +static volatile int fault_code; +static volatile unsigned long fault_addr; +static jmp_buf setjmp_env; + +static void segv_handler(int n, siginfo_t *info, void *ctxt_v) +{ + fault_code = info->si_code; + fault_addr = (unsigned long)info->si_addr; + siglongjmp(setjmp_env, 1); +} + +int bad_access(char *p, bool write) +{ + char x = 0; + + fault_code = 0; + fault_addr = 0; + + if (sigsetjmp(setjmp_env, 1) == 0) { + if (write) + *p = 1; + else + x = *p; + + printf("Bad - no SEGV! (%c)\n", x); + return 1; + } + + // If we see MAPERR that means we took a page fault rather than an SLB + // miss. We only expect to take page faults for addresses within the + // valid kernel range. 
+ FAIL_IF(fault_code == SEGV_MAPERR && \ + (fault_addr < PAGE_OFFSET || fault_addr >= kernel_virt_end)); + + FAIL_IF(fault_code != SEGV_MAPERR && fault_code != SEGV_BNDERR); + + return 0; +} + +static int test(void) +{ + unsigned long i, j, addr, region_shift, page_shift, page_size; + struct sigaction sig; + bool hash_mmu; + + sig = (struct sigaction) { + .sa_sigaction = segv_handler, + .sa_flags = SA_SIGINFO, + }; + + FAIL_IF(sigaction(SIGSEGV, &sig, NULL) != 0); + + FAIL_IF(using_hash_mmu(&hash_mmu)); + + page_size = sysconf(_SC_PAGESIZE); + if (page_size == (64 * 1024)) + page_shift = 16; + else + page_shift = 12; + + if (page_size == (64 * 1024) || !hash_mmu) { + region_shift = 52; + + // We have 7 512T regions (4 kernel linear, vmalloc, io, vmemmap) + kernel_virt_end = PAGE_OFFSET + (7 * (512ul << 40)); + } else if (page_size == (4 * 1024) && hash_mmu) { + region_shift = 46; + + // We have 7 64T regions (4 kernel linear, vmalloc, io, vmemmap) + kernel_virt_end = PAGE_OFFSET + (7 * (64ul << 40)); + } else + FAIL_IF(true); + + printf("Using %s MMU, PAGE_SIZE = %dKB start address 0x%016lx\n", + hash_mmu ? "hash" : "radix", + (1 << page_shift) >> 10, + 1ul << region_shift); + + // This generates access patterns like: + // 0x0010000000000000 + // 0x0010000000010000 + // 0x0010000000020000 + // ... + // 0x0014000000000000 + // 0x0018000000000000 + // 0x0020000000000000 + // 0x0020000000010000 + // 0x0020000000020000 + // ... + // 0xf400000000000000 + // 0xf800000000000000 + + for (i = 1; i <= ((0xful << 60) >> region_shift); i++) { + for (j = page_shift - 1; j < 60; j++) { + unsigned long base, delta; + + base = i << region_shift; + delta = 1ul << j; + + if (delta >= base) + break; + + addr = (base | delta) & ~((1 << page_shift) - 1); + + FAIL_IF(bad_access((char *)addr, false)); + FAIL_IF(bad_access((char *)addr, true)); + } + } + + return 0; +} + +int main(void) +{ + test_harness_set_timeout(300); + return test_harness(test, "bad_accesses"); +} diff --git a/tools/testing/selftests/powerpc/mm/exec_prot.c b/tools/testing/selftests/powerpc/mm/exec_prot.c new file mode 100644 index 0000000000..db75b2225d --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/exec_prot.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2022, Nicholas Miehlbradt, IBM Corporation + * based on pkey_exec_prot.c + * + * Test if applying execute protection on pages works as expected. 
+ */ + +#define _GNU_SOURCE +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <signal.h> + +#include <unistd.h> +#include <sys/mman.h> + +#include "pkeys.h" + + +#define PPC_INST_NOP 0x60000000 +#define PPC_INST_TRAP 0x7fe00008 +#define PPC_INST_BLR 0x4e800020 + +static volatile sig_atomic_t fault_code; +static volatile sig_atomic_t remaining_faults; +static volatile unsigned int *fault_addr; +static unsigned long pgsize, numinsns; +static unsigned int *insns; +static bool pkeys_supported; + +static bool is_fault_expected(int fault_code) +{ + if (fault_code == SEGV_ACCERR) + return true; + + /* Assume any pkey error is fine since pkey_exec_prot test covers them */ + if (fault_code == SEGV_PKUERR && pkeys_supported) + return true; + + return false; +} + +static void trap_handler(int signum, siginfo_t *sinfo, void *ctx) +{ + /* Check if this fault originated from the expected address */ + if (sinfo->si_addr != (void *)fault_addr) + sigsafe_err("got a fault for an unexpected address\n"); + + _exit(1); +} + +static void segv_handler(int signum, siginfo_t *sinfo, void *ctx) +{ + fault_code = sinfo->si_code; + + /* Check if this fault originated from the expected address */ + if (sinfo->si_addr != (void *)fault_addr) { + sigsafe_err("got a fault for an unexpected address\n"); + _exit(1); + } + + /* Check if too many faults have occurred for a single test case */ + if (!remaining_faults) { + sigsafe_err("got too many faults for the same address\n"); + _exit(1); + } + + + /* Restore permissions in order to continue */ + if (is_fault_expected(fault_code)) { + if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE | PROT_EXEC)) { + sigsafe_err("failed to set access permissions\n"); + _exit(1); + } + } else { + sigsafe_err("got a fault with an unexpected code\n"); + _exit(1); + } + + remaining_faults--; +} + +static int check_exec_fault(int rights) +{ + /* + * Jump to the executable region. + * + * The first iteration also checks if the overwrite of the + * first instruction word from a trap to a no-op succeeded. 
+ */ + fault_code = -1; + remaining_faults = 0; + if (!(rights & PROT_EXEC)) + remaining_faults = 1; + + FAIL_IF(mprotect(insns, pgsize, rights) != 0); + asm volatile("mtctr %0; bctrl" : : "r"(insns)); + + FAIL_IF(remaining_faults != 0); + if (!(rights & PROT_EXEC)) + FAIL_IF(!is_fault_expected(fault_code)); + + return 0; +} + +static int test(void) +{ + struct sigaction segv_act, trap_act; + int i; + + /* Skip the test if the CPU doesn't support Radix */ + SKIP_IF(!have_hwcap2(PPC_FEATURE2_ARCH_3_00)); + + /* Check if pkeys are supported */ + pkeys_supported = pkeys_unsupported() == 0; + + /* Setup SIGSEGV handler */ + segv_act.sa_handler = 0; + segv_act.sa_sigaction = segv_handler; + FAIL_IF(sigprocmask(SIG_SETMASK, 0, &segv_act.sa_mask) != 0); + segv_act.sa_flags = SA_SIGINFO; + segv_act.sa_restorer = 0; + FAIL_IF(sigaction(SIGSEGV, &segv_act, NULL) != 0); + + /* Setup SIGTRAP handler */ + trap_act.sa_handler = 0; + trap_act.sa_sigaction = trap_handler; + FAIL_IF(sigprocmask(SIG_SETMASK, 0, &trap_act.sa_mask) != 0); + trap_act.sa_flags = SA_SIGINFO; + trap_act.sa_restorer = 0; + FAIL_IF(sigaction(SIGTRAP, &trap_act, NULL) != 0); + + /* Setup executable region */ + pgsize = getpagesize(); + numinsns = pgsize / sizeof(unsigned int); + insns = (unsigned int *)mmap(NULL, pgsize, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + FAIL_IF(insns == MAP_FAILED); + + /* Write the instruction words */ + for (i = 1; i < numinsns - 1; i++) + insns[i] = PPC_INST_NOP; + + /* + * Set the first instruction as an unconditional trap. If + * the last write to this address succeeds, this should + * get overwritten by a no-op. + */ + insns[0] = PPC_INST_TRAP; + + /* + * Later, to jump to the executable region, we use a branch + * and link instruction (bctrl) which sets the return address + * automatically in LR. Use that to return back. + */ + insns[numinsns - 1] = PPC_INST_BLR; + + /* + * Pick the first instruction's address from the executable + * region. + */ + fault_addr = insns; + + /* + * Read an instruction word from the address when the page + * is execute only. This should generate an access fault. + */ + fault_code = -1; + remaining_faults = 1; + printf("Testing read on --x, should fault..."); + FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0); + i = *fault_addr; + FAIL_IF(remaining_faults != 0 || !is_fault_expected(fault_code)); + printf("ok!\n"); + + /* + * Write an instruction word to the address when the page + * execute only. This should also generate an access fault. 
+ */ + fault_code = -1; + remaining_faults = 1; + printf("Testing write on --x, should fault..."); + FAIL_IF(mprotect(insns, pgsize, PROT_EXEC) != 0); + *fault_addr = PPC_INST_NOP; + FAIL_IF(remaining_faults != 0 || !is_fault_expected(fault_code)); + printf("ok!\n"); + + printf("Testing exec on ---, should fault..."); + FAIL_IF(check_exec_fault(PROT_NONE)); + printf("ok!\n"); + + printf("Testing exec on r--, should fault..."); + FAIL_IF(check_exec_fault(PROT_READ)); + printf("ok!\n"); + + printf("Testing exec on -w-, should fault..."); + FAIL_IF(check_exec_fault(PROT_WRITE)); + printf("ok!\n"); + + printf("Testing exec on rw-, should fault..."); + FAIL_IF(check_exec_fault(PROT_READ | PROT_WRITE)); + printf("ok!\n"); + + printf("Testing exec on --x, should succeed..."); + FAIL_IF(check_exec_fault(PROT_EXEC)); + printf("ok!\n"); + + printf("Testing exec on r-x, should succeed..."); + FAIL_IF(check_exec_fault(PROT_READ | PROT_EXEC)); + printf("ok!\n"); + + printf("Testing exec on -wx, should succeed..."); + FAIL_IF(check_exec_fault(PROT_WRITE | PROT_EXEC)); + printf("ok!\n"); + + printf("Testing exec on rwx, should succeed..."); + FAIL_IF(check_exec_fault(PROT_READ | PROT_WRITE | PROT_EXEC)); + printf("ok!\n"); + + /* Cleanup */ + FAIL_IF(munmap((void *)insns, pgsize)); + + return 0; +} + +int main(void) +{ + return test_harness(test, "exec_prot"); +} diff --git a/tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c b/tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c new file mode 100644 index 0000000000..9932359ce3 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/hugetlb_vs_thp_test.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <stdio.h> +#include <sys/mman.h> +#include <unistd.h> + +#include "utils.h" + +/* This must match the huge page & THP size */ +#define SIZE (16 * 1024 * 1024) + +static int test_body(void) +{ + void *addr; + char *p; + + addr = (void *)0xa0000000; + + p = mmap(addr, SIZE, PROT_READ | PROT_WRITE, + MAP_HUGETLB | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + if (p != MAP_FAILED) { + /* + * Typically the mmap will fail because no huge pages are + * allocated on the system. But if there are huge pages + * allocated the mmap will succeed. That's fine too, we just + * munmap here before continuing. munmap() length of + * MAP_HUGETLB memory must be hugepage aligned. + */ + if (munmap(addr, SIZE)) { + perror("munmap"); + return 1; + } + } + + p = mmap(addr, SIZE, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + if (p == MAP_FAILED) { + printf("Mapping failed @ %p\n", addr); + perror("mmap"); + return 1; + } + + /* + * Either a user or kernel access is sufficient to trigger the bug. + * A kernel access is easier to spot & debug, as it will trigger the + * softlockup or RCU stall detectors, and when the system is kicked + * into xmon we get a backtrace in the kernel. + * + * A good option is: + * getcwd(p, SIZE); + * + * For the purposes of this testcase it's preferable to spin in + * userspace, so the harness can kill us if we get stuck. That way we + * see a test failure rather than a dead system. 
+ */ + *p = 0xf; + + munmap(addr, SIZE); + + return 0; +} + +static int test_main(void) +{ + int i; + + /* 10,000 because it's a "bunch", and completes reasonably quickly */ + for (i = 0; i < 10000; i++) + if (test_body()) + return 1; + + return 0; +} + +int main(void) +{ + return test_harness(test_main, "hugetlb_vs_thp"); +} diff --git a/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c b/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c new file mode 100644 index 0000000000..2363a7f3ab --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/large_vm_fork_separation.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Copyright 2019, Michael Ellerman, IBM Corp. +// +// Test that allocating memory beyond the memory limit and then forking is +// handled correctly, ie. the child is able to access the mappings beyond the +// memory limit and the child's writes are not visible to the parent. + +#include <stdio.h> +#include <stdlib.h> +#include <sys/mman.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "utils.h" + + +#ifndef MAP_FIXED_NOREPLACE +#define MAP_FIXED_NOREPLACE MAP_FIXED // "Should be safe" above 512TB +#endif + + +static int test(void) +{ + int p2c[2], c2p[2], rc, status, c, *p; + unsigned long page_size; + pid_t pid; + + page_size = sysconf(_SC_PAGESIZE); + SKIP_IF(page_size != 65536); + + // Create a mapping at 512TB to allocate an extended_id + p = mmap((void *)(512ul << 40), page_size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0); + if (p == MAP_FAILED) { + perror("mmap"); + printf("Error: couldn't mmap(), confirm kernel has 4TB support?\n"); + return 1; + } + + printf("parent writing %p = 1\n", p); + *p = 1; + + FAIL_IF(pipe(p2c) == -1 || pipe(c2p) == -1); + + pid = fork(); + if (pid == 0) { + FAIL_IF(read(p2c[0], &c, 1) != 1); + + pid = getpid(); + printf("child writing %p = %d\n", p, pid); + *p = pid; + + FAIL_IF(write(c2p[1], &c, 1) != 1); + FAIL_IF(read(p2c[0], &c, 1) != 1); + exit(0); + } + + c = 0; + FAIL_IF(write(p2c[1], &c, 1) != 1); + FAIL_IF(read(c2p[0], &c, 1) != 1); + + // Prevent compiler optimisation + barrier(); + + rc = 0; + printf("parent reading %p = %d\n", p, *p); + if (*p != 1) { + printf("Error: BUG! parent saw child's write! *p = %d\n", *p); + rc = 1; + } + + FAIL_IF(write(p2c[1], &c, 1) != 1); + FAIL_IF(waitpid(pid, &status, 0) == -1); + FAIL_IF(!WIFEXITED(status) || WEXITSTATUS(status)); + + if (rc == 0) + printf("success: test completed OK\n"); + + return rc; +} + +int main(void) +{ + return test_harness(test, "large_vm_fork_separation"); +} diff --git a/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c b/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c new file mode 100644 index 0000000000..7da515f1da --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Copyright 2022, Michael Ellerman, IBM Corp. +// +// Test that the 4PB address space SLB handling doesn't corrupt userspace registers +// (r9-r13) due to a SLB fault while saving the PPR. +// +// The bug was introduced in f384796c4 ("powerpc/mm: Add support for handling > 512TB +// address in SLB miss") and fixed in 4c2de74cc869 ("powerpc/64: Interrupts save PPR on +// stack rather than thread_struct"). +// +// To hit the bug requires the task struct and kernel stack to be in different segments. 
+// Usually that requires more than 1TB of RAM, or if that's not practical, boot the kernel +// with "disable_1tb_segments". +// +// The test works by creating mappings above 512TB, to trigger the large address space +// support. It creates 64 mappings, double the size of the SLB, to cause SLB faults on +// each access (assuming naive replacement). It then loops over those mappings touching +// each, and checks that r9-r13 aren't corrupted. +// +// It then forks another child and tries again, because a new child process will get a new +// kernel stack and thread struct allocated, which may be more optimally placed to trigger +// the bug. It would probably be better to leave the previous child processes hanging +// around, so that kernel stack & thread struct allocations are not reused, but that would +// amount to a 30 second fork bomb. The current design reliably triggers the bug on +// unpatched kernels. + +#include <signal.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/mman.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#include "utils.h" + +#ifndef MAP_FIXED_NOREPLACE +#define MAP_FIXED_NOREPLACE MAP_FIXED // "Should be safe" above 512TB +#endif + +#define BASE_ADDRESS (1ul << 50) // 1PB +#define STRIDE (2ul << 40) // 2TB +#define SLB_SIZE 32 +#define NR_MAPPINGS (SLB_SIZE * 2) + +static volatile sig_atomic_t signaled; + +static void signal_handler(int sig) +{ + signaled = 1; +} + +#define CHECK_REG(_reg) \ + if (_reg != _reg##_orig) { \ + printf(str(_reg) " corrupted! Expected 0x%lx != 0x%lx\n", _reg##_orig, \ + _reg); \ + _exit(1); \ + } + +static int touch_mappings(void) +{ + unsigned long r9_orig, r10_orig, r11_orig, r12_orig, r13_orig; + unsigned long r9, r10, r11, r12, r13; + unsigned long addr, *p; + int i; + + for (i = 0; i < NR_MAPPINGS; i++) { + addr = BASE_ADDRESS + (i * STRIDE); + p = (unsigned long *)addr; + + asm volatile("mr %0, %%r9 ;" // Read original GPR values + "mr %1, %%r10 ;" + "mr %2, %%r11 ;" + "mr %3, %%r12 ;" + "mr %4, %%r13 ;" + "std %10, 0(%11) ;" // Trigger SLB fault + "mr %5, %%r9 ;" // Save possibly corrupted values + "mr %6, %%r10 ;" + "mr %7, %%r11 ;" + "mr %8, %%r12 ;" + "mr %9, %%r13 ;" + "mr %%r9, %0 ;" // Restore original values + "mr %%r10, %1 ;" + "mr %%r11, %2 ;" + "mr %%r12, %3 ;" + "mr %%r13, %4 ;" + : "=&b"(r9_orig), "=&b"(r10_orig), "=&b"(r11_orig), + "=&b"(r12_orig), "=&b"(r13_orig), "=&b"(r9), "=&b"(r10), + "=&b"(r11), "=&b"(r12), "=&b"(r13) + : "b"(i), "b"(p) + : "r9", "r10", "r11", "r12", "r13"); + + CHECK_REG(r9); + CHECK_REG(r10); + CHECK_REG(r11); + CHECK_REG(r12); + CHECK_REG(r13); + } + + return 0; +} + +static int test(void) +{ + unsigned long page_size, addr, *p; + struct sigaction action; + bool hash_mmu; + int i, status; + pid_t pid; + + // This tests a hash MMU specific bug. + FAIL_IF(using_hash_mmu(&hash_mmu)); + SKIP_IF(!hash_mmu); + // 4K kernels don't support 4PB address space + SKIP_IF(sysconf(_SC_PAGESIZE) < 65536); + + page_size = sysconf(_SC_PAGESIZE); + + for (i = 0; i < NR_MAPPINGS; i++) { + addr = BASE_ADDRESS + (i * STRIDE); + + p = mmap((void *)addr, page_size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0); + if (p == MAP_FAILED) { + perror("mmap"); + printf("Error: couldn't mmap(), confirm kernel has 4PB support?\n"); + return 1; + } + } + + action.sa_handler = signal_handler; + action.sa_flags = SA_RESTART; + FAIL_IF(sigaction(SIGALRM, &action, NULL) < 0); + + // Seen to always crash in under ~10s on affected kernels. 
+ alarm(30); + + while (!signaled) { + // Fork new processes, to increase the chance that we hit the case where + // the kernel stack and task struct are in different segments. + pid = fork(); + if (pid == 0) + exit(touch_mappings()); + + FAIL_IF(waitpid(-1, &status, 0) == -1); + FAIL_IF(WIFSIGNALED(status)); + FAIL_IF(!WIFEXITED(status)); + FAIL_IF(WEXITSTATUS(status)); + } + + return 0; +} + +int main(void) +{ + return test_harness(test, "large_vm_gpr_corruption"); +} diff --git a/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c new file mode 100644 index 0000000000..0af4f02669 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/pkey_exec_prot.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0+ + +/* + * Copyright 2020, Sandipan Das, IBM Corp. + * + * Test if applying execute protection on pages using memory + * protection keys works as expected. + */ + +#define _GNU_SOURCE +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <signal.h> + +#include <unistd.h> + +#include "pkeys.h" + +#define PPC_INST_NOP 0x60000000 +#define PPC_INST_TRAP 0x7fe00008 +#define PPC_INST_BLR 0x4e800020 + +static volatile sig_atomic_t fault_pkey, fault_code, fault_type; +static volatile sig_atomic_t remaining_faults; +static volatile unsigned int *fault_addr; +static unsigned long pgsize, numinsns; +static unsigned int *insns; + +static void trap_handler(int signum, siginfo_t *sinfo, void *ctx) +{ + /* Check if this fault originated from the expected address */ + if (sinfo->si_addr != (void *) fault_addr) + sigsafe_err("got a fault for an unexpected address\n"); + + _exit(1); +} + +static void segv_handler(int signum, siginfo_t *sinfo, void *ctx) +{ + int signal_pkey; + + signal_pkey = siginfo_pkey(sinfo); + fault_code = sinfo->si_code; + + /* Check if this fault originated from the expected address */ + if (sinfo->si_addr != (void *) fault_addr) { + sigsafe_err("got a fault for an unexpected address\n"); + _exit(1); + } + + /* Check if too many faults have occurred for a single test case */ + if (!remaining_faults) { + sigsafe_err("got too many faults for the same address\n"); + _exit(1); + } + + + /* Restore permissions in order to continue */ + switch (fault_code) { + case SEGV_ACCERR: + if (mprotect(insns, pgsize, PROT_READ | PROT_WRITE)) { + sigsafe_err("failed to set access permissions\n"); + _exit(1); + } + break; + case SEGV_PKUERR: + if (signal_pkey != fault_pkey) { + sigsafe_err("got a fault for an unexpected pkey\n"); + _exit(1); + } + + switch (fault_type) { + case PKEY_DISABLE_ACCESS: + pkey_set_rights(fault_pkey, 0); + break; + case PKEY_DISABLE_EXECUTE: + /* + * Reassociate the exec-only pkey with the region + * to be able to continue. Unlike AMR, we cannot + * set IAMR directly from userspace to restore the + * permissions. 
+ */ + if (mprotect(insns, pgsize, PROT_EXEC)) { + sigsafe_err("failed to set execute permissions\n"); + _exit(1); + } + break; + default: + sigsafe_err("got a fault with an unexpected type\n"); + _exit(1); + } + break; + default: + sigsafe_err("got a fault with an unexpected code\n"); + _exit(1); + } + + remaining_faults--; +} + +static int test(void) +{ + struct sigaction segv_act, trap_act; + unsigned long rights; + int pkey, ret, i; + + ret = pkeys_unsupported(); + if (ret) + return ret; + + /* Setup SIGSEGV handler */ + segv_act.sa_handler = 0; + segv_act.sa_sigaction = segv_handler; + FAIL_IF(sigprocmask(SIG_SETMASK, 0, &segv_act.sa_mask) != 0); + segv_act.sa_flags = SA_SIGINFO; + segv_act.sa_restorer = 0; + FAIL_IF(sigaction(SIGSEGV, &segv_act, NULL) != 0); + + /* Setup SIGTRAP handler */ + trap_act.sa_handler = 0; + trap_act.sa_sigaction = trap_handler; + FAIL_IF(sigprocmask(SIG_SETMASK, 0, &trap_act.sa_mask) != 0); + trap_act.sa_flags = SA_SIGINFO; + trap_act.sa_restorer = 0; + FAIL_IF(sigaction(SIGTRAP, &trap_act, NULL) != 0); + + /* Setup executable region */ + pgsize = getpagesize(); + numinsns = pgsize / sizeof(unsigned int); + insns = (unsigned int *) mmap(NULL, pgsize, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + FAIL_IF(insns == MAP_FAILED); + + /* Write the instruction words */ + for (i = 1; i < numinsns - 1; i++) + insns[i] = PPC_INST_NOP; + + /* + * Set the first instruction as an unconditional trap. If + * the last write to this address succeeds, this should + * get overwritten by a no-op. + */ + insns[0] = PPC_INST_TRAP; + + /* + * Later, to jump to the executable region, we use a branch + * and link instruction (bctrl) which sets the return address + * automatically in LR. Use that to return back. + */ + insns[numinsns - 1] = PPC_INST_BLR; + + /* Allocate a pkey that restricts execution */ + rights = PKEY_DISABLE_EXECUTE; + pkey = sys_pkey_alloc(0, rights); + FAIL_IF(pkey < 0); + + /* + * Pick the first instruction's address from the executable + * region. + */ + fault_addr = insns; + + /* The following two cases will avoid SEGV_PKUERR */ + fault_type = -1; + fault_pkey = -1; + + /* + * Read an instruction word from the address when AMR bits + * are not set i.e. the pkey permits both read and write + * access. + * + * This should not generate a fault as having PROT_EXEC + * implies PROT_READ on GNU systems. The pkey currently + * restricts execution only based on the IAMR bits. The + * AMR bits are cleared. + */ + remaining_faults = 0; + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + printf("read from %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); + i = *fault_addr; + FAIL_IF(remaining_faults != 0); + + /* + * Write an instruction word to the address when AMR bits + * are not set i.e. the pkey permits both read and write + * access. + * + * This should generate an access fault as having just + * PROT_EXEC also restricts writes. The pkey currently + * restricts execution only based on the IAMR bits. The + * AMR bits are cleared. 
+ */ + remaining_faults = 1; + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + printf("write to %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); + *fault_addr = PPC_INST_TRAP; + FAIL_IF(remaining_faults != 0 || fault_code != SEGV_ACCERR); + + /* The following three cases will generate SEGV_PKUERR */ + rights |= PKEY_DISABLE_ACCESS; + fault_type = PKEY_DISABLE_ACCESS; + fault_pkey = pkey; + + /* + * Read an instruction word from the address when AMR bits + * are set i.e. the pkey permits neither read nor write + * access. + * + * This should generate a pkey fault based on AMR bits only + * as having PROT_EXEC implicitly allows reads. + */ + remaining_faults = 1; + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + pkey_set_rights(pkey, rights); + printf("read from %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); + i = *fault_addr; + FAIL_IF(remaining_faults != 0 || fault_code != SEGV_PKUERR); + + /* + * Write an instruction word to the address when AMR bits + * are set i.e. the pkey permits neither read nor write + * access. + * + * This should generate two faults. First, a pkey fault + * based on AMR bits and then an access fault since + * PROT_EXEC does not allow writes. + */ + remaining_faults = 2; + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + pkey_set_rights(pkey, rights); + printf("write to %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); + *fault_addr = PPC_INST_NOP; + FAIL_IF(remaining_faults != 0 || fault_code != SEGV_ACCERR); + + /* Free the current pkey */ + sys_pkey_free(pkey); + + rights = 0; + do { + /* + * Allocate pkeys with all valid combinations of read, + * write and execute restrictions. + */ + pkey = sys_pkey_alloc(0, rights); + FAIL_IF(pkey < 0); + + /* + * Jump to the executable region. AMR bits may or may not + * be set but they should not affect execution. + * + * This should generate pkey faults based on IAMR bits which + * may be set to restrict execution. + * + * The first iteration also checks if the overwrite of the + * first instruction word from a trap to a no-op succeeded. + */ + fault_pkey = pkey; + fault_type = -1; + remaining_faults = 0; + if (rights & PKEY_DISABLE_EXECUTE) { + fault_type = PKEY_DISABLE_EXECUTE; + remaining_faults = 1; + } + + FAIL_IF(sys_pkey_mprotect(insns, pgsize, PROT_EXEC, pkey) != 0); + printf("execute at %p, pkey permissions are %s\n", fault_addr, + pkey_rights(rights)); + asm volatile("mtctr %0; bctrl" : : "r"(insns)); + FAIL_IF(remaining_faults != 0); + if (rights & PKEY_DISABLE_EXECUTE) + FAIL_IF(fault_code != SEGV_PKUERR); + + /* Free the current pkey */ + sys_pkey_free(pkey); + + /* Find next valid combination of pkey rights */ + rights = next_pkey_rights(rights); + } while (rights); + + /* Cleanup */ + munmap((void *) insns, pgsize); + + return 0; +} + +int main(void) +{ + return test_harness(test, "pkey_exec_prot"); +} diff --git a/tools/testing/selftests/powerpc/mm/pkey_siginfo.c b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c new file mode 100644 index 0000000000..2db76e56d4 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/pkey_siginfo.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2020, Sandipan Das, IBM Corp. + * + * Test if the signal information reports the correct memory protection + * key upon getting a key access violation fault for a page that was + * attempted to be protected by two different keys from two competing + * threads at the same time. 
+ */ + +#define _GNU_SOURCE +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <signal.h> + +#include <unistd.h> +#include <pthread.h> +#include <sys/mman.h> + +#include "pkeys.h" + +#define PPC_INST_NOP 0x60000000 +#define PPC_INST_BLR 0x4e800020 +#define PROT_RWX (PROT_READ | PROT_WRITE | PROT_EXEC) + +#define NUM_ITERATIONS 1000000 + +static volatile sig_atomic_t perm_pkey, rest_pkey; +static volatile sig_atomic_t rights, fault_count; +static volatile unsigned int *volatile fault_addr; +static pthread_barrier_t iteration_barrier; + +static void segv_handler(int signum, siginfo_t *sinfo, void *ctx) +{ + void *pgstart; + size_t pgsize; + int pkey; + + pkey = siginfo_pkey(sinfo); + + /* Check if this fault originated from a pkey access violation */ + if (sinfo->si_code != SEGV_PKUERR) { + sigsafe_err("got a fault for an unexpected reason\n"); + _exit(1); + } + + /* Check if this fault originated from the expected address */ + if (sinfo->si_addr != (void *) fault_addr) { + sigsafe_err("got a fault for an unexpected address\n"); + _exit(1); + } + + /* Check if this fault originated from the restrictive pkey */ + if (pkey != rest_pkey) { + sigsafe_err("got a fault for an unexpected pkey\n"); + _exit(1); + } + + /* Check if too many faults have occurred for the same iteration */ + if (fault_count > 0) { + sigsafe_err("got too many faults for the same address\n"); + _exit(1); + } + + pgsize = getpagesize(); + pgstart = (void *) ((unsigned long) fault_addr & ~(pgsize - 1)); + + /* + * If the current fault occurred due to lack of execute rights, + * reassociate the page with the exec-only pkey since execute + * rights cannot be changed directly for the faulting pkey as + * IAMR is inaccessible from userspace. + * + * Otherwise, if the current fault occurred due to lack of + * read-write rights, change the AMR permission bits for the + * pkey. + * + * This will let the test continue. 
+ */ + if (rights == PKEY_DISABLE_EXECUTE && + mprotect(pgstart, pgsize, PROT_EXEC)) + _exit(1); + else + pkey_set_rights(pkey, 0); + + fault_count++; +} + +struct region { + unsigned long rights; + unsigned int *base; + size_t size; +}; + +static void *protect(void *p) +{ + unsigned long rights; + unsigned int *base; + size_t size; + int tid, i; + + tid = gettid(); + base = ((struct region *) p)->base; + size = ((struct region *) p)->size; + FAIL_IF_EXIT(!base); + + /* No read, write and execute restrictions */ + rights = 0; + + printf("tid %d, pkey permissions are %s\n", tid, pkey_rights(rights)); + + /* Allocate the permissive pkey */ + perm_pkey = sys_pkey_alloc(0, rights); + FAIL_IF_EXIT(perm_pkey < 0); + + /* + * Repeatedly try to protect the common region with a permissive + * pkey + */ + for (i = 0; i < NUM_ITERATIONS; i++) { + /* + * Wait until the other thread has finished allocating the + * restrictive pkey or until the next iteration has begun + */ + pthread_barrier_wait(&iteration_barrier); + + /* Try to associate the permissive pkey with the region */ + FAIL_IF_EXIT(sys_pkey_mprotect(base, size, PROT_RWX, + perm_pkey)); + } + + /* Free the permissive pkey */ + sys_pkey_free(perm_pkey); + + return NULL; +} + +static void *protect_access(void *p) +{ + size_t size, numinsns; + unsigned int *base; + int tid, i; + + tid = gettid(); + base = ((struct region *) p)->base; + size = ((struct region *) p)->size; + rights = ((struct region *) p)->rights; + numinsns = size / sizeof(base[0]); + FAIL_IF_EXIT(!base); + + /* Allocate the restrictive pkey */ + rest_pkey = sys_pkey_alloc(0, rights); + FAIL_IF_EXIT(rest_pkey < 0); + + printf("tid %d, pkey permissions are %s\n", tid, pkey_rights(rights)); + printf("tid %d, %s randomly in range [%p, %p]\n", tid, + (rights == PKEY_DISABLE_EXECUTE) ? "execute" : + (rights == PKEY_DISABLE_WRITE) ? 
"write" : "read", + base, base + numinsns); + + /* + * Repeatedly try to protect the common region with a restrictive + * pkey and read, write or execute from it + */ + for (i = 0; i < NUM_ITERATIONS; i++) { + /* + * Wait until the other thread has finished allocating the + * permissive pkey or until the next iteration has begun + */ + pthread_barrier_wait(&iteration_barrier); + + /* Try to associate the restrictive pkey with the region */ + FAIL_IF_EXIT(sys_pkey_mprotect(base, size, PROT_RWX, + rest_pkey)); + + /* Choose a random instruction word address from the region */ + fault_addr = base + (rand() % numinsns); + fault_count = 0; + + switch (rights) { + /* Read protection test */ + case PKEY_DISABLE_ACCESS: + /* + * Read an instruction word from the region and + * verify if it has not been overwritten to + * something unexpected + */ + FAIL_IF_EXIT(*fault_addr != PPC_INST_NOP && + *fault_addr != PPC_INST_BLR); + break; + + /* Write protection test */ + case PKEY_DISABLE_WRITE: + /* + * Write an instruction word to the region and + * verify if the overwrite has succeeded + */ + *fault_addr = PPC_INST_BLR; + FAIL_IF_EXIT(*fault_addr != PPC_INST_BLR); + break; + + /* Execute protection test */ + case PKEY_DISABLE_EXECUTE: + /* Jump to the region and execute instructions */ + asm volatile( + "mtctr %0; bctrl" + : : "r"(fault_addr) : "ctr", "lr"); + break; + } + + /* + * Restore the restrictions originally imposed by the + * restrictive pkey as the signal handler would have + * cleared out the corresponding AMR bits + */ + pkey_set_rights(rest_pkey, rights); + } + + /* Free restrictive pkey */ + sys_pkey_free(rest_pkey); + + return NULL; +} + +static void reset_pkeys(unsigned long rights) +{ + int pkeys[NR_PKEYS], i; + + /* Exhaustively allocate all available pkeys */ + for (i = 0; i < NR_PKEYS; i++) + pkeys[i] = sys_pkey_alloc(0, rights); + + /* Free all allocated pkeys */ + for (i = 0; i < NR_PKEYS; i++) + sys_pkey_free(pkeys[i]); +} + +static int test(void) +{ + pthread_t prot_thread, pacc_thread; + struct sigaction act; + pthread_attr_t attr; + size_t numinsns; + struct region r; + int ret, i; + + srand(time(NULL)); + ret = pkeys_unsupported(); + if (ret) + return ret; + + /* Allocate the region */ + r.size = getpagesize(); + r.base = mmap(NULL, r.size, PROT_RWX, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + FAIL_IF(r.base == MAP_FAILED); + + /* + * Fill the region with no-ops with a branch at the end + * for returning to the caller + */ + numinsns = r.size / sizeof(r.base[0]); + for (i = 0; i < numinsns - 1; i++) + r.base[i] = PPC_INST_NOP; + r.base[i] = PPC_INST_BLR; + + /* Setup SIGSEGV handler */ + act.sa_handler = 0; + act.sa_sigaction = segv_handler; + FAIL_IF(sigprocmask(SIG_SETMASK, 0, &act.sa_mask) != 0); + act.sa_flags = SA_SIGINFO; + act.sa_restorer = 0; + FAIL_IF(sigaction(SIGSEGV, &act, NULL) != 0); + + /* + * For these tests, the parent process should clear all bits of + * AMR and IAMR, i.e. impose no restrictions, for all available + * pkeys. This will be the base for the initial AMR and IAMR + * values for all the test thread pairs. + * + * If the AMR and IAMR bits of all available pkeys are cleared + * before running the tests and a fault is generated when + * attempting to read, write or execute instructions from a + * pkey protected region, the pkey responsible for this must be + * the one from the protect-and-access thread since the other + * one is fully permissive. 
Despite that, if the pkey reported + * by siginfo is not the restrictive pkey, then there must be a + * kernel bug. + */ + reset_pkeys(0); + + /* Setup barrier for protect and protect-and-access threads */ + FAIL_IF(pthread_attr_init(&attr) != 0); + FAIL_IF(pthread_barrier_init(&iteration_barrier, NULL, 2) != 0); + + /* Setup and start protect and protect-and-read threads */ + puts("starting thread pair (protect, protect-and-read)"); + r.rights = PKEY_DISABLE_ACCESS; + FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0); + FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0); + FAIL_IF(pthread_join(prot_thread, NULL) != 0); + FAIL_IF(pthread_join(pacc_thread, NULL) != 0); + + /* Setup and start protect and protect-and-write threads */ + puts("starting thread pair (protect, protect-and-write)"); + r.rights = PKEY_DISABLE_WRITE; + FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0); + FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0); + FAIL_IF(pthread_join(prot_thread, NULL) != 0); + FAIL_IF(pthread_join(pacc_thread, NULL) != 0); + + /* Setup and start protect and protect-and-execute threads */ + puts("starting thread pair (protect, protect-and-execute)"); + r.rights = PKEY_DISABLE_EXECUTE; + FAIL_IF(pthread_create(&prot_thread, &attr, &protect, &r) != 0); + FAIL_IF(pthread_create(&pacc_thread, &attr, &protect_access, &r) != 0); + FAIL_IF(pthread_join(prot_thread, NULL) != 0); + FAIL_IF(pthread_join(pacc_thread, NULL) != 0); + + /* Cleanup */ + FAIL_IF(pthread_attr_destroy(&attr) != 0); + FAIL_IF(pthread_barrier_destroy(&iteration_barrier) != 0); + munmap(r.base, r.size); + + return 0; +} + +int main(void) +{ + return test_harness(test, "pkey_siginfo"); +} diff --git a/tools/testing/selftests/powerpc/mm/prot_sao.c b/tools/testing/selftests/powerpc/mm/prot_sao.c new file mode 100644 index 0000000000..30b71b1d78 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/prot_sao.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2016, Michael Ellerman, IBM Corp. + */ + +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> +#include <unistd.h> + +#include <asm/cputable.h> + +#include "utils.h" + +#define SIZE (64 * 1024) + +int test_prot_sao(void) +{ + char *p; + + /* + * SAO was introduced in 2.06 and removed in 3.1. It's disabled in + * guests/LPARs by default, so also skip if we are running in a guest. + */ + SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06) || + have_hwcap2(PPC_FEATURE2_ARCH_3_1) || + access("/proc/device-tree/rtas/ibm,hypertas-functions", F_OK) == 0); + + /* + * Ensure we can ask for PROT_SAO. + * We can't really verify that it does the right thing, but at least we + * confirm the kernel will accept it. + */ + p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + FAIL_IF(p == MAP_FAILED); + + /* Write to the mapping, to at least cause a fault */ + memset(p, 0xaa, SIZE); + + return 0; +} + +int main(void) +{ + return test_harness(test_prot_sao, "prot-sao"); +} diff --git a/tools/testing/selftests/powerpc/mm/segv_errors.c b/tools/testing/selftests/powerpc/mm/segv_errors.c new file mode 100644 index 0000000000..06ae76ee3e --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/segv_errors.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2017 John Sperbeck + * + * Test that an access to a mapped but inaccessible area causes a SEGV and + * reports si_code == SEGV_ACCERR. 
+ */ + +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <signal.h> +#include <sys/mman.h> +#include <assert.h> +#include <ucontext.h> + +#include "utils.h" + +static bool faulted; +static int si_code; + +static void segv_handler(int n, siginfo_t *info, void *ctxt_v) +{ + ucontext_t *ctxt = (ucontext_t *)ctxt_v; + struct pt_regs *regs = ctxt->uc_mcontext.regs; + + faulted = true; + si_code = info->si_code; + regs->nip += 4; +} + +int test_segv_errors(void) +{ + struct sigaction act = { + .sa_sigaction = segv_handler, + .sa_flags = SA_SIGINFO, + }; + char c, *p = NULL; + + p = mmap(NULL, getpagesize(), 0, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + FAIL_IF(p == MAP_FAILED); + + FAIL_IF(sigaction(SIGSEGV, &act, NULL) != 0); + + faulted = false; + si_code = 0; + + /* + * We just need a compiler barrier, but mb() works and has the nice + * property of being easy to spot in the disassembly. + */ + mb(); + c = *p; + mb(); + + FAIL_IF(!faulted); + FAIL_IF(si_code != SEGV_ACCERR); + + faulted = false; + si_code = 0; + + mb(); + *p = c; + mb(); + + FAIL_IF(!faulted); + FAIL_IF(si_code != SEGV_ACCERR); + + return 0; +} + +int main(void) +{ + return test_harness(test_segv_errors, "segv_errors"); +} diff --git a/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c b/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c new file mode 100644 index 0000000000..ed91439908 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test that loads/stores expand the stack segment, or trigger a SEGV, in + * various conditions. + * + * Based on test code by Tom Lane. + */ + +#undef NDEBUG +#include <assert.h> + +#include <err.h> +#include <errno.h> +#include <stdio.h> +#include <signal.h> +#include <stdlib.h> +#include <string.h> +#include <sys/resource.h> +#include <sys/time.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <unistd.h> + +#define _KB (1024) +#define _MB (1024 * 1024) + +volatile char *stack_top_ptr; +volatile unsigned long stack_top_sp; +volatile char c; + +enum access_type { + LOAD, + STORE, +}; + +/* + * Consume stack until the stack pointer is below @target_sp, then do an access + * (load or store) at offset @delta from either the base of the stack or the + * current stack pointer. + */ +__attribute__ ((noinline)) +int consume_stack(unsigned long target_sp, unsigned long stack_high, int delta, enum access_type type) +{ + unsigned long target; + char stack_cur; + + if ((unsigned long)&stack_cur > target_sp) + return consume_stack(target_sp, stack_high, delta, type); + else { + // We don't really need this, but without it GCC might not + // generate a recursive call above. + stack_top_ptr = &stack_cur; + +#ifdef __powerpc__ + asm volatile ("mr %[sp], %%r1" : [sp] "=r" (stack_top_sp)); +#else + asm volatile ("mov %%rsp, %[sp]" : [sp] "=r" (stack_top_sp)); +#endif + target = stack_high - delta + 1; + volatile char *p = (char *)target; + + if (type == STORE) + *p = c; + else + c = *p; + + // Do something to prevent the stack frame being popped prior to + // our access above. 
+ getpid(); + } + + return 0; +} + +static int search_proc_maps(char *needle, unsigned long *low, unsigned long *high) +{ + unsigned long start, end; + static char buf[4096]; + char name[128]; + FILE *f; + int rc; + + f = fopen("/proc/self/maps", "r"); + if (!f) { + perror("fopen"); + return -1; + } + + while (fgets(buf, sizeof(buf), f)) { + rc = sscanf(buf, "%lx-%lx %*c%*c%*c%*c %*x %*d:%*d %*d %127s\n", + &start, &end, name); + if (rc == 2) + continue; + + if (rc != 3) { + printf("sscanf errored\n"); + rc = -1; + break; + } + + if (strstr(name, needle)) { + *low = start; + *high = end - 1; + rc = 0; + break; + } + } + + fclose(f); + + return rc; +} + +int child(unsigned int stack_used, int delta, enum access_type type) +{ + unsigned long low, stack_high; + + assert(search_proc_maps("[stack]", &low, &stack_high) == 0); + + assert(consume_stack(stack_high - stack_used, stack_high, delta, type) == 0); + + printf("Access OK: %s delta %-7d used size 0x%06x stack high 0x%lx top_ptr %p top sp 0x%lx actual used 0x%lx\n", + type == LOAD ? "load" : "store", delta, stack_used, stack_high, + stack_top_ptr, stack_top_sp, stack_high - stack_top_sp + 1); + + return 0; +} + +static int test_one(unsigned int stack_used, int delta, enum access_type type) +{ + pid_t pid; + int rc; + + pid = fork(); + if (pid == 0) + exit(child(stack_used, delta, type)); + + assert(waitpid(pid, &rc, 0) != -1); + + if (WIFEXITED(rc) && WEXITSTATUS(rc) == 0) + return 0; + + // We don't expect a non-zero exit that's not a signal + assert(!WIFEXITED(rc)); + + printf("Faulted: %s delta %-7d used size 0x%06x signal %d\n", + type == LOAD ? "load" : "store", delta, stack_used, + WTERMSIG(rc)); + + return 1; +} + +// This is fairly arbitrary but is well below any of the targets below, +// so that the delta between the stack pointer and the target is large. +#define DEFAULT_SIZE (32 * _KB) + +static void test_one_type(enum access_type type, unsigned long page_size, unsigned long rlim_cur) +{ + unsigned long delta; + + // We should be able to access anywhere within the rlimit + for (delta = page_size; delta <= rlim_cur; delta += page_size) + assert(test_one(DEFAULT_SIZE, delta, type) == 0); + + assert(test_one(DEFAULT_SIZE, rlim_cur, type) == 0); + + // But if we go past the rlimit it should fail + assert(test_one(DEFAULT_SIZE, rlim_cur + 1, type) != 0); +} + +static int test(void) +{ + unsigned long page_size; + struct rlimit rlimit; + + page_size = getpagesize(); + getrlimit(RLIMIT_STACK, &rlimit); + printf("Stack rlimit is 0x%lx\n", rlimit.rlim_cur); + + printf("Testing loads ...\n"); + test_one_type(LOAD, page_size, rlimit.rlim_cur); + printf("Testing stores ...\n"); + test_one_type(STORE, page_size, rlimit.rlim_cur); + + printf("All OK\n"); + + return 0; +} + +#ifdef __powerpc__ +#include "utils.h" + +int main(void) +{ + return test_harness(test, "stack_expansion_ldst"); +} +#else +int main(void) +{ + return test(); +} +#endif diff --git a/tools/testing/selftests/powerpc/mm/stack_expansion_signal.c b/tools/testing/selftests/powerpc/mm/stack_expansion_signal.c new file mode 100644 index 0000000000..c8b32a29e2 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/stack_expansion_signal.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Test that signal delivery is able to expand the stack segment without + * triggering a SEGV. + * + * Based on test code by Tom Lane. 
+ */ + +#include <err.h> +#include <stdio.h> +#include <stdlib.h> +#include <signal.h> +#include <sys/types.h> +#include <unistd.h> + +#include "../pmu/lib.h" +#include "utils.h" + +#define _KB (1024) +#define _MB (1024 * 1024) + +static char *stack_base_ptr; +static char *stack_top_ptr; + +static volatile sig_atomic_t sig_occurred = 0; + +static void sigusr1_handler(int signal_arg) +{ + sig_occurred = 1; +} + +static int consume_stack(unsigned int stack_size, union pipe write_pipe) +{ + char stack_cur; + + if ((stack_base_ptr - &stack_cur) < stack_size) + return consume_stack(stack_size, write_pipe); + else { + stack_top_ptr = &stack_cur; + + FAIL_IF(notify_parent(write_pipe)); + + while (!sig_occurred) + barrier(); + } + + return 0; +} + +static int child(unsigned int stack_size, union pipe write_pipe) +{ + struct sigaction act; + char stack_base; + + act.sa_handler = sigusr1_handler; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + if (sigaction(SIGUSR1, &act, NULL) < 0) + err(1, "sigaction"); + + stack_base_ptr = (char *) (((size_t) &stack_base + 65535) & ~65535UL); + + FAIL_IF(consume_stack(stack_size, write_pipe)); + + printf("size 0x%06x: OK, stack base %p top %p (%zx used)\n", + stack_size, stack_base_ptr, stack_top_ptr, + stack_base_ptr - stack_top_ptr); + + return 0; +} + +static int test_one_size(unsigned int stack_size) +{ + union pipe read_pipe, write_pipe; + pid_t pid; + + FAIL_IF(pipe(read_pipe.fds) == -1); + FAIL_IF(pipe(write_pipe.fds) == -1); + + pid = fork(); + if (pid == 0) { + close(read_pipe.read_fd); + close(write_pipe.write_fd); + exit(child(stack_size, read_pipe)); + } + + close(read_pipe.write_fd); + close(write_pipe.read_fd); + FAIL_IF(sync_with_child(read_pipe, write_pipe)); + + kill(pid, SIGUSR1); + + FAIL_IF(wait_for_child(pid)); + + close(read_pipe.read_fd); + close(write_pipe.write_fd); + + return 0; +} + +int test(void) +{ + unsigned int i, size; + + // Test with used stack from 1MB - 64K to 1MB + 64K + // Increment by 64 to get more coverage of odd sizes + for (i = 0; i < (128 * _KB); i += 64) { + size = i + (1 * _MB) - (64 * _KB); + FAIL_IF(test_one_size(size)); + } + + return 0; +} + +int main(void) +{ + return test_harness(test, "stack_expansion_signal"); +} diff --git a/tools/testing/selftests/powerpc/mm/stress_code_patching.sh b/tools/testing/selftests/powerpc/mm/stress_code_patching.sh new file mode 100755 index 0000000000..e454509659 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/stress_code_patching.sh @@ -0,0 +1,49 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0-or-later + +TIMEOUT=30 + +DEBUFS_DIR=`cat /proc/mounts | grep debugfs | awk '{print $2}'` +if [ ! -e "$DEBUFS_DIR" ] +then + echo "debugfs not found, skipping" 1>&2 + exit 4 +fi + +if [ ! -e "$DEBUFS_DIR/tracing/current_tracer" ] +then + echo "Tracing files not found, skipping" 1>&2 + exit 4 +fi + + +echo "Testing for spurious faults when mapping kernel memory..." + +if grep -q "FUNCTION TRACING IS CORRUPTED" "$DEBUFS_DIR/tracing/trace" +then + echo "FAILED: Ftrace already dead. 
Probably due to a spurious fault" 1>&2 + exit 1 +fi + +dmesg -C +START_TIME=`date +%s` +END_TIME=`expr $START_TIME + $TIMEOUT` +while [ `date +%s` -lt $END_TIME ] +do + echo function > $DEBUFS_DIR/tracing/current_tracer + echo nop > $DEBUFS_DIR/tracing/current_tracer + if dmesg | grep -q 'ftrace bug' + then + break + fi +done + +echo nop > $DEBUFS_DIR/tracing/current_tracer +if dmesg | grep -q 'ftrace bug' +then + echo "FAILED: Mapping kernel memory causes spurious faults" 1>&2 + exit 1 +else + echo "OK: Mapping kernel memory does not cause spurious faults" + exit 0 +fi diff --git a/tools/testing/selftests/powerpc/mm/subpage_prot.c b/tools/testing/selftests/powerpc/mm/subpage_prot.c new file mode 100644 index 0000000000..3ae77ba932 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/subpage_prot.c @@ -0,0 +1,236 @@ +/* + * Copyright IBM Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + */ + +#include <assert.h> +#include <errno.h> +#include <fcntl.h> +#include <signal.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> +#include <sys/ptrace.h> +#include <sys/syscall.h> +#include <ucontext.h> +#include <unistd.h> + +#include "utils.h" + +char *file_name; + +int in_test; +volatile int faulted; +volatile void *dar; +int errors; + +static void segv(int signum, siginfo_t *info, void *ctxt_v) +{ + ucontext_t *ctxt = (ucontext_t *)ctxt_v; + struct pt_regs *regs = ctxt->uc_mcontext.regs; + + if (!in_test) { + fprintf(stderr, "Segfault outside of test !\n"); + exit(1); + } + + faulted = 1; + dar = (void *)regs->dar; + regs->nip += 4; +} + +static inline void do_read(const volatile void *addr) +{ + int ret; + + asm volatile("lwz %0,0(%1); twi 0,%0,0; isync;\n" + : "=r" (ret) : "r" (addr) : "memory"); +} + +static inline void do_write(const volatile void *addr) +{ + int val = 0x1234567; + + asm volatile("stw %0,0(%1); sync; \n" + : : "r" (val), "r" (addr) : "memory"); +} + +static inline void check_faulted(void *addr, long page, long subpage, int write) +{ + int want_fault = (subpage == ((page + 3) % 16)); + + if (write) + want_fault |= (subpage == ((page + 1) % 16)); + + if (faulted != want_fault) { + printf("Failed at %p (p=%ld,sp=%ld,w=%d), want=%s, got=%s !\n", + addr, page, subpage, write, + want_fault ? "fault" : "pass", + faulted ? 
"fault" : "pass"); + ++errors; + } + + if (faulted) { + if (dar != addr) { + printf("Fault expected at %p and happened at %p !\n", + addr, dar); + } + faulted = 0; + asm volatile("sync" : : : "memory"); + } +} + +static int run_test(void *addr, unsigned long size) +{ + unsigned int *map; + long i, j, pages, err; + + pages = size / 0x10000; + map = malloc(pages * 4); + assert(map); + + /* + * for each page, mark subpage i % 16 read only and subpage + * (i + 3) % 16 inaccessible + */ + for (i = 0; i < pages; i++) { + map[i] = (0x40000000 >> (((i + 1) * 2) % 32)) | + (0xc0000000 >> (((i + 3) * 2) % 32)); + } + + err = syscall(__NR_subpage_prot, addr, size, map); + if (err) { + perror("subpage_perm"); + return 1; + } + free(map); + + in_test = 1; + errors = 0; + for (i = 0; i < pages; i++) { + for (j = 0; j < 16; j++, addr += 0x1000) { + do_read(addr); + check_faulted(addr, i, j, 0); + do_write(addr); + check_faulted(addr, i, j, 1); + } + } + + in_test = 0; + if (errors) { + printf("%d errors detected\n", errors); + return 1; + } + + return 0; +} + +static int syscall_available(void) +{ + int rc; + + errno = 0; + rc = syscall(__NR_subpage_prot, 0, 0, 0); + + return rc == 0 || (errno != ENOENT && errno != ENOSYS); +} + +int test_anon(void) +{ + unsigned long align; + struct sigaction act = { + .sa_sigaction = segv, + .sa_flags = SA_SIGINFO + }; + void *mallocblock; + unsigned long mallocsize; + + SKIP_IF(!syscall_available()); + + if (getpagesize() != 0x10000) { + fprintf(stderr, "Kernel page size must be 64K!\n"); + return 1; + } + + sigaction(SIGSEGV, &act, NULL); + + mallocsize = 4 * 16 * 1024 * 1024; + + FAIL_IF(posix_memalign(&mallocblock, 64 * 1024, mallocsize)); + + align = (unsigned long)mallocblock; + if (align & 0xffff) + align = (align | 0xffff) + 1; + + mallocblock = (void *)align; + + printf("allocated malloc block of 0x%lx bytes at %p\n", + mallocsize, mallocblock); + + printf("testing malloc block...\n"); + + return run_test(mallocblock, mallocsize); +} + +int test_file(void) +{ + struct sigaction act = { + .sa_sigaction = segv, + .sa_flags = SA_SIGINFO + }; + void *fileblock; + off_t filesize; + int fd; + + SKIP_IF(!syscall_available()); + + fd = open(file_name, O_RDWR); + if (fd == -1) { + perror("failed to open file"); + return 1; + } + sigaction(SIGSEGV, &act, NULL); + + filesize = lseek(fd, 0, SEEK_END); + if (filesize & 0xffff) + filesize &= ~0xfffful; + + fileblock = mmap(NULL, filesize, PROT_READ | PROT_WRITE, + MAP_SHARED, fd, 0); + if (fileblock == MAP_FAILED) { + perror("failed to map file"); + return 1; + } + printf("allocated %s for 0x%lx bytes at %p\n", + file_name, filesize, fileblock); + + printf("testing file map...\n"); + + return run_test(fileblock, filesize); +} + +int main(int argc, char *argv[]) +{ + int rc; + + rc = test_harness(test_anon, "subpage_prot_anon"); + if (rc) + return rc; + + if (argc > 1) + file_name = argv[1]; + else + file_name = "tempfile"; + + return test_harness(test_file, "subpage_prot_file"); +} diff --git a/tools/testing/selftests/powerpc/mm/tlbie_test.c b/tools/testing/selftests/powerpc/mm/tlbie_test.c new file mode 100644 index 0000000000..48344a74b2 --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/tlbie_test.c @@ -0,0 +1,733 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2019, Nick Piggin, Gautham R. Shenoy, Aneesh Kumar K.V, IBM Corp. + */ + +/* + * + * Test tlbie/mtpidr race. We have 4 threads doing flush/load/compare/store + * sequence in a loop. 
The same threads also run a context switch task + * that does sched_yield() in a loop. + * + * The snapshot thread marks the mmap area PROT_READ in between, makes a copy + * and copies it back to the original area. This helps us detect if any + * store continued to happen after we marked the memory PROT_READ. + */ + +#define _GNU_SOURCE +#include <stdio.h> +#include <sys/mman.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <sys/ipc.h> +#include <sys/shm.h> +#include <sys/stat.h> +#include <sys/time.h> +#include <linux/futex.h> +#include <unistd.h> +#include <asm/unistd.h> +#include <string.h> +#include <stdlib.h> +#include <fcntl.h> +#include <sched.h> +#include <time.h> +#include <stdarg.h> +#include <pthread.h> +#include <signal.h> +#include <sys/prctl.h> + +static inline void dcbf(volatile unsigned int *addr) +{ + __asm__ __volatile__ ("dcbf %y0; sync" : : "Z"(*(unsigned char *)addr) : "memory"); +} + +static void err_msg(char *msg) +{ + time_t now; + + time(&now); + printf("=================================\n"); + printf(" Error: %s\n", msg); + printf(" %s", ctime(&now)); + printf("=================================\n"); + exit(1); +} + +static char *map1; +static char *map2; +static pid_t rim_process_pid; + +/* + * A "rim-sequence" is defined to be the sequence of the following + * operations performed on a memory word: + * 1) FLUSH the contents of that word. + * 2) LOAD the contents of that word. + * 3) COMPARE the contents of that word with the content that was + * previously stored at that word. + * 4) STORE new content into that word. + * + * The threads in this test that perform the rim-sequence are termed + * rim_threads. + */ + +/* + * A "corruption" is defined to be a failed COMPARE operation in a + * rim-sequence. + * + * A rim_thread that detects a corruption informs all the other + * rim_threads, and the mem_snapshot thread. + */ +static volatile unsigned int corruption_found; + +/* + * This defines the maximum number of rim_threads in this test. + * + * The THREAD_ID_BITS denote the number of bits required + * to represent the thread_ids [0..MAX_THREADS - 1]. + * We are being a bit paranoid here and set it to 8 bits, + * though 6 bits suffice. + */ +#define MAX_THREADS 64 +#define THREAD_ID_BITS 8 +#define THREAD_ID_MASK ((1 << THREAD_ID_BITS) - 1) +static unsigned int rim_thread_ids[MAX_THREADS]; +static pthread_t rim_threads[MAX_THREADS]; + + +/* + * Each rim_thread works on an exclusive "chunk" of size + * RIM_CHUNK_SIZE. + * + * The ith rim_thread works on the ith chunk. + * + * The ith chunk begins at + * map1 + (i * RIM_CHUNK_SIZE) + */ +#define RIM_CHUNK_SIZE 1024 +#define BITS_PER_BYTE 8 +#define WORD_SIZE (sizeof(unsigned int)) +#define WORD_BITS (WORD_SIZE * BITS_PER_BYTE) +#define WORDS_PER_CHUNK (RIM_CHUNK_SIZE/WORD_SIZE) + +static inline char *compute_chunk_start_addr(unsigned int thread_id) +{ + char *chunk_start; + + chunk_start = (char *)((unsigned long)map1 + + (thread_id * RIM_CHUNK_SIZE)); + + return chunk_start; +} + +/* + * The "word-offset" of a word-aligned address inside a chunk is + * defined to be the number of words that precede the address in that + * chunk. + * + * WORD_OFFSET_BITS denote the number of bits required to represent + * the word-offsets of all the word-aligned addresses of a chunk.
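 + * + * For example, with RIM_CHUNK_SIZE = 1024 and WORD_SIZE = 4, each chunk + * holds WORDS_PER_CHUNK = 256 words, so WORD_OFFSET_BITS = + * __builtin_ctz(256) = 8 and word-offsets range over [0..255].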
+ */ +#define WORD_OFFSET_BITS (__builtin_ctz(WORDS_PER_CHUNK)) +#define WORD_OFFSET_MASK ((1 << WORD_OFFSET_BITS) - 1) + +static inline unsigned int compute_word_offset(char *start, unsigned int *addr) +{ + unsigned int delta_bytes, ret; + + delta_bytes = (unsigned long)addr - (unsigned long)start; + + ret = delta_bytes/WORD_SIZE; + + return ret; +} + +/* + * A "sweep" is defined to be the sequential execution of the + * rim-sequence by a rim_thread on its chunk one word at a time, + * starting from the first word of its chunk and ending with the last + * word of its chunk. + * + * Each sweep of a rim_thread is uniquely identified by a sweep_id. + * SWEEP_ID_BITS denote the number of bits required to represent + * the sweep_ids of rim_threads. + * + * As to why SWEEP_ID_BITS are computed as a function of THREAD_ID_BITS, + * WORD_OFFSET_BITS, and WORD_BITS, see the "store-pattern" below. + */ +#define SWEEP_ID_BITS (WORD_BITS - (THREAD_ID_BITS + WORD_OFFSET_BITS)) +#define SWEEP_ID_MASK ((1 << SWEEP_ID_BITS) - 1) + +/* + * A "store-pattern" is the word-pattern that is stored into a word + * location in the 4) STORE step of the rim-sequence. + * + * In the store-pattern, we shall encode: + * + * - The thread-id of the rim_thread performing the store + * (The most significant THREAD_ID_BITS) + * + * - The word-offset of the address into which the store is being + * performed (The next WORD_OFFSET_BITS) + * + * - The sweep_id of the current sweep in which the store is + * being performed. (The lower SWEEP_ID_BITS) + * + * Store Pattern: 32 bits + * |------------------|--------------------|---------------------------------| + * | Thread id | Word offset | sweep_id | + * |------------------|--------------------|---------------------------------| + * THREAD_ID_BITS WORD_OFFSET_BITS SWEEP_ID_BITS + * + * In the store pattern, the (Thread-id + Word-offset) uniquely identify the + * address to which the store is being performed, i.e., + * address == map1 + + * (Thread-id * RIM_CHUNK_SIZE) + (Word-offset * WORD_SIZE) + * + * And the sweep_id in the store pattern identifies the time when the + * store was performed by the rim_thread. + * + * We shall use this property in the 3) COMPARE step of the + * rim-sequence.
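 + * + * For example, with THREAD_ID_BITS = 8, WORD_OFFSET_BITS = 8 and hence + * SWEEP_ID_BITS = 32 - (8 + 8) = 16, the pattern stored by thread 2 at + * word-offset 5 during sweep 7 is + * (2 << 24) | (5 << 16) | 7 == 0x02050007.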
+ */ +#define SWEEP_ID_SHIFT 0 +#define WORD_OFFSET_SHIFT (SWEEP_ID_BITS) +#define THREAD_ID_SHIFT (WORD_OFFSET_BITS + SWEEP_ID_BITS) + +/* + * Compute the store pattern for a given thread with id @tid, at + * location @addr in the sweep identified by @sweep_id + */ +static inline unsigned int compute_store_pattern(unsigned int tid, + unsigned int *addr, + unsigned int sweep_id) +{ + unsigned int ret = 0; + char *start = compute_chunk_start_addr(tid); + unsigned int word_offset = compute_word_offset(start, addr); + + ret += (tid & THREAD_ID_MASK) << THREAD_ID_SHIFT; + ret += (word_offset & WORD_OFFSET_MASK) << WORD_OFFSET_SHIFT; + ret += (sweep_id & SWEEP_ID_MASK) << SWEEP_ID_SHIFT; + return ret; +} + +/* Extract the thread-id from the given store-pattern */ +static inline unsigned int extract_tid(unsigned int pattern) +{ + unsigned int ret; + + ret = (pattern >> THREAD_ID_SHIFT) & THREAD_ID_MASK; + return ret; +} + +/* Extract the word-offset from the given store-pattern */ +static inline unsigned int extract_word_offset(unsigned int pattern) +{ + unsigned int ret; + + ret = (pattern >> WORD_OFFSET_SHIFT) & WORD_OFFSET_MASK; + + return ret; +} + +/* Extract the sweep-id from the given store-pattern */ +static inline unsigned int extract_sweep_id(unsigned int pattern) +{ + unsigned int ret; + + ret = (pattern >> SWEEP_ID_SHIFT) & SWEEP_ID_MASK; + + return ret; +} + +/************************************************************ + * * + * Logging the output of the verification * + * * + ************************************************************/ +#define LOGDIR_NAME_SIZE 100 +static char logdir[LOGDIR_NAME_SIZE]; + +static FILE *fp[MAX_THREADS]; +static const char logfilename[] = "Thread-%02d-Chunk"; + +static inline void start_verification_log(unsigned int tid, + unsigned int *addr, + unsigned int cur_sweep_id, + unsigned int prev_sweep_id) +{ + FILE *f; + char logfile[30]; + char path[LOGDIR_NAME_SIZE + 30]; + char separator[2] = "/"; + char *chunk_start = compute_chunk_start_addr(tid); + unsigned int size = RIM_CHUNK_SIZE; + + sprintf(logfile, logfilename, tid); + strcpy(path, logdir); + strcat(path, separator); + strcat(path, logfile); + f = fopen(path, "w"); + + if (!f) { + err_msg("Unable to create logfile\n"); + } + + fp[tid] = f; + + fprintf(f, "----------------------------------------------------------\n"); + fprintf(f, "PID = %d\n", rim_process_pid); + fprintf(f, "Thread id = %02d\n", tid); + fprintf(f, "Chunk Start Addr = 0x%016lx\n", (unsigned long)chunk_start); + fprintf(f, "Chunk Size = %d\n", size); + fprintf(f, "Next Store Addr = 0x%016lx\n", (unsigned long)addr); + fprintf(f, "Current sweep-id = 0x%08x\n", cur_sweep_id); + fprintf(f, "Previous sweep-id = 0x%08x\n", prev_sweep_id); + fprintf(f, "----------------------------------------------------------\n"); +} + +static inline void log_anamoly(unsigned int tid, unsigned int *addr, + unsigned int expected, unsigned int observed) +{ + FILE *f = fp[tid]; + + fprintf(f, "Thread %02d: Addr 0x%lx: Expected 0x%x, Observed 0x%x\n", + tid, (unsigned long)addr, expected, observed); + fprintf(f, "Thread %02d: Expected Thread id = %02d\n", tid, extract_tid(expected)); + fprintf(f, "Thread %02d: Observed Thread id = %02d\n", tid, extract_tid(observed)); + fprintf(f, "Thread %02d: Expected Word offset = %03d\n", tid, extract_word_offset(expected)); + fprintf(f, "Thread %02d: Observed Word offset = %03d\n", tid, extract_word_offset(observed)); + fprintf(f, "Thread %02d: Expected sweep-id = 0x%x\n", tid, extract_sweep_id(expected)); + 
fprintf(f, "Thread %02d: Observed sweep-id = 0x%x\n", tid, extract_sweep_id(observed)); + fprintf(f, "----------------------------------------------------------\n"); +} + +static inline void end_verification_log(unsigned int tid, unsigned nr_anamolies) +{ + FILE *f = fp[tid]; + char logfile[30]; + char path[LOGDIR_NAME_SIZE + 30]; + char separator[] = "/"; + + fclose(f); + + if (nr_anamolies == 0) { + remove(path); + return; + } + + sprintf(logfile, logfilename, tid); + strcpy(path, logdir); + strcat(path, separator); + strcat(path, logfile); + + printf("Thread %02d chunk has %d corrupted words. For details check %s\n", + tid, nr_anamolies, path); +} + +/* + * When a COMPARE step of a rim-sequence fails, the rim_thread informs + * everyone else via the shared_memory pointed to by + * corruption_found variable. On seeing this, every thread verifies the + * content of its chunk as follows. + * + * Suppose a thread identified with @tid was about to store (but not + * yet stored) to @next_store_addr in its current sweep identified + * @cur_sweep_id. Let @prev_sweep_id indicate the previous sweep_id. + * + * This implies that for all the addresses @addr < @next_store_addr, + * Thread @tid has already performed a store as part of its current + * sweep. Hence we expect the content of such @addr to be: + * |-------------------------------------------------| + * | tid | word_offset(addr) | cur_sweep_id | + * |-------------------------------------------------| + * + * Since Thread @tid is yet to perform stores on address + * @next_store_addr and above, we expect the content of such an + * address @addr to be: + * |-------------------------------------------------| + * | tid | word_offset(addr) | prev_sweep_id | + * |-------------------------------------------------| + * + * The verifier function @verify_chunk does this verification and logs + * any anamolies that it finds. 
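 + * + * For example, if thread 2 was about to store to word-offset 5 of its + * chunk in sweep 7, word-offsets 0..4 of that chunk should contain the + * sweep-7 pattern, while word-offset 5 and above should still contain + * the sweep-6 pattern.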
+ */ +static void verify_chunk(unsigned int tid, unsigned int *next_store_addr, + unsigned int cur_sweep_id, + unsigned int prev_sweep_id) +{ + unsigned int *iter_ptr; + unsigned int size = RIM_CHUNK_SIZE; + unsigned int expected; + unsigned int observed; + char *chunk_start = compute_chunk_start_addr(tid); + + int nr_anamolies = 0; + + start_verification_log(tid, next_store_addr, + cur_sweep_id, prev_sweep_id); + + for (iter_ptr = (unsigned int *)chunk_start; + (unsigned long)iter_ptr < (unsigned long)chunk_start + size; + iter_ptr++) { + unsigned int expected_sweep_id; + + if (iter_ptr < next_store_addr) { + expected_sweep_id = cur_sweep_id; + } else { + expected_sweep_id = prev_sweep_id; + } + + expected = compute_store_pattern(tid, iter_ptr, expected_sweep_id); + + dcbf((volatile unsigned int *)iter_ptr); // Flush before reading + observed = *iter_ptr; + + if (observed != expected) { + nr_anamolies++; + log_anamoly(tid, iter_ptr, expected, observed); + } + } + + end_verification_log(tid, nr_anamolies); +} + +static void set_pthread_cpu(pthread_t th, int cpu) +{ + cpu_set_t run_cpu_mask; + struct sched_param param; + + CPU_ZERO(&run_cpu_mask); + CPU_SET(cpu, &run_cpu_mask); + pthread_setaffinity_np(th, sizeof(cpu_set_t), &run_cpu_mask); + + param.sched_priority = 1; + if (0 && sched_setscheduler(0, SCHED_FIFO, &param) == -1) { + /* haven't reproduced with this setting; it kills random preemption, which may be a factor */ + fprintf(stderr, "could not set SCHED_FIFO, run as root?\n"); + } +} + +static void set_mycpu(int cpu) +{ + cpu_set_t run_cpu_mask; + struct sched_param param; + + CPU_ZERO(&run_cpu_mask); + CPU_SET(cpu, &run_cpu_mask); + sched_setaffinity(0, sizeof(cpu_set_t), &run_cpu_mask); + + param.sched_priority = 1; + if (0 && sched_setscheduler(0, SCHED_FIFO, &param) == -1) { + fprintf(stderr, "could not set SCHED_FIFO, run as root?\n"); + } +} + +static volatile int segv_wait; + +static void segv_handler(int signo, siginfo_t *info, void *extra) +{ + while (segv_wait) { + sched_yield(); + } +} + +static void set_segv_handler(void) +{ + struct sigaction sa; + + sa.sa_flags = SA_SIGINFO; + sa.sa_sigaction = segv_handler; + + if (sigaction(SIGSEGV, &sa, NULL) == -1) { + perror("sigaction"); + exit(EXIT_FAILURE); + } +} + +int timeout = 0; + +/* + * This function is executed by every rim_thread. + * + * It performs sweeps over its exclusive chunk, executing the + * rim-sequence one word at a time.
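 + * + * In outline, for each word w_ptr of its chunk it does: + * 1) FLUSH: dcbf(w_ptr) + * 2) LOAD: read_data = *w_ptr + * 3) COMPARE: read_data vs compute_store_pattern(tid, w_ptr, prev_sweep_id) + * 4) STORE: *w_ptr = compute_store_pattern(tid, w_ptr, cur_sweep_id)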
+ */ +static void *rim_fn(void *arg) +{ + unsigned int tid = *((unsigned int *)arg); + + int size = RIM_CHUNK_SIZE; + char *chunk_start = compute_chunk_start_addr(tid); + + unsigned int prev_sweep_id; + unsigned int cur_sweep_id = 0; + + /* word access */ + unsigned int pattern = cur_sweep_id; + unsigned int *pattern_ptr = &pattern; + unsigned int *w_ptr, read_data; + + set_segv_handler(); + + /* + * Let us initialize the chunk: + * + * Each word-aligned address addr in the chunk + * is initialized to: + * |-------------------------------------------------| + * | tid | word_offset(addr) | 0 | + * |-------------------------------------------------| + */ + for (w_ptr = (unsigned int *)chunk_start; + (unsigned long)w_ptr < (unsigned long)(chunk_start) + size; + w_ptr++) { + + *pattern_ptr = compute_store_pattern(tid, w_ptr, cur_sweep_id); + *w_ptr = *pattern_ptr; + } + + while (!corruption_found && !timeout) { + prev_sweep_id = cur_sweep_id; + cur_sweep_id = cur_sweep_id + 1; + + for (w_ptr = (unsigned int *)chunk_start; + (unsigned long)w_ptr < (unsigned long)(chunk_start) + size; + w_ptr++) { + unsigned int old_pattern; + + /* + * Compute the pattern that we would have + * stored at this location in the previous + * sweep. + */ + old_pattern = compute_store_pattern(tid, w_ptr, prev_sweep_id); + + /* + * FLUSH: Ensure that we flush the contents of + * the cache before loading + */ + dcbf((volatile unsigned int *)w_ptr); // Flush + + /* LOAD: Read the value */ + read_data = *w_ptr; // Load + + /* + * COMPARE: Is it the same as what we had stored + * in the previous sweep? It better be! + */ + if (read_data != old_pattern) { + /* No it isn't! Tell everyone */ + corruption_found = 1; + } + + /* + * Before performing a store, let us check if + * any rim_thread has found a corruption. + */ + if (corruption_found || timeout) { + /* + * Yes. Someone (including us!) has found + * a corruption :( + * + * Let us verify that our chunk is + * correct. + */ + /* But first, let us allow the dust to settle down! */ + verify_chunk(tid, w_ptr, cur_sweep_id, prev_sweep_id); + + return 0; + } + + /* + * Compute the new pattern that we are going + * to write to this location + */ + *pattern_ptr = compute_store_pattern(tid, w_ptr, cur_sweep_id); + + /* + * STORE: Now let us write this pattern into + * the location + */ + *w_ptr = *pattern_ptr; + } + } + + return NULL; +} + + +static unsigned long start_cpu = 0; +static unsigned long nrthreads = 4; + +static pthread_t mem_snapshot_thread; + +static void *mem_snapshot_fn(void *arg) +{ + int page_size = getpagesize(); + size_t size = page_size; + void *tmp = malloc(size); + + while (!corruption_found && !timeout) { + /* Stop memory migration once corruption is found */ + segv_wait = 1; + + mprotect(map1, size, PROT_READ); + + /* + * Load from the working alias (map1). Loading from map2 + * also fails. + */ + memcpy(tmp, map1, size); + + /* + * Stores must go via map2 which has write permissions, but + * the corrupted data tends to be seen in the snapshot buffer, + * so corruption does not appear to be introduced at the + * copy-back via map2 alias here.
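 + * + * (map1 and map2 are two shmat() mappings of the same shared + * memory segment, set up in main(), so stores through map2 still + * succeed while map1 is PROT_READ.)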
+ */ + memcpy(map2, tmp, size); + /* + * Before releasing other threads, must ensure the copy + * back to map2 has completed. + */ + asm volatile("sync" ::: "memory"); + mprotect(map1, size, PROT_READ|PROT_WRITE); + asm volatile("sync" ::: "memory"); + segv_wait = 0; + + usleep(1); /* This value makes a big difference */ + } + + return 0; +} + +void alrm_sighandler(int sig) +{ + timeout = 1; +} + +int main(int argc, char *argv[]) +{ + int c; + int page_size = getpagesize(); + time_t now; + int i, dir_error; + pthread_attr_t attr; + key_t shm_key = (key_t) getpid(); + int shmid, run_time = 20 * 60; + struct sigaction sa_alrm; + + snprintf(logdir, LOGDIR_NAME_SIZE, + "/tmp/logdir-%u", (unsigned int)getpid()); + while ((c = getopt(argc, argv, "r:hn:l:t:")) != -1) { + switch (c) { + case 'r': + start_cpu = strtoul(optarg, NULL, 10); + break; + case 'h': + printf("%s [-r <start_cpu>] [-n <nrthreads>] [-l <logdir>] [-t <timeout>]\n", argv[0]); + exit(0); + break; + case 'n': + nrthreads = strtoul(optarg, NULL, 10); + break; + case 'l': + strncpy(logdir, optarg, LOGDIR_NAME_SIZE - 1); + break; + case 't': + run_time = strtoul(optarg, NULL, 10); + break; + default: + printf("invalid option\n"); + exit(0); + break; + } + } + + if (nrthreads > MAX_THREADS) + nrthreads = MAX_THREADS; + + shmid = shmget(shm_key, page_size, IPC_CREAT|0666); + if (shmid < 0) { + err_msg("Failed shmget\n"); + } + + map1 = shmat(shmid, NULL, 0); + if (map1 == (void *) -1) { + err_msg("Failed shmat"); + } + + map2 = shmat(shmid, NULL, 0); + if (map2 == (void *) -1) { + err_msg("Failed shmat"); + } + + dir_error = mkdir(logdir, 0755); + + if (dir_error) { + err_msg("Failed mkdir"); + } + + printf("start_cpu list:%lu\n", start_cpu); + printf("number of worker threads:%lu + 1 snapshot thread\n", nrthreads); + printf("Allocated address:0x%016lx + secondary map:0x%016lx\n", (unsigned long)map1, (unsigned long)map2); + printf("logdir at: %s\n", logdir); + printf("Timeout: %d seconds\n", run_time); + + time(&now); + printf("=================================\n"); + printf(" Starting Test\n"); + printf(" %s", ctime(&now)); + printf("=================================\n"); + + for (i = 0; i < nrthreads; i++) { + if (1 && !fork()) { + prctl(PR_SET_PDEATHSIG, SIGKILL); + set_mycpu(start_cpu + i); + for (;;) + sched_yield(); + exit(0); + } + } + + + sa_alrm.sa_handler = &alrm_sighandler; + sigemptyset(&sa_alrm.sa_mask); + sa_alrm.sa_flags = 0; + + if (sigaction(SIGALRM, &sa_alrm, 0) == -1) { + err_msg("Failed signal handler registration\n"); + } + + alarm(run_time); + + pthread_attr_init(&attr); + for (i = 0; i < nrthreads; i++) { + rim_thread_ids[i] = i; + pthread_create(&rim_threads[i], &attr, rim_fn, &rim_thread_ids[i]); + set_pthread_cpu(rim_threads[i], start_cpu + i); + } + + pthread_create(&mem_snapshot_thread, &attr, mem_snapshot_fn, map1); + set_pthread_cpu(mem_snapshot_thread, start_cpu + i); + + + pthread_join(mem_snapshot_thread, NULL); + for (i = 0; i < nrthreads; i++) { + pthread_join(rim_threads[i], NULL); + } + + if (!timeout) { + time(&now); + printf("=================================\n"); + printf(" Data Corruption Detected\n"); + printf(" %s", ctime(&now)); + printf(" See logfiles in %s\n", logdir); + printf("=================================\n"); + return 1; + } + return 0; +} diff --git a/tools/testing/selftests/powerpc/mm/wild_bctr.c b/tools/testing/selftests/powerpc/mm/wild_bctr.c new file mode 100644 index 0000000000..f2fa101c5a --- /dev/null +++ b/tools/testing/selftests/powerpc/mm/wild_bctr.c @@ -0,0 +1,170 @@ +// 
SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018, Michael Ellerman, IBM Corp. + * + * Test that an out-of-bounds branch to counter behaves as expected. + */ + +#include <setjmp.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> +#include <sys/types.h> +#include <sys/wait.h> +#include <ucontext.h> +#include <unistd.h> + +#include "utils.h" + + +#define BAD_NIP 0x788c545a18000000ull + +static struct pt_regs signal_regs; +static jmp_buf setjmp_env; + +static void save_regs(ucontext_t *ctxt) +{ + struct pt_regs *regs = ctxt->uc_mcontext.regs; + + memcpy(&signal_regs, regs, sizeof(signal_regs)); +} + +static void segv_handler(int signum, siginfo_t *info, void *ctxt_v) +{ + save_regs(ctxt_v); + longjmp(setjmp_env, 1); +} + +static void usr2_handler(int signum, siginfo_t *info, void *ctxt_v) +{ + save_regs(ctxt_v); +} + +static int ok(void) +{ + printf("Everything is OK in here.\n"); + return 0; +} + +#define REG_POISON 0x5a5a +#define POISONED_REG(n) ((((unsigned long)REG_POISON) << 48) | ((n) << 32) | \ + (((unsigned long)REG_POISON) << 16) | (n)) + +static inline void poison_regs(void) +{ + #define POISON_REG(n) \ + "lis " __stringify(n) "," __stringify(REG_POISON) ";" \ + "addi " __stringify(n) "," __stringify(n) "," __stringify(n) ";" \ + "sldi " __stringify(n) "," __stringify(n) ", 32 ;" \ + "oris " __stringify(n) "," __stringify(n) "," __stringify(REG_POISON) ";" \ + "addi " __stringify(n) "," __stringify(n) "," __stringify(n) ";" + + asm (POISON_REG(15) + POISON_REG(16) + POISON_REG(17) + POISON_REG(18) + POISON_REG(19) + POISON_REG(20) + POISON_REG(21) + POISON_REG(22) + POISON_REG(23) + POISON_REG(24) + POISON_REG(25) + POISON_REG(26) + POISON_REG(27) + POISON_REG(28) + POISON_REG(29) + : // inputs + : // outputs + : "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", + "26", "27", "28", "29" + ); + #undef POISON_REG +} + +static int check_regs(void) +{ + unsigned long i; + + for (i = 15; i <= 29; i++) + FAIL_IF(signal_regs.gpr[i] != POISONED_REG(i)); + + printf("Regs OK\n"); + return 0; +} + +static void dump_regs(void) +{ + for (int i = 0; i < 32; i += 4) { + printf("r%02d 0x%016lx r%02d 0x%016lx " \ + "r%02d 0x%016lx r%02d 0x%016lx\n", + i, signal_regs.gpr[i], + i+1, signal_regs.gpr[i+1], + i+2, signal_regs.gpr[i+2], + i+3, signal_regs.gpr[i+3]); + } +} + +#ifdef _CALL_AIXDESC +struct opd { + unsigned long ip; + unsigned long toc; + unsigned long env; +}; +static struct opd bad_opd = { + .ip = BAD_NIP, +}; +#define BAD_FUNC (&bad_opd) +#else +#define BAD_FUNC BAD_NIP +#endif + +int test_wild_bctr(void) +{ + int (*func_ptr)(void); + struct sigaction segv = { + .sa_sigaction = segv_handler, + .sa_flags = SA_SIGINFO + }; + struct sigaction usr2 = { + .sa_sigaction = usr2_handler, + .sa_flags = SA_SIGINFO + }; + + FAIL_IF(sigaction(SIGSEGV, &segv, NULL)); + FAIL_IF(sigaction(SIGUSR2, &usr2, NULL)); + + bzero(&signal_regs, sizeof(signal_regs)); + + if (setjmp(setjmp_env) == 0) { + func_ptr = ok; + func_ptr(); + + kill(getpid(), SIGUSR2); + printf("Regs before:\n"); + dump_regs(); + bzero(&signal_regs, sizeof(signal_regs)); + + poison_regs(); + + func_ptr = (int (*)(void))BAD_FUNC; + func_ptr(); + + FAIL_IF(1); /* we didn't segv? */ + } + + FAIL_IF(signal_regs.nip != BAD_NIP); + + printf("All good - took SEGV as expected branching to 0x%llx\n", BAD_NIP); + + dump_regs(); + FAIL_IF(check_regs()); + + return 0; +} + +int main(void) +{ + return test_harness(test_wild_bctr, "wild_bctr"); +} |