author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/misc/lkdtm
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/misc/lkdtm')
-rw-r--r--   drivers/misc/lkdtm/Makefile      29
-rw-r--r--   drivers/misc/lkdtm/bugs.c       672
-rw-r--r--   drivers/misc/lkdtm/cfi.c        193
-rw-r--r--   drivers/misc/lkdtm/core.c       488
-rw-r--r--   drivers/misc/lkdtm/fortify.c    217
-rw-r--r--   drivers/misc/lkdtm/heap.c       342
-rw-r--r--   drivers/misc/lkdtm/lkdtm.h      100
-rw-r--r--   drivers/misc/lkdtm/perms.c      293
-rw-r--r--   drivers/misc/lkdtm/powerpc.c    129
-rw-r--r--   drivers/misc/lkdtm/refcount.c   419
-rw-r--r--   drivers/misc/lkdtm/rodata.c      11
-rw-r--r--   drivers/misc/lkdtm/stackleak.c  150
-rw-r--r--   drivers/misc/lkdtm/usercopy.c   457
13 files changed, 3500 insertions, 0 deletions
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
new file mode 100644
index 0000000000..95ef971b5e
--- /dev/null
+++ b/drivers/misc/lkdtm/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LKDTM) += lkdtm.o
+
+lkdtm-$(CONFIG_LKDTM) += core.o
+lkdtm-$(CONFIG_LKDTM) += bugs.o
+lkdtm-$(CONFIG_LKDTM) += heap.o
+lkdtm-$(CONFIG_LKDTM) += perms.o
+lkdtm-$(CONFIG_LKDTM) += refcount.o
+lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
+lkdtm-$(CONFIG_LKDTM) += usercopy.o
+lkdtm-$(CONFIG_LKDTM) += stackleak.o
+lkdtm-$(CONFIG_LKDTM) += cfi.o
+lkdtm-$(CONFIG_LKDTM) += fortify.o
+lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o
+
+KASAN_SANITIZE_stackleak.o := n
+
+KASAN_SANITIZE_rodata.o := n
+KCSAN_SANITIZE_rodata.o := n
+KCOV_INSTRUMENT_rodata.o := n
+OBJECT_FILES_NON_STANDARD_rodata.o := y
+CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO) $(RETHUNK_CFLAGS)
+
+OBJCOPYFLAGS :=
+OBJCOPYFLAGS_rodata_objcopy.o := \
+ --rename-section .noinstr.text=.rodata,alloc,readonly,load,contents
+targets += rodata.o rodata_objcopy.o
+$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
+ $(call if_changed,objcopy)
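
The objcopy rule above is what puts executable-looking bytes into a read-only section: rodata.o is compiled normally with its single test function in .noinstr.text, and the rename turns that section into .rodata in rodata_objcopy.o, so the code ends up in a mapping that should be non-executable. A minimal sketch of the kind of function involved, for illustration only (the real drivers/misc/lkdtm/rodata.c appears in the diffstat but is not reproduced in this excerpt):

/* Built into .noinstr.text, then renamed to .rodata by the rule above. */
#include "lkdtm.h"

void noinstr lkdtm_rodata_do_nothing(void)
{
	/*
	 * Executing this from .rodata should fault on a kernel with
	 * strict rodata protections; lkdtm.h declares it as the
	 * "function-in-rodata" used by the perms tests.
	 */
}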
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
new file mode 100644
index 0000000000..c66cc05a68
--- /dev/null
+++ b/drivers/misc/lkdtm/bugs.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to logic bugs (e.g. bad dereferences,
+ * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
+ * lockups) along with other things that don't fit well into existing LKDTM
+ * test source files.
+ */
+#include "lkdtm.h"
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task_stack.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
+#include <asm/desc.h>
+#endif
+
+struct lkdtm_list {
+ struct list_head node;
+};
+
+/*
+ * Make sure our attempts to overrun the kernel stack don't trigger
+ * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
+ * recurse past the end of THREAD_SIZE by default.
+ */
+#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
+#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
+#else
+#define REC_STACK_SIZE (THREAD_SIZE / 8UL)
+#endif
+#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
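
As a worked example of the sizing above (illustrative values only; both knobs are architecture- and config-dependent): with CONFIG_FRAME_WARN=2048 and THREAD_SIZE=16 KiB, REC_STACK_SIZE is 2048/2 = 1024 bytes and REC_NUM_DEFAULT is (16384/1024) * 2 = 32, so the default depth is sized to touch roughly twice the available stack even if the compiler trims each frame.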
+
+static int recur_count = REC_NUM_DEFAULT;
+
+static DEFINE_SPINLOCK(lock_me_up);
+
+/*
+ * Make sure compiler does not optimize this function or stack frame away:
+ * - function marked noinline
+ * - stack variables are marked volatile
+ * - stack variables are written (memset()) and read (buf[..] passed as arg)
+ * - function may have external effects (memzero_explicit())
+ * - no tail recursion possible
+ */
+static int noinline recursive_loop(int remaining)
+{
+ volatile char buf[REC_STACK_SIZE];
+ volatile int ret;
+
+ memset((void *)buf, remaining & 0xFF, sizeof(buf));
+ if (!remaining)
+ ret = 0;
+ else
+ ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
+ memzero_explicit((void *)buf, sizeof(buf));
+ return ret;
+}
+
+/* If the depth is negative, use the default, otherwise keep parameter. */
+void __init lkdtm_bugs_init(int *recur_param)
+{
+ if (*recur_param < 0)
+ *recur_param = recur_count;
+ else
+ recur_count = *recur_param;
+}
+
+static void lkdtm_PANIC(void)
+{
+ panic("dumptest");
+}
+
+static void lkdtm_BUG(void)
+{
+ BUG();
+}
+
+static int warn_counter;
+
+static void lkdtm_WARNING(void)
+{
+ WARN_ON(++warn_counter);
+}
+
+static void lkdtm_WARNING_MESSAGE(void)
+{
+ WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
+}
+
+static void lkdtm_EXCEPTION(void)
+{
+ *((volatile int *) 0) = 0;
+}
+
+static void lkdtm_LOOP(void)
+{
+ for (;;)
+ ;
+}
+
+static void lkdtm_EXHAUST_STACK(void)
+{
+ pr_info("Calling function with %lu frame size to depth %d ...\n",
+ REC_STACK_SIZE, recur_count);
+ recursive_loop(recur_count);
+ pr_info("FAIL: survived without exhausting stack?!\n");
+}
+
+static noinline void __lkdtm_CORRUPT_STACK(void *stack)
+{
+ memset(stack, '\xff', 64);
+}
+
+/* This should trip the stack canary, not corrupt the return address. */
+static noinline void lkdtm_CORRUPT_STACK(void)
+{
+ /* Use default char array length that triggers stack protection. */
+ char data[8] __aligned(sizeof(void *));
+
+ pr_info("Corrupting stack containing char array ...\n");
+ __lkdtm_CORRUPT_STACK((void *)&data);
+}
+
+/* Same as above but will only get a canary with -fstack-protector-strong */
+static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
+{
+ union {
+ unsigned short shorts[4];
+ unsigned long *ptr;
+ } data __aligned(sizeof(void *));
+
+ pr_info("Corrupting stack containing union ...\n");
+ __lkdtm_CORRUPT_STACK((void *)&data);
+}
+
+static pid_t stack_pid;
+static unsigned long stack_addr;
+
+static void lkdtm_REPORT_STACK(void)
+{
+ volatile uintptr_t magic;
+ pid_t pid = task_pid_nr(current);
+
+ if (pid != stack_pid) {
+ pr_info("Starting stack offset tracking for pid %d\n", pid);
+ stack_pid = pid;
+ stack_addr = (uintptr_t)&magic;
+ }
+
+ pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
+}
+
+static pid_t stack_canary_pid;
+static unsigned long stack_canary;
+static unsigned long stack_canary_offset;
+
+static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
+{
+ int i = 0;
+ pid_t pid = task_pid_nr(current);
+ unsigned long *canary = (unsigned long *)stack;
+ unsigned long current_offset = 0, init_offset = 0;
+
+ /* Do our best to find the canary in a 16 word window ... */
+ for (i = 1; i < 16; i++) {
+ canary = (unsigned long *)stack + i;
+#ifdef CONFIG_STACKPROTECTOR
+ if (*canary == current->stack_canary)
+ current_offset = i;
+ if (*canary == init_task.stack_canary)
+ init_offset = i;
+#endif
+ }
+
+ if (current_offset == 0) {
+ /*
+ * If the canary doesn't match what's in the task_struct,
+ * we're either using a global canary or the stack frame
+ * layout changed.
+ */
+ if (init_offset != 0) {
+ pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
+ init_offset, pid);
+ } else {
+ pr_warn("FAIL: did not correctly locate stack canary :(\n");
+ pr_expected_config(CONFIG_STACKPROTECTOR);
+ }
+
+ return;
+ } else if (init_offset != 0) {
+ pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
+ }
+
+ canary = (unsigned long *)stack + current_offset;
+ if (stack_canary_pid == 0) {
+ stack_canary = *canary;
+ stack_canary_pid = pid;
+ stack_canary_offset = current_offset;
+ pr_info("Recorded stack canary for pid %d at offset %ld\n",
+ stack_canary_pid, stack_canary_offset);
+ } else if (pid == stack_canary_pid) {
+ pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
+ } else {
+ if (current_offset != stack_canary_offset) {
+ pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
+ stack_canary_offset, current_offset);
+ return;
+ }
+
+ if (*canary == stack_canary) {
+ pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
+ stack_canary_pid, pid, current_offset);
+ } else {
+ pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
+ stack_canary_pid, pid, current_offset);
+ /* Reset the test. */
+ stack_canary_pid = 0;
+ }
+ }
+}
+
+static void lkdtm_REPORT_STACK_CANARY(void)
+{
+ /* Use default char array length that triggers stack protection. */
+ char data[8] __aligned(sizeof(void *)) = { };
+
+ __lkdtm_REPORT_STACK_CANARY((void *)&data);
+}
+
+static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+{
+ static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
+ u32 *p;
+ u32 val = 0x12345678;
+
+ p = (u32 *)(data + 1);
+ if (*p == 0)
+ val = 0x87654321;
+ *p = val;
+
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
+}
+
+static void lkdtm_SOFTLOCKUP(void)
+{
+ preempt_disable();
+ for (;;)
+ cpu_relax();
+}
+
+static void lkdtm_HARDLOCKUP(void)
+{
+ local_irq_disable();
+ for (;;)
+ cpu_relax();
+}
+
+static void lkdtm_SPINLOCKUP(void)
+{
+ /* Must be called twice to trigger. */
+ spin_lock(&lock_me_up);
+ /* Let sparse know we intended to exit holding the lock. */
+ __release(&lock_me_up);
+}
+
+static void lkdtm_HUNG_TASK(void)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+}
+
+static volatile unsigned int huge = INT_MAX - 2;
+static volatile unsigned int ignored;
+
+static void lkdtm_OVERFLOW_SIGNED(void)
+{
+ int value;
+
+ value = huge;
+ pr_info("Normal signed addition ...\n");
+ value += 1;
+ ignored = value;
+
+ pr_info("Overflowing signed addition ...\n");
+ value += 4;
+ ignored = value;
+}
+
+
+static void lkdtm_OVERFLOW_UNSIGNED(void)
+{
+ unsigned int value;
+
+ value = huge;
+ pr_info("Normal unsigned addition ...\n");
+ value += 1;
+ ignored = value;
+
+ pr_info("Overflowing unsigned addition ...\n");
+ value += 4;
+ ignored = value;
+}
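
Whether these two wraparounds are caught depends on the kernel's UBSAN configuration; the arithmetic itself is ordinary C. A userspace analogue, shown purely for illustration (-fsanitize=signed-integer-overflow is available in both GCC and Clang; the unsigned-wrap check is a Clang-only extension, since unsigned wraparound is defined behaviour in C):

/* Build with e.g.: cc -fsanitize=signed-integer-overflow overflow_demo.c */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	volatile int huge = INT_MAX - 2;
	int value = huge;

	value += 1;   /* still fine: INT_MAX - 1 */
	value += 4;   /* signed overflow: UBSan reports this at run time */
	printf("value = %d\n", value);
	return 0;
}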
+
+/* Intentionally using unannotated flex array definition. */
+struct array_bounds_flex_array {
+ int one;
+ int two;
+ char data[];
+};
+
+struct array_bounds {
+ int one;
+ int two;
+ char data[8];
+ int three;
+};
+
+static void lkdtm_ARRAY_BOUNDS(void)
+{
+ struct array_bounds_flex_array *not_checked;
+ struct array_bounds *checked;
+ volatile int i;
+
+ not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
+ checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+ if (!not_checked || !checked) {
+ kfree(not_checked);
+ kfree(checked);
+ return;
+ }
+
+ pr_info("Array access within bounds ...\n");
+ /* For both, touch all bytes in the actual member size. */
+ for (i = 0; i < sizeof(checked->data); i++)
+ checked->data[i] = 'A';
+ /*
+ * For the uninstrumented flex array member, also touch 1 byte
+ * beyond to verify it is correctly uninstrumented.
+ */
+ for (i = 0; i < 2; i++)
+ not_checked->data[i] = 'A';
+
+ pr_info("Array access beyond bounds ...\n");
+ for (i = 0; i < sizeof(checked->data) + 1; i++)
+ checked->data[i] = 'B';
+
+ kfree(not_checked);
+ kfree(checked);
+ pr_err("FAIL: survived array bounds overflow!\n");
+ if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
+ pr_expected_config(CONFIG_UBSAN_TRAP);
+ else
+ pr_expected_config(CONFIG_UBSAN_BOUNDS);
+}
+
+struct lkdtm_annotated {
+ unsigned long flags;
+ int count;
+ int array[] __counted_by(count);
+};
+
+static volatile int fam_count = 4;
+
+static void lkdtm_FAM_BOUNDS(void)
+{
+ struct lkdtm_annotated *inst;
+
+ inst = kzalloc(struct_size(inst, array, fam_count + 1), GFP_KERNEL);
+ if (!inst) {
+ pr_err("FAIL: could not allocate test struct!\n");
+ return;
+ }
+
+ inst->count = fam_count;
+ pr_info("Array access within bounds ...\n");
+ inst->array[1] = fam_count;
+ ignored = inst->array[1];
+
+ pr_info("Array access beyond bounds ...\n");
+ inst->array[fam_count] = fam_count;
+ ignored = inst->array[fam_count];
+
+ kfree(inst);
+
+ pr_err("FAIL: survived access of invalid flexible array member index!\n");
+
+ if (!__has_attribute(__counted_by__))
+ pr_warn("This is expected since this %s was built a compiler supporting __counted_by\n",
+ lkdtm_kernel_info);
+ else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
+ pr_expected_config(CONFIG_UBSAN_TRAP);
+ else
+ pr_expected_config(CONFIG_UBSAN_BOUNDS);
+}
+
+static void lkdtm_CORRUPT_LIST_ADD(void)
+{
+ /*
+ * Initially, an empty list via LIST_HEAD:
+ * test_head.next = &test_head
+ * test_head.prev = &test_head
+ */
+ LIST_HEAD(test_head);
+ struct lkdtm_list good, bad;
+ void *target[2] = { };
+ void *redirection = &target;
+
+ pr_info("attempting good list addition\n");
+
+ /*
+ * Adding to the list performs these actions:
+ * test_head.next->prev = &good.node
+ * good.node.next = test_head.next
+ * good.node.prev = &test_head
+ * test_head.next = &good.node
+ */
+ list_add(&good.node, &test_head);
+
+ pr_info("attempting corrupted list addition\n");
+ /*
+ * In simulating this "write what where" primitive, the "what" is
+ * the address of &bad.node, and the "where" is the address held
+ * by "redirection".
+ */
+ test_head.next = redirection;
+ list_add(&bad.node, &test_head);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else {
+ pr_err("list_add() corruption not detected!\n");
+ pr_expected_config(CONFIG_LIST_HARDENED);
+ }
+}
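
For reference, CONFIG_LIST_HARDENED (like the older CONFIG_DEBUG_LIST) catches the write above by validating the neighbouring pointers before linking in the new entry: the corrupted test_head.next no longer points back at test_head, so the insertion is rejected. A simplified sketch of that idea — the real checks live in lib/list_debug.c and differ in detail:

#include <linux/bug.h>
#include <linux/list.h>

/* Simplified: roughly the sanity checks a hardened list_add() performs. */
static bool list_add_is_sane(struct list_head *new,
			     struct list_head *prev,
			     struct list_head *next)
{
	if (WARN(next->prev != prev, "list_add corruption: next->prev != prev\n"))
		return false;
	if (WARN(prev->next != next, "list_add corruption: prev->next != next\n"))
		return false;
	if (WARN(new == prev || new == next, "list_add double add\n"))
		return false;
	return true;
}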
+
+static void lkdtm_CORRUPT_LIST_DEL(void)
+{
+ LIST_HEAD(test_head);
+ struct lkdtm_list item;
+ void *target[2] = { };
+ void *redirection = &target;
+
+ list_add(&item.node, &test_head);
+
+ pr_info("attempting good list removal\n");
+ list_del(&item.node);
+
+ pr_info("attempting corrupted list removal\n");
+ list_add(&item.node, &test_head);
+
+ /* As with the list_add() test above, this corrupts "next". */
+ item.node.next = redirection;
+ list_del(&item.node);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else {
+ pr_err("list_del() corruption not detected!\n");
+ pr_expected_config(CONFIG_LIST_HARDENED);
+ }
+}
+
+/* Test that VMAP_STACK is actually allocating with a leading guard page */
+static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
+{
+ const unsigned char *stack = task_stack_page(current);
+ const unsigned char *ptr = stack - 1;
+ volatile unsigned char byte;
+
+ pr_info("attempting bad read from page below current stack\n");
+
+ byte = *ptr;
+
+ pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
+}
+
+/* Test that VMAP_STACK is actually allocating with a trailing guard page */
+static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
+{
+ const unsigned char *stack = task_stack_page(current);
+ const unsigned char *ptr = stack + THREAD_SIZE;
+ volatile unsigned char byte;
+
+ pr_info("attempting bad read from page above current stack\n");
+
+ byte = *ptr;
+
+ pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
+}
+
+static void lkdtm_UNSET_SMEP(void)
+{
+#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
+#define MOV_CR4_DEPTH 64
+ void (*direct_write_cr4)(unsigned long val);
+ unsigned char *insn;
+ unsigned long cr4;
+ int i;
+
+ cr4 = native_read_cr4();
+
+ if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
+ pr_err("FAIL: SMEP not in use\n");
+ return;
+ }
+ cr4 &= ~(X86_CR4_SMEP);
+
+ pr_info("trying to clear SMEP normally\n");
+ native_write_cr4(cr4);
+ if (cr4 == native_read_cr4()) {
+ pr_err("FAIL: pinning SMEP failed!\n");
+ cr4 |= X86_CR4_SMEP;
+ pr_info("restoring SMEP\n");
+ native_write_cr4(cr4);
+ return;
+ }
+ pr_info("ok: SMEP did not get cleared\n");
+
+ /*
+ * To test the post-write pinning verification we need to call
+ * directly into the middle of native_write_cr4() where the
+ * cr4 write happens, skipping any pinning. This searches for
+ * the cr4 writing instruction.
+ */
+ insn = (unsigned char *)native_write_cr4;
+ OPTIMIZER_HIDE_VAR(insn);
+ for (i = 0; i < MOV_CR4_DEPTH; i++) {
+ /* mov %rdi, %cr4 */
+ if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
+ break;
+ /* mov %rdi,%rax; mov %rax, %cr4 */
+ if (insn[i] == 0x48 && insn[i+1] == 0x89 &&
+ insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
+ insn[i+4] == 0x22 && insn[i+5] == 0xe0)
+ break;
+ }
+ if (i >= MOV_CR4_DEPTH) {
+ pr_info("ok: cannot locate cr4 writing call gadget\n");
+ return;
+ }
+ direct_write_cr4 = (void *)(insn + i);
+
+ pr_info("trying to clear SMEP with call gadget\n");
+ direct_write_cr4(cr4);
+ if (native_read_cr4() & X86_CR4_SMEP) {
+ pr_info("ok: SMEP removal was reverted\n");
+ } else {
+ pr_err("FAIL: cleared SMEP not detected!\n");
+ cr4 |= X86_CR4_SMEP;
+ pr_info("restoring SMEP\n");
+ native_write_cr4(cr4);
+ }
+#else
+ pr_err("XFAIL: this test is x86_64-only\n");
+#endif
+}
+
+static void lkdtm_DOUBLE_FAULT(void)
+{
+#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
+ /*
+ * Trigger #DF by setting the stack limit to zero. This clobbers
+ * a GDT TLS slot, which is okay because the current task will die
+ * anyway due to the double fault.
+ */
+ struct desc_struct d = {
+ .type = 3, /* expand-up, writable, accessed data */
+ .p = 1, /* present */
+ .d = 1, /* 32-bit */
+ .g = 0, /* limit in bytes */
+ .s = 1, /* not system */
+ };
+
+ local_irq_disable();
+ write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
+ GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);
+
+ /*
+ * Put our zero-limit segment in SS and then trigger a fault. The
+ * 4-byte access to (%esp) will fault with #SS, and the attempt to
+ * deliver the fault will recursively cause #SS and result in #DF.
+ * This whole process happens while NMIs and MCEs are blocked by the
+ * MOV SS window. This is nice because an NMI with an invalid SS
+ * would also double-fault, resulting in the NMI or MCE being lost.
+ */
+ asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
+ "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));
+
+ pr_err("FAIL: tried to double fault but didn't die\n");
+#else
+ pr_err("XFAIL: this test is ia32-only\n");
+#endif
+}
+
+#ifdef CONFIG_ARM64
+static noinline void change_pac_parameters(void)
+{
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
+ /* Reset the keys of current task */
+ ptrauth_thread_init_kernel(current);
+ ptrauth_thread_switch_kernel(current);
+ }
+}
+#endif
+
+static noinline void lkdtm_CORRUPT_PAC(void)
+{
+#ifdef CONFIG_ARM64
+#define CORRUPT_PAC_ITERATE 10
+ int i;
+
+ if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
+ pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH_KERNEL\n");
+
+ if (!system_supports_address_auth()) {
+ pr_err("FAIL: CPU lacks pointer authentication feature\n");
+ return;
+ }
+
+ pr_info("changing PAC parameters to force function return failure...\n");
+ /*
+ * PAC is a hash value computed from input keys, return address and
+ * stack pointer. Since the PAC has only a few bits, there is a chance of
+ * collision, so iterate a few times to reduce the collision probability.
+ */
+ for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
+ change_pac_parameters();
+
+ pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
+#else
+ pr_err("XFAIL: this test is arm64-only\n");
+#endif
+}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(PANIC),
+ CRASHTYPE(BUG),
+ CRASHTYPE(WARNING),
+ CRASHTYPE(WARNING_MESSAGE),
+ CRASHTYPE(EXCEPTION),
+ CRASHTYPE(LOOP),
+ CRASHTYPE(EXHAUST_STACK),
+ CRASHTYPE(CORRUPT_STACK),
+ CRASHTYPE(CORRUPT_STACK_STRONG),
+ CRASHTYPE(REPORT_STACK),
+ CRASHTYPE(REPORT_STACK_CANARY),
+ CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+ CRASHTYPE(SOFTLOCKUP),
+ CRASHTYPE(HARDLOCKUP),
+ CRASHTYPE(SPINLOCKUP),
+ CRASHTYPE(HUNG_TASK),
+ CRASHTYPE(OVERFLOW_SIGNED),
+ CRASHTYPE(OVERFLOW_UNSIGNED),
+ CRASHTYPE(ARRAY_BOUNDS),
+ CRASHTYPE(FAM_BOUNDS),
+ CRASHTYPE(CORRUPT_LIST_ADD),
+ CRASHTYPE(CORRUPT_LIST_DEL),
+ CRASHTYPE(STACK_GUARD_PAGE_LEADING),
+ CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
+ CRASHTYPE(UNSET_SMEP),
+ CRASHTYPE(DOUBLE_FAULT),
+ CRASHTYPE(CORRUPT_PAC),
+};
+
+struct crashtype_category bugs_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
new file mode 100644
index 0000000000..fc28714ae3
--- /dev/null
+++ b/drivers/misc/lkdtm/cfi.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests relating directly to Control Flow Integrity.
+ */
+#include "lkdtm.h"
+#include <asm/page.h>
+
+static int called_count;
+
+/* Function taking one argument, without a return value. */
+static noinline void lkdtm_increment_void(int *counter)
+{
+ (*counter)++;
+}
+
+/* Function taking one argument, returning int. */
+static noinline int lkdtm_increment_int(int *counter)
+{
+ (*counter)++;
+
+ return *counter;
+}
+
+/* Don't allow the compiler to inline the calls. */
+static noinline void lkdtm_indirect_call(void (*func)(int *))
+{
+ func(&called_count);
+}
+
+/*
+ * This tries to call an indirect function with a mismatched prototype.
+ */
+static void lkdtm_CFI_FORWARD_PROTO(void)
+{
+ /*
+ * Matches lkdtm_increment_void()'s prototype, but not
+ * lkdtm_increment_int()'s prototype.
+ */
+ pr_info("Calling matched prototype ...\n");
+ lkdtm_indirect_call(lkdtm_increment_void);
+
+ pr_info("Calling mismatched prototype ...\n");
+ lkdtm_indirect_call((void *)lkdtm_increment_int);
+
+ pr_err("FAIL: survived mismatched prototype function call!\n");
+ pr_expected_config(CONFIG_CFI_CLANG);
+}
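
With CONFIG_CFI_CLANG the compiler attaches a type identifier to every address-taken function and verifies it at each indirect call site, so the mismatched call above is expected to trap rather than return. A userspace analogue of the same mismatch, purely for illustration (built with Clang's CFI, e.g. -flto -fvisibility=hidden -fsanitize=cfi-icall):

#include <stdio.h>

static void inc_void(int *c) { (*c)++; }
static int  inc_int(int *c)  { return ++(*c); }

/* The indirect call site is instrumented to check fn's type. */
static void call_it(void (*fn)(int *), int *c)
{
	fn(c);
}

int main(void)
{
	int count = 0;

	call_it(inc_void, &count);                  /* matching prototype: fine */
	call_it((void (*)(int *))inc_int, &count);  /* mismatch: CFI traps here */
	printf("count = %d (only reached without CFI)\n", count);
	return 0;
}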
+
+/*
+ * This can stay local to LKDTM, as there should not be a production reason
+ * to disable PAC && SCS.
+ */
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+# ifdef CONFIG_ARM64_BTI_KERNEL
+# define __no_pac "branch-protection=bti"
+# else
+# ifdef CONFIG_CC_HAS_BRANCH_PROT_PAC_RET
+# define __no_pac "branch-protection=none"
+# else
+# define __no_pac "sign-return-address=none"
+# endif
+# endif
+# define __no_ret_protection __noscs __attribute__((__target__(__no_pac)))
+#else
+# define __no_ret_protection __noscs
+#endif
+
+#define no_pac_addr(addr) \
+ ((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
+
+/* The ultimate ROP gadget. */
+static noinline __no_ret_protection
+void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
+{
+ /* Use of volatile is to make sure final write isn't seen as a dead store. */
+ unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+
+ /* Make sure we've found the right place on the stack before writing it. */
+ if (no_pac_addr(*ret_addr) == expected)
+ *ret_addr = (addr);
+ else
+ /* Check architecture, stack layout, or compiler behavior... */
+ pr_warn("Eek: return address mismatch! %px != %px\n",
+ *ret_addr, addr);
+}
+
+static noinline
+void set_return_addr(unsigned long *expected, unsigned long *addr)
+{
+ /* Use of volatile is to make sure final write isn't seen as a dead store. */
+ unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+
+ /* Make sure we've found the right place on the stack before writing it. */
+ if (no_pac_addr(*ret_addr) == expected)
+ *ret_addr = (addr);
+ else
+ /* Check architecture, stack layout, or compiler behavior... */
+ pr_warn("Eek: return address mismatch! %px != %px\n",
+ *ret_addr, addr);
+}
+
+static volatile int force_check;
+
+static void lkdtm_CFI_BACKWARD(void)
+{
+ /* Use calculated gotos to keep labels addressable. */
+ void *labels[] = { NULL, &&normal, &&redirected, &&check_normal, &&check_redirected };
+
+ pr_info("Attempting unchecked stack return address redirection ...\n");
+
+ /* Always false */
+ if (force_check) {
+ /*
+ * Prepare to call with NULLs to avoid parameters being treated as
+ * constants in -O2.
+ */
+ set_return_addr_unchecked(NULL, NULL);
+ set_return_addr(NULL, NULL);
+ if (force_check)
+ goto *labels[1];
+ if (force_check)
+ goto *labels[2];
+ if (force_check)
+ goto *labels[3];
+ if (force_check)
+ goto *labels[4];
+ return;
+ }
+
+ /*
+ * Use fallthrough switch case to keep basic block ordering between
+ * set_return_addr*() and the label after it.
+ */
+ switch (force_check) {
+ case 0:
+ set_return_addr_unchecked(&&normal, &&redirected);
+ fallthrough;
+ case 1:
+normal:
+ /* Always true */
+ if (!force_check) {
+ pr_err("FAIL: stack return address manipulation failed!\n");
+ /* If we can't redirect "normally", we can't test mitigations. */
+ return;
+ }
+ break;
+ default:
+redirected:
+ pr_info("ok: redirected stack return address.\n");
+ break;
+ }
+
+ pr_info("Attempting checked stack return address redirection ...\n");
+
+ switch (force_check) {
+ case 0:
+ set_return_addr(&&check_normal, &&check_redirected);
+ fallthrough;
+ case 1:
+check_normal:
+ /* Always true */
+ if (!force_check) {
+ pr_info("ok: control flow unchanged.\n");
+ return;
+ }
+
+check_redirected:
+ pr_err("FAIL: stack return address was redirected!\n");
+ break;
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
+ pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL);
+ return;
+ }
+ if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) {
+ pr_expected_config(CONFIG_SHADOW_CALL_STACK);
+ return;
+ }
+ pr_warn("This is probably expected, since this %s was built *without* %s=y nor %s=y\n",
+ lkdtm_kernel_info,
+ "CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK");
+}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(CFI_FORWARD_PROTO),
+ CRASHTYPE(CFI_BACKWARD),
+};
+
+struct crashtype_category cfi_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
new file mode 100644
index 0000000000..0772e4a475
--- /dev/null
+++ b/drivers/misc/lkdtm/core.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linux Kernel Dump Test Module for testing kernel crash conditions:
+ * induces system failures at predefined crashpoints and under predefined
+ * operational conditions in order to evaluate the reliability of kernel
+ * sanity checking and crash dumps obtained using different dumping
+ * solutions.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Ankita Garg <ankita@in.ibm.com>
+ *
+ * It is adapted from the Linux Kernel Dump Test Tool by
+ * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
+ *
+ * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
+ *
+ * See Documentation/fault-injection/provoke-crashes.rst for instructions
+ */
+#include "lkdtm.h"
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/kprobes.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/utsname.h>
+
+#define DEFAULT_COUNT 10
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off);
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off);
+
+#ifdef CONFIG_KPROBES
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+ const char __user *user_buf,
+ size_t count, loff_t *off);
+# define CRASHPOINT_KPROBE(_symbol) \
+ .kprobe = { \
+ .symbol_name = (_symbol), \
+ .pre_handler = lkdtm_kprobe_handler, \
+ },
+# define CRASHPOINT_WRITE(_symbol) \
+ (_symbol) ? lkdtm_debugfs_entry : direct_entry
+#else
+# define CRASHPOINT_KPROBE(_symbol)
+# define CRASHPOINT_WRITE(_symbol) direct_entry
+#endif
+
+/* Crash points */
+struct crashpoint {
+ const char *name;
+ const struct file_operations fops;
+ struct kprobe kprobe;
+};
+
+#define CRASHPOINT(_name, _symbol) \
+ { \
+ .name = _name, \
+ .fops = { \
+ .read = lkdtm_debugfs_read, \
+ .llseek = generic_file_llseek, \
+ .open = lkdtm_debugfs_open, \
+ .write = CRASHPOINT_WRITE(_symbol) \
+ }, \
+ CRASHPOINT_KPROBE(_symbol) \
+ }
+
+/* Define the possible places where we can trigger a crash point. */
+static struct crashpoint crashpoints[] = {
+ CRASHPOINT("DIRECT", NULL),
+#ifdef CONFIG_KPROBES
+ CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"),
+ CRASHPOINT("INT_HW_IRQ_EN", "handle_irq_event"),
+ CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"),
+ CRASHPOINT("FS_SUBMIT_BH", "submit_bh"),
+ CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
+ CRASHPOINT("TIMERADD", "hrtimer_start"),
+ CRASHPOINT("SCSI_QUEUE_RQ", "scsi_queue_rq"),
+#endif
+};
+
+/* List of possible types for crashes that can be triggered. */
+static const struct crashtype_category *crashtype_categories[] = {
+ &bugs_crashtypes,
+ &heap_crashtypes,
+ &perms_crashtypes,
+ &refcount_crashtypes,
+ &usercopy_crashtypes,
+ &stackleak_crashtypes,
+ &cfi_crashtypes,
+ &fortify_crashtypes,
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ &powerpc_crashtypes,
+#endif
+};
+
+/* Global kprobe entry and crashtype. */
+static struct kprobe *lkdtm_kprobe;
+static struct crashpoint *lkdtm_crashpoint;
+static const struct crashtype *lkdtm_crashtype;
+
+/* Module parameters */
+static int recur_count = -1;
+module_param(recur_count, int, 0644);
+MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
+
+static char* cpoint_name;
+module_param(cpoint_name, charp, 0444);
+MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
+
+static char* cpoint_type;
+module_param(cpoint_type, charp, 0444);
+MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
+ "hitting the crash point");
+
+static int cpoint_count = DEFAULT_COUNT;
+module_param(cpoint_count, int, 0644);
+MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
+ "crash point is to be hit to trigger action");
+
+/*
+ * For test debug reporting when CI systems provide terse summaries.
+ * TODO: Remove this once reasonable reporting exists in most CI systems:
+ * https://lore.kernel.org/lkml/CAHk-=wiFvfkoFixTapvvyPMN9pq5G-+Dys2eSyBa1vzDGAO5+A@mail.gmail.com
+ */
+char *lkdtm_kernel_info;
+
+/* Return the matching crashtype or NULL if the name is invalid */
+static const struct crashtype *find_crashtype(const char *name)
+{
+ int cat, idx;
+
+ for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
+ for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
+ struct crashtype *crashtype;
+
+ crashtype = &crashtype_categories[cat]->crashtypes[idx];
+ if (!strcmp(name, crashtype->name))
+ return crashtype;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * This is forced noinline just so it distinctly shows up in the stackdump
+ * which makes validation of expected lkdtm crashes easier.
+ */
+static noinline void lkdtm_do_action(const struct crashtype *crashtype)
+{
+ if (WARN_ON(!crashtype || !crashtype->func))
+ return;
+ crashtype->func();
+}
+
+static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
+ const struct crashtype *crashtype)
+{
+ int ret;
+
+ /* If this doesn't have a symbol, just call immediately. */
+ if (!crashpoint->kprobe.symbol_name) {
+ lkdtm_do_action(crashtype);
+ return 0;
+ }
+
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
+
+ lkdtm_crashpoint = crashpoint;
+ lkdtm_crashtype = crashtype;
+ lkdtm_kprobe = &crashpoint->kprobe;
+ ret = register_kprobe(lkdtm_kprobe);
+ if (ret < 0) {
+ pr_info("Couldn't register kprobe %s\n",
+ crashpoint->kprobe.symbol_name);
+ lkdtm_kprobe = NULL;
+ lkdtm_crashpoint = NULL;
+ lkdtm_crashtype = NULL;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_KPROBES
+/* Global crash counter and spinlock. */
+static int crash_count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(crash_count_lock);
+
+/* Called by kprobe entry points. */
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
+{
+ unsigned long flags;
+ bool do_it = false;
+
+ if (WARN_ON(!lkdtm_crashpoint || !lkdtm_crashtype))
+ return 0;
+
+ spin_lock_irqsave(&crash_count_lock, flags);
+ crash_count--;
+ pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
+ lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count);
+
+ if (crash_count == 0) {
+ do_it = true;
+ crash_count = cpoint_count;
+ }
+ spin_unlock_irqrestore(&crash_count_lock, flags);
+
+ if (do_it)
+ lkdtm_do_action(lkdtm_crashtype);
+
+ return 0;
+}
+
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct crashpoint *crashpoint = file_inode(f)->i_private;
+ const struct crashtype *crashtype = NULL;
+ char *buf;
+ int err;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NULL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ crashtype = find_crashtype(buf);
+ free_page((unsigned long)buf);
+
+ if (!crashtype)
+ return -EINVAL;
+
+ err = lkdtm_register_cpoint(crashpoint, crashtype);
+ if (err < 0)
+ return err;
+
+ *off += count;
+
+ return count;
+}
+#endif
+
+/* Generic read callback that just prints out the available crash types */
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ int n, cat, idx;
+ ssize_t out;
+ char *buf;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
+
+ for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
+ for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
+ struct crashtype *crashtype;
+
+ crashtype = &crashtype_categories[cat]->crashtypes[idx];
+ n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
+ crashtype->name);
+ }
+ }
+ buf[n] = '\0';
+
+ out = simple_read_from_buffer(user_buf, count, off,
+ buf, n);
+ free_page((unsigned long) buf);
+
+ return out;
+}
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/* Special entry to just crash directly. Available without KPROBEs */
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ const struct crashtype *crashtype;
+ char *buf;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+ if (count < 1)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NULL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ crashtype = find_crashtype(buf);
+ free_page((unsigned long) buf);
+ if (!crashtype)
+ return -EINVAL;
+
+ pr_info("Performing direct entry %s\n", crashtype->name);
+ lkdtm_do_action(crashtype);
+ *off += count;
+
+ return count;
+}
+
+#ifndef MODULE
+/*
+ * To avoid needing to export parse_args(), just don't use this code
+ * when LKDTM is built as a module.
+ */
+struct check_cmdline_args {
+ const char *param;
+ int value;
+};
+
+static int lkdtm_parse_one(char *param, char *val,
+ const char *unused, void *arg)
+{
+ struct check_cmdline_args *args = arg;
+
+ /* short circuit if we already found a value. */
+ if (args->value != -ESRCH)
+ return 0;
+ if (strncmp(param, args->param, strlen(args->param)) == 0) {
+ bool bool_result;
+ int ret;
+
+ ret = kstrtobool(val, &bool_result);
+ if (ret == 0)
+ args->value = bool_result;
+ }
+ return 0;
+}
+
+int lkdtm_check_bool_cmdline(const char *param)
+{
+ char *command_line;
+ struct check_cmdline_args args = {
+ .param = param,
+ .value = -ESRCH,
+ };
+
+ command_line = kstrdup(saved_command_line, GFP_KERNEL);
+ if (!command_line)
+ return -ENOMEM;
+
+ parse_args("Setting sysctl args", command_line,
+ NULL, 0, -1, -1, &args, lkdtm_parse_one);
+
+ kfree(command_line);
+
+ return args.value;
+}
+#endif
+
+static struct dentry *lkdtm_debugfs_root;
+
+static int __init lkdtm_module_init(void)
+{
+ struct crashpoint *crashpoint = NULL;
+ const struct crashtype *crashtype = NULL;
+ int ret;
+ int i;
+
+ /* Neither or both of these need to be set */
+ if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) {
+ pr_err("Need both cpoint_type and cpoint_name or neither\n");
+ return -EINVAL;
+ }
+
+ if (cpoint_type) {
+ crashtype = find_crashtype(cpoint_type);
+ if (!crashtype) {
+ pr_err("Unknown crashtype '%s'\n", cpoint_type);
+ return -EINVAL;
+ }
+ }
+
+ if (cpoint_name) {
+ for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+ if (!strcmp(cpoint_name, crashpoints[i].name))
+ crashpoint = &crashpoints[i];
+ }
+
+ /* Refuse unknown crashpoints. */
+ if (!crashpoint) {
+ pr_err("Invalid crashpoint %s\n", cpoint_name);
+ return -EINVAL;
+ }
+ }
+
+#ifdef CONFIG_KPROBES
+ /* Set crash count. */
+ crash_count = cpoint_count;
+#endif
+
+ /* Common initialization. */
+ lkdtm_kernel_info = kasprintf(GFP_KERNEL, "kernel (%s %s)",
+ init_uts_ns.name.release,
+ init_uts_ns.name.machine);
+
+ /* Handle test-specific initialization. */
+ lkdtm_bugs_init(&recur_count);
+ lkdtm_perms_init();
+ lkdtm_usercopy_init();
+ lkdtm_heap_init();
+
+ /* Register debugfs interface */
+ lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
+
+ /* Install debugfs trigger files. */
+ for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+ struct crashpoint *cur = &crashpoints[i];
+
+ debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, cur,
+ &cur->fops);
+ }
+
+ /* Install crashpoint if one was selected. */
+ if (crashpoint) {
+ ret = lkdtm_register_cpoint(crashpoint, crashtype);
+ if (ret < 0) {
+ pr_info("Invalid crashpoint %s\n", crashpoint->name);
+ goto out_err;
+ }
+ pr_info("Crash point %s of type %s registered\n",
+ crashpoint->name, cpoint_type);
+ } else {
+ pr_info("No crash points registered, enable through debugfs\n");
+ }
+
+ return 0;
+
+out_err:
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+ return ret;
+}
+
+static void __exit lkdtm_module_exit(void)
+{
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+
+ /* Handle test-specific clean-up. */
+ lkdtm_heap_exit();
+ lkdtm_usercopy_exit();
+
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
+
+ kfree(lkdtm_kernel_info);
+
+ pr_info("Crash point unregistered\n");
+}
+
+module_init(lkdtm_module_init);
+module_exit(lkdtm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kernel crash testing module");
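
Putting the two halves together: a crash type is triggered from userspace by writing its name into one of the files created under the provoke-crash debugfs directory; the DIRECT entry runs it immediately, the other entries arm a kprobe on the named crash point. A small illustrative example (assumes debugfs is mounted at /sys/kernel/debug, the usual location, and that LKDTM is built in or loaded):

/* Triggers the (recoverable) WARNING crashtype via the DIRECT entry. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/provoke-crash/DIRECT";
	const char *type = "WARNING\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, type, strlen(type)) < 0)
		perror("write");
	close(fd);
	return 0;
}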
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
new file mode 100644
index 0000000000..0159276656
--- /dev/null
+++ b/drivers/misc/lkdtm/fortify.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Francis Laniel <laniel_francis@privacyrequired.com>
+ *
+ * Add tests related to fortified functions in this file.
+ */
+#include "lkdtm.h"
+#include <linux/string.h>
+#include <linux/slab.h>
+
+static volatile int fortify_scratch_space;
+
+static void lkdtm_FORTIFY_STR_OBJECT(void)
+{
+ struct target {
+ char a[10];
+ int foo;
+ } target[3] = {};
+ /*
+ * Using volatile prevents the compiler from determining the value of
+ * 'size' at compile time. Without that, we would get a compile error
+ * rather than a runtime error.
+ */
+ volatile int size = 20;
+
+ pr_info("trying to strcmp() past the end of a struct\n");
+
+ strncpy(target[0].a, target[1].a, size);
+
+ /* Store result to global to prevent the code from being eliminated */
+ fortify_scratch_space = target[0].a[3];
+
+ pr_err("FAIL: fortify did not block a strncpy() object write overflow!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
+}
+
+static void lkdtm_FORTIFY_STR_MEMBER(void)
+{
+ struct target {
+ char a[10];
+ char b[10];
+ } target;
+ volatile int size = 20;
+ char *src;
+
+ src = kmalloc(size, GFP_KERNEL);
+ strscpy(src, "over ten bytes", size);
+ size = strlen(src) + 1;
+
+ pr_info("trying to strncpy() past the end of a struct member...\n");
+
+ /*
+ * strncpy(target.a, src, 20); will hit a compile error because the
+ * compiler knows at build time that target.a < 20 bytes. Use a
+ * volatile to force a runtime error.
+ */
+ strncpy(target.a, src, size);
+
+ /* Store result to global to prevent the code from being eliminated */
+ fortify_scratch_space = target.a[3];
+
+ pr_err("FAIL: fortify did not block a strncpy() struct member write overflow!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
+
+ kfree(src);
+}
+
+static void lkdtm_FORTIFY_MEM_OBJECT(void)
+{
+ int before[10];
+ struct target {
+ char a[10];
+ int foo;
+ } target = {};
+ int after[10];
+ /*
+ * Using volatile prevents the compiler from determining the value of
+ * 'size' at compile time. Without that, we would get a compile error
+ * rather than a runtime error.
+ */
+ volatile int size = 20;
+
+ memset(before, 0, sizeof(before));
+ memset(after, 0, sizeof(after));
+ fortify_scratch_space = before[5];
+ fortify_scratch_space = after[5];
+
+ pr_info("trying to memcpy() past the end of a struct\n");
+
+ pr_info("0: %zu\n", __builtin_object_size(&target, 0));
+ pr_info("1: %zu\n", __builtin_object_size(&target, 1));
+ pr_info("s: %d\n", size);
+ memcpy(&target, &before, size);
+
+ /* Store result to global to prevent the code from being eliminated */
+ fortify_scratch_space = target.a[3];
+
+ pr_err("FAIL: fortify did not block a memcpy() object write overflow!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
+}
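
The __builtin_object_size() values printed above are what make the runtime check possible: the fortified memcpy() compares the (non-constant) length against the compiler-reported destination size and panics on overflow. A heavily simplified sketch of that check, not the real implementation in include/linux/fortify-string.h:

#include <linux/types.h>

/*
 * Simplified sketch: the real fortified memcpy() also checks the source
 * object and handles the cases the compiler can prove at build time.
 * fortify_panic() is the kernel's existing __noreturn reporting helper.
 */
#define memcpy_checked(dst, src, len) ({				\
	size_t __bos = __builtin_object_size(dst, 0);			\
	if (__bos != (size_t)-1 && (len) > __bos)			\
		fortify_panic("memcpy");				\
	__builtin_memcpy(dst, src, len);				\
})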
+
+static void lkdtm_FORTIFY_MEM_MEMBER(void)
+{
+ struct target {
+ char a[10];
+ char b[10];
+ } target;
+ volatile int size = 20;
+ char *src;
+
+ src = kmalloc(size, GFP_KERNEL);
+ strscpy(src, "over ten bytes", size);
+ size = strlen(src) + 1;
+
+ pr_info("trying to memcpy() past the end of a struct member...\n");
+
+ /*
+ * memcpy(target.a, src, 20); will hit a compile error because the
+ * compiler knows at build time that target.a < 20 bytes. Use a
+ * volatile to force a runtime error.
+ */
+ memcpy(target.a, src, size);
+
+ /* Store result to global to prevent the code from being eliminated */
+ fortify_scratch_space = target.a[3];
+
+ pr_err("FAIL: fortify did not block a memcpy() struct member write overflow!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
+
+ kfree(src);
+}
+
+/*
+ * Calls fortified strscpy to test that it returns the same result as vanilla
+ * strscpy and generate a panic because there is a write overflow (i.e. src
+ * length is greater than dst length).
+ */
+static void lkdtm_FORTIFY_STRSCPY(void)
+{
+ char *src;
+ char dst[5];
+
+ struct {
+ union {
+ char big[10];
+ char src[5];
+ };
+ } weird = { .big = "hello!" };
+ char weird_dst[sizeof(weird.src) + 1];
+
+ src = kstrdup("foobar", GFP_KERNEL);
+
+ if (src == NULL)
+ return;
+
+ /* Vanilla strscpy returns -E2BIG if size is 0. */
+ if (strscpy(dst, src, 0) != -E2BIG)
+ pr_warn("FAIL: strscpy() of 0 length did not return -E2BIG\n");
+
+ /* Vanilla strscpy returns -E2BIG if src is truncated. */
+ if (strscpy(dst, src, sizeof(dst)) != -E2BIG)
+ pr_warn("FAIL: strscpy() did not return -E2BIG while src is truncated\n");
+
+ /* After above call, dst must contain "foob" because src was truncated. */
+ if (strncmp(dst, "foob", sizeof(dst)) != 0)
+ pr_warn("FAIL: after strscpy() dst does not contain \"foob\" but \"%s\"\n",
+ dst);
+
+ /* Shrink src so the strscpy() below succeeds. */
+ src[3] = '\0';
+
+ /*
+ * Vanilla strscpy returns the number of characters copied if everything goes
+ * well.
+ */
+ if (strscpy(dst, src, sizeof(dst)) != 3)
+ pr_warn("FAIL: strscpy() did not return 3 while src was copied entirely truncated\n");
+
+ /* After above call, dst must contain "foo" because src was copied. */
+ if (strncmp(dst, "foo", sizeof(dst)) != 0)
+ pr_warn("FAIL: after strscpy() dst does not contain \"foo\" but \"%s\"\n",
+ dst);
+
+ /* Test when src is embedded inside a union. */
+ strscpy(weird_dst, weird.src, sizeof(weird_dst));
+
+ if (strcmp(weird_dst, "hello") != 0)
+ pr_warn("FAIL: after strscpy() weird_dst does not contain \"hello\" but \"%s\"\n",
+ weird_dst);
+
+ /* Restore src to its initial value. */
+ src[3] = 'b';
+
+ /*
+ * Use strlen here so size cannot be known at compile time and there is
+ * a runtime write overflow.
+ */
+ strscpy(dst, src, strlen(src));
+
+ pr_err("FAIL: strscpy() overflow not detected!\n");
+ pr_expected_config(CONFIG_FORTIFY_SOURCE);
+
+ kfree(src);
+}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(FORTIFY_STR_OBJECT),
+ CRASHTYPE(FORTIFY_STR_MEMBER),
+ CRASHTYPE(FORTIFY_MEM_OBJECT),
+ CRASHTYPE(FORTIFY_MEM_MEMBER),
+ CRASHTYPE(FORTIFY_STRSCPY),
+};
+
+struct crashtype_category fortify_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
new file mode 100644
index 0000000000..0ce4cbf6ab
--- /dev/null
+++ b/drivers/misc/lkdtm/heap.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests relating directly to heap memory, including
+ * page allocation and slab allocations.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/sched.h>
+
+static struct kmem_cache *double_free_cache;
+static struct kmem_cache *a_cache;
+static struct kmem_cache *b_cache;
+
+/*
+ * Using volatile here means the compiler cannot ever make assumptions
+ * about this value. This means compile-time length checks involving
+ * this variable cannot be performed; only run-time checks.
+ */
+static volatile int __offset = 1;
+
+/*
+ * If there aren't guard pages, it's likely that a consecutive allocation will
+ * let us overflow into the second allocation without overwriting something real.
+ *
+ * This should always be caught because there is an unconditional unmapped
+ * page after vmap allocations.
+ */
+static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
+{
+ char *one, *two;
+
+ one = vzalloc(PAGE_SIZE);
+ OPTIMIZER_HIDE_VAR(one);
+ two = vzalloc(PAGE_SIZE);
+
+ pr_info("Attempting vmalloc linear overflow ...\n");
+ memset(one, 0xAA, PAGE_SIZE + __offset);
+
+ vfree(two);
+ vfree(one);
+}
+
+/*
+ * This tries to stay within the next largest power-of-2 kmalloc cache
+ * to avoid actually overwriting anything important if it's not detected
+ * correctly.
+ *
+ * This should get caught by memory tagging, KASAN, or by using
+ * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
+ */
+static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
+{
+ size_t len = 1020;
+ u32 *data = kmalloc(len, GFP_KERNEL);
+ if (!data)
+ return;
+
+ pr_info("Attempting slab linear overflow ...\n");
+ OPTIMIZER_HIDE_VAR(data);
+ data[1024 / sizeof(u32)] = 0x12345678;
+ kfree(data);
+}
+
+static void lkdtm_WRITE_AFTER_FREE(void)
+{
+ int *base, *again;
+ size_t len = 1024;
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+ * allocation to avoid running into the freelist
+ */
+ size_t offset = (len / sizeof(*base)) / 2;
+
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base)
+ return;
+ pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
+ pr_info("Attempting bad write to freed memory at %p\n",
+ &base[offset]);
+ kfree(base);
+ base[offset] = 0x0abcdef0;
+ /* Attempt to notice the overwrite. */
+ again = kmalloc(len, GFP_KERNEL);
+ kfree(again);
+ if (again != base)
+ pr_info("Hmm, didn't get the same memory range.\n");
+}
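
As a worked example of the offset calculation above: with len = 1024 and 4-byte ints, offset = (1024 / 4) / 2 = 128, so the write lands at byte offset 512 — the middle of the allocation, well away from a freelist pointer kept in the first word.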
+
+static void lkdtm_READ_AFTER_FREE(void)
+{
+ int *base, *val, saw;
+ size_t len = 1024;
+ /*
+ * The slub allocator will use the either the first word or
+ * the middle of the allocation to store the free pointer,
+ * depending on configurations. Store in the second word to
+ * avoid running into the freelist.
+ */
+ size_t offset = sizeof(*base);
+
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base) {
+ pr_info("Unable to allocate base memory.\n");
+ return;
+ }
+
+ val = kmalloc(len, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate val memory.\n");
+ kfree(base);
+ return;
+ }
+
+ *val = 0x12345678;
+ base[offset] = *val;
+ pr_info("Value in memory before free: %x\n", base[offset]);
+
+ kfree(base);
+
+ pr_info("Attempting bad read from freed memory\n");
+ saw = base[offset];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ } else {
+ pr_err("FAIL: Memory was not poisoned!\n");
+ pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
+ }
+
+ kfree(val);
+}
+
+static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ if (!p) {
+ pr_info("Unable to allocate free page\n");
+ return;
+ }
+
+ pr_info("Writing to the buddy page before free\n");
+ memset((void *)p, 0x3, PAGE_SIZE);
+ free_page(p);
+ schedule();
+ pr_info("Attempting bad write to the buddy page after free\n");
+ memset((void *)p, 0x78, PAGE_SIZE);
+ /* Attempt to notice the overwrite. */
+ p = __get_free_page(GFP_KERNEL);
+ free_page(p);
+ schedule();
+}
+
+static void lkdtm_READ_BUDDY_AFTER_FREE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ int saw, *val;
+ int *base;
+
+ if (!p) {
+ pr_info("Unable to allocate free page\n");
+ return;
+ }
+
+ val = kmalloc(1024, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate val memory.\n");
+ free_page(p);
+ return;
+ }
+
+ base = (int *)p;
+
+ *val = 0x12345678;
+ base[0] = *val;
+ pr_info("Value in memory before free: %x\n", base[0]);
+ free_page(p);
+ pr_info("Attempting to read from freed memory\n");
+ saw = base[0];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ } else {
+ pr_err("FAIL: Buddy page was not poisoned!\n");
+ pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
+ }
+
+ kfree(val);
+}
+
+static void lkdtm_SLAB_INIT_ON_ALLOC(void)
+{
+ u8 *first;
+ u8 *val;
+
+ first = kmalloc(512, GFP_KERNEL);
+ if (!first) {
+ pr_info("Unable to allocate 512 bytes the first time.\n");
+ return;
+ }
+
+ memset(first, 0xAB, 512);
+ kfree(first);
+
+ val = kmalloc(512, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate 512 bytes the second time.\n");
+ return;
+ }
+ if (val != first) {
+ pr_warn("Reallocation missed clobbered memory.\n");
+ }
+
+ if (memchr(val, 0xAB, 512) == NULL) {
+ pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
+ } else {
+ pr_err("FAIL: Slab was not initialized\n");
+ pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
+ }
+ kfree(val);
+}
+
+static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
+{
+ u8 *first;
+ u8 *val;
+
+ first = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!first) {
+ pr_info("Unable to allocate first free page\n");
+ return;
+ }
+
+ memset(first, 0xAB, PAGE_SIZE);
+ free_page((unsigned long)first);
+
+ val = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate second free page\n");
+ return;
+ }
+
+ if (val != first) {
+ pr_warn("Reallocation missed clobbered memory.\n");
+ }
+
+ if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
+ pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
+ } else {
+ pr_err("FAIL: Slab was not initialized\n");
+ pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
+ }
+ free_page((unsigned long)val);
+}
+
+static void lkdtm_SLAB_FREE_DOUBLE(void)
+{
+ int *val;
+
+ val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate double_free_cache memory.\n");
+ return;
+ }
+
+ /* Just make sure we got real memory. */
+ *val = 0x12345678;
+ pr_info("Attempting double slab free ...\n");
+ kmem_cache_free(double_free_cache, val);
+ kmem_cache_free(double_free_cache, val);
+}
+
+static void lkdtm_SLAB_FREE_CROSS(void)
+{
+ int *val;
+
+ val = kmem_cache_alloc(a_cache, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate a_cache memory.\n");
+ return;
+ }
+
+ /* Just make sure we got real memory. */
+ *val = 0x12345679;
+ pr_info("Attempting cross-cache slab free ...\n");
+ kmem_cache_free(b_cache, val);
+}
+
+static void lkdtm_SLAB_FREE_PAGE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+
+ pr_info("Attempting non-Slab slab free ...\n");
+ kmem_cache_free(NULL, (void *)p);
+ free_page(p);
+}
+
+/*
+ * We have constructors to keep the caches distinctly separated without
+ * needing to boot with "slab_nomerge".
+ */
+static void ctor_double_free(void *region)
+{ }
+static void ctor_a(void *region)
+{ }
+static void ctor_b(void *region)
+{ }
+
+void __init lkdtm_heap_init(void)
+{
+ double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
+ 64, 0, 0, ctor_double_free);
+ a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
+ b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
+}
+
+void __exit lkdtm_heap_exit(void)
+{
+ kmem_cache_destroy(double_free_cache);
+ kmem_cache_destroy(a_cache);
+ kmem_cache_destroy(b_cache);
+}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(SLAB_LINEAR_OVERFLOW),
+ CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
+ CRASHTYPE(WRITE_AFTER_FREE),
+ CRASHTYPE(READ_AFTER_FREE),
+ CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
+ CRASHTYPE(READ_BUDDY_AFTER_FREE),
+ CRASHTYPE(SLAB_INIT_ON_ALLOC),
+ CRASHTYPE(BUDDY_INIT_ON_ALLOC),
+ CRASHTYPE(SLAB_FREE_DOUBLE),
+ CRASHTYPE(SLAB_FREE_CROSS),
+ CRASHTYPE(SLAB_FREE_PAGE),
+};
+
+struct crashtype_category heap_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
new file mode 100644
index 0000000000..015e048402
--- /dev/null
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LKDTM_H
+#define __LKDTM_H
+
+#define pr_fmt(fmt) "lkdtm: " fmt
+
+#include <linux/kernel.h>
+
+extern char *lkdtm_kernel_info;
+
+#define pr_expected_config(kconfig) \
+do { \
+ if (IS_ENABLED(kconfig)) \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y\n", \
+ lkdtm_kernel_info); \
+ else \
+ pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \
+ lkdtm_kernel_info); \
+} while (0)
+
+#ifndef MODULE
+int lkdtm_check_bool_cmdline(const char *param);
+#define pr_expected_config_param(kconfig, param) \
+do { \
+ if (IS_ENABLED(kconfig)) { \
+ switch (lkdtm_check_bool_cmdline(param)) { \
+ case 0: \
+ pr_warn("This is probably expected, since this %s was built with " #kconfig "=y but booted with '" param "=N'\n", \
+ lkdtm_kernel_info); \
+ break; \
+ case 1: \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y and booted with '" param "=Y'\n", \
+ lkdtm_kernel_info); \
+ break; \
+ default: \
+ pr_err("Unexpected! This %s was built with " #kconfig "=y (and booted without '" param "' specified)\n", \
+ lkdtm_kernel_info); \
+ } \
+ } else { \
+ switch (lkdtm_check_bool_cmdline(param)) { \
+ case 0: \
+ pr_warn("This is probably expected, as this %s was built *without* " #kconfig "=y and booted with '" param "=N'\n", \
+ lkdtm_kernel_info); \
+ break; \
+ case 1: \
+ pr_err("Unexpected! This %s was built *without* " #kconfig "=y but booted with '" param "=Y'\n", \
+ lkdtm_kernel_info); \
+ break; \
+ default: \
+ pr_err("This is probably expected, since this %s was built *without* " #kconfig "=y (and booted without '" param "' specified)\n", \
+ lkdtm_kernel_info); \
+ break; \
+ } \
+ } \
+} while (0)
+#else
+#define pr_expected_config_param(kconfig, param) pr_expected_config(kconfig)
+#endif
+
+/* Crash types. */
+struct crashtype {
+ const char *name;
+ void (*func)(void);
+};
+
+#define CRASHTYPE(_name) \
+ { \
+ .name = __stringify(_name), \
+ .func = lkdtm_ ## _name, \
+ }
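
For example, CRASHTYPE(PANIC) expands to { .name = "PANIC", .func = lkdtm_PANIC }, which is how the per-file crashtypes[] tables map the names written to debugfs onto their lkdtm_*() handlers.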
+
+/* Category's collection of crashtypes. */
+struct crashtype_category {
+ struct crashtype *crashtypes;
+ size_t len;
+};
+
+/* Each category's crashtypes list. */
+extern struct crashtype_category bugs_crashtypes;
+extern struct crashtype_category heap_crashtypes;
+extern struct crashtype_category perms_crashtypes;
+extern struct crashtype_category refcount_crashtypes;
+extern struct crashtype_category usercopy_crashtypes;
+extern struct crashtype_category stackleak_crashtypes;
+extern struct crashtype_category cfi_crashtypes;
+extern struct crashtype_category fortify_crashtypes;
+extern struct crashtype_category powerpc_crashtypes;
+
+/* Each category's init/exit routines. */
+void __init lkdtm_bugs_init(int *recur_param);
+void __init lkdtm_heap_init(void);
+void __exit lkdtm_heap_exit(void);
+void __init lkdtm_perms_init(void);
+void __init lkdtm_usercopy_init(void);
+void __exit lkdtm_usercopy_exit(void);
+
+/* Special declaration for function-in-rodata. */
+void lkdtm_rodata_do_nothing(void);
+
+#endif
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
new file mode 100644
index 0000000000..b93404d656
--- /dev/null
+++ b/drivers/misc/lkdtm/perms.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to validating kernel memory
+ * permissions: non-executable regions, non-writable regions, and
+ * even non-readable regions.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/sections.h>
+
+/* Whether or not to fill the target memory area with do_nothing(). */
+#define CODE_WRITE true
+#define CODE_AS_IS false
+
+/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
+#define EXEC_SIZE 64
+
+/* This is non-const, so it will end up in the .data section. */
+static u8 data_area[EXEC_SIZE];
+
+/* This is const, so it will end up in the .rodata section. */
+static const unsigned long rodata = 0xAA55AA55;
+
+/* This is marked __ro_after_init, so it should ultimately be .rodata. */
+static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
+
+/*
+ * This just returns to the caller. It is designed to be copied into
+ * non-executable memory regions.
+ */
+static noinline void do_nothing(void)
+{
+ return;
+}
+
+/* Must immediately follow do_nothing for size calculations to work out. */
+static noinline void do_overwritten(void)
+{
+ pr_info("do_overwritten wasn't overwritten!\n");
+ return;
+}
+
+static noinline void do_almost_nothing(void)
+{
+ pr_info("do_nothing was hijacked!\n");
+}
+
+static void *setup_function_descriptor(func_desc_t *fdesc, void *dst)
+{
+ if (!have_function_descriptors())
+ return dst;
+
+ memcpy(fdesc, do_nothing, sizeof(*fdesc));
+ fdesc->addr = (unsigned long)dst;
+ barrier();
+
+ return fdesc;
+}
+
+static noinline void execute_location(void *dst, bool write)
+{
+ void (*func)(void);
+ func_desc_t fdesc;
+ void *do_nothing_text = dereference_function_descriptor(do_nothing);
+
+ pr_info("attempting ok execution at %px\n", do_nothing_text);
+ do_nothing();
+
+ if (write == CODE_WRITE) {
+ memcpy(dst, do_nothing_text, EXEC_SIZE);
+ flush_icache_range((unsigned long)dst,
+ (unsigned long)dst + EXEC_SIZE);
+ }
+ pr_info("attempting bad execution at %px\n", dst);
+ func = setup_function_descriptor(&fdesc, dst);
+ func();
+ pr_err("FAIL: func returned\n");
+}
+
+static void execute_user_location(void *dst)
+{
+ int copied;
+
+ /* Intentionally crossing kernel/user memory boundary. */
+ void (*func)(void);
+ func_desc_t fdesc;
+ void *do_nothing_text = dereference_function_descriptor(do_nothing);
+
+ pr_info("attempting ok execution at %px\n", do_nothing_text);
+ do_nothing();
+
+ copied = access_process_vm(current, (unsigned long)dst, do_nothing_text,
+ EXEC_SIZE, FOLL_WRITE);
+ if (copied < EXEC_SIZE)
+ return;
+ pr_info("attempting bad execution at %px\n", dst);
+ func = setup_function_descriptor(&fdesc, dst);
+ func();
+ pr_err("FAIL: func returned\n");
+}
+
+static void lkdtm_WRITE_RO(void)
+{
+ /* Explicitly cast away "const" for the test and make volatile. */
+ volatile unsigned long *ptr = (unsigned long *)&rodata;
+
+ pr_info("attempting bad rodata write at %px\n", ptr);
+ *ptr ^= 0xabcd1234;
+ pr_err("FAIL: survived bad write\n");
+}
+
+static void lkdtm_WRITE_RO_AFTER_INIT(void)
+{
+ volatile unsigned long *ptr = &ro_after_init;
+
+ /*
+	 * Verify we were written to during init. Since an Oops
+	 * is considered a "success", a failure here just skips the
+	 * real test.
+ */
+ if ((*ptr & 0xAA) != 0xAA) {
+ pr_info("%p was NOT written during init!?\n", ptr);
+ return;
+ }
+
+ pr_info("attempting bad ro_after_init write at %px\n", ptr);
+ *ptr ^= 0xabcd1234;
+ pr_err("FAIL: survived bad write\n");
+}
+
+static void lkdtm_WRITE_KERN(void)
+{
+ size_t size;
+ volatile unsigned char *ptr;
+
+ size = (unsigned long)dereference_function_descriptor(do_overwritten) -
+ (unsigned long)dereference_function_descriptor(do_nothing);
+ ptr = dereference_function_descriptor(do_overwritten);
+
+ pr_info("attempting bad %zu byte write at %px\n", size, ptr);
+ memcpy((void *)ptr, (unsigned char *)do_nothing, size);
+ flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
+ pr_err("FAIL: survived bad write\n");
+
+ do_overwritten();
+}
+
+static void lkdtm_WRITE_OPD(void)
+{
+ size_t size = sizeof(func_desc_t);
+ void (*func)(void) = do_nothing;
+
+ if (!have_function_descriptors()) {
+ pr_info("XFAIL: Platform doesn't use function descriptors.\n");
+ return;
+ }
+	pr_info("attempting bad %zu byte write at %px\n", size, do_nothing);
+ memcpy(do_nothing, do_almost_nothing, size);
+ pr_err("FAIL: survived bad write\n");
+
+ asm("" : "=m"(func));
+ func();
+}
+
+static void lkdtm_EXEC_DATA(void)
+{
+ execute_location(data_area, CODE_WRITE);
+}
+
+static void lkdtm_EXEC_STACK(void)
+{
+ u8 stack_area[EXEC_SIZE];
+ execute_location(stack_area, CODE_WRITE);
+}
+
+static void lkdtm_EXEC_KMALLOC(void)
+{
+ u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
+ execute_location(kmalloc_area, CODE_WRITE);
+ kfree(kmalloc_area);
+}
+
+static void lkdtm_EXEC_VMALLOC(void)
+{
+ u32 *vmalloc_area = vmalloc(EXEC_SIZE);
+ execute_location(vmalloc_area, CODE_WRITE);
+ vfree(vmalloc_area);
+}
+
+static void lkdtm_EXEC_RODATA(void)
+{
+ execute_location(dereference_function_descriptor(lkdtm_rodata_do_nothing),
+ CODE_AS_IS);
+}
+
+static void lkdtm_EXEC_USERSPACE(void)
+{
+ unsigned long user_addr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+ execute_user_location((void *)user_addr);
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+static void lkdtm_EXEC_NULL(void)
+{
+ execute_location(NULL, CODE_AS_IS);
+}
+
+static void lkdtm_ACCESS_USERSPACE(void)
+{
+ unsigned long user_addr, tmp = 0;
+ unsigned long *ptr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
+ pr_warn("copy_to_user failed\n");
+ vm_munmap(user_addr, PAGE_SIZE);
+ return;
+ }
+
+ ptr = (unsigned long *)user_addr;
+
+ pr_info("attempting bad read at %px\n", ptr);
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+ pr_err("FAIL: survived bad read\n");
+
+ pr_info("attempting bad write at %px\n", ptr);
+ *ptr = tmp;
+ pr_err("FAIL: survived bad write\n");
+
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+static void lkdtm_ACCESS_NULL(void)
+{
+ unsigned long tmp;
+ volatile unsigned long *ptr = (unsigned long *)NULL;
+
+ pr_info("attempting bad read at %px\n", ptr);
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+ pr_err("FAIL: survived bad read\n");
+
+ pr_info("attempting bad write at %px\n", ptr);
+ *ptr = tmp;
+ pr_err("FAIL: survived bad write\n");
+}
+
+void __init lkdtm_perms_init(void)
+{
+ /* Make sure we can write to __ro_after_init values during __init */
+ ro_after_init |= 0xAA;
+}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(WRITE_RO),
+ CRASHTYPE(WRITE_RO_AFTER_INIT),
+ CRASHTYPE(WRITE_KERN),
+ CRASHTYPE(WRITE_OPD),
+ CRASHTYPE(EXEC_DATA),
+ CRASHTYPE(EXEC_STACK),
+ CRASHTYPE(EXEC_KMALLOC),
+ CRASHTYPE(EXEC_VMALLOC),
+ CRASHTYPE(EXEC_RODATA),
+ CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(EXEC_NULL),
+ CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(ACCESS_NULL),
+};
+
+struct crashtype_category perms_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
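+
+/*
+ * Each of these can be exercised from userspace through the LKDTM debugfs
+ * interface (assuming debugfs is mounted at /sys/kernel/debug), e.g.:
+ *
+ *	echo EXEC_RODATA > /sys/kernel/debug/provoke-crash/DIRECT
+ */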
diff --git a/drivers/misc/lkdtm/powerpc.c b/drivers/misc/lkdtm/powerpc.c
new file mode 100644
index 0000000000..be38544991
--- /dev/null
+++ b/drivers/misc/lkdtm/powerpc.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/mmu.h>
+
+/* Inserts new SLB entries. */
+static void insert_slb_entry(unsigned long p, int ssize, int page_size)
+{
+ unsigned long flags;
+
+ flags = SLB_VSID_KERNEL | mmu_psize_defs[page_size].sllp;
+ preempt_disable();
+
+ asm volatile("slbmte %0,%1" :
+ : "r" (mk_vsid_data(p, ssize, flags)),
+ "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED))
+ : "memory");
+
+ asm volatile("slbmte %0,%1" :
+ : "r" (mk_vsid_data(p, ssize, flags)),
+ "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED + 1))
+ : "memory");
+ preempt_enable();
+}
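+
+/*
+ * insert_slb_entry() deliberately installs two SLB entries (at indices
+ * SLB_NUM_BOLTED and SLB_NUM_BOLTED + 1) covering the same effective
+ * address, so the next access to that address raises an SLB multihit
+ * error that the kernel's error handling is expected to recover from.
+ */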
+
+/* Inject SLB multihit on a vmalloc-ed address, i.e. 0xD00... */
+static int inject_vmalloc_slb_multihit(void)
+{
+ char *p;
+
+ p = vmalloc(PAGE_SIZE);
+ if (!p)
+ return -ENOMEM;
+
+ insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_vmalloc_psize);
+ /*
+	 * This triggers an exception. If it is handled correctly, we must
+	 * recover from this error.
+ */
+ p[0] = '!';
+ vfree(p);
+ return 0;
+}
+
+/* Inject SLB multihit on a kmalloc-ed address, i.e. 0xC00... */
+static int inject_kmalloc_slb_multihit(void)
+{
+ char *p;
+
+ p = kmalloc(2048, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_linear_psize);
+ /*
+	 * This triggers an exception. If it is handled correctly, we must
+	 * recover from this error.
+ */
+ p[0] = '!';
+ kfree(p);
+ return 0;
+}
+
+/*
+ * A few of the initial SLB entries are bolted. This test injects a
+ * multihit in bolted entry 0.
+ */
+static void insert_dup_slb_entry_0(void)
+{
+ unsigned long test_address = PAGE_OFFSET, *test_ptr;
+ unsigned long esid, vsid;
+ unsigned long i = 0;
+
+ test_ptr = (unsigned long *)test_address;
+ preempt_disable();
+
+ asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
+ asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
+
+	/* For i != 0 we would need to mask out the old entry number. */
+ asm volatile("slbmte %0,%1" :
+ : "r" (vsid),
+ "r" (esid | SLB_NUM_BOLTED)
+ : "memory");
+
+ asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
+ asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
+
+	/* For i != 0 we would need to mask out the old entry number. */
+ asm volatile("slbmte %0,%1" :
+ : "r" (vsid),
+ "r" (esid | (SLB_NUM_BOLTED + 1))
+ : "memory");
+
+ pr_info("%s accessing test address 0x%lx: 0x%lx\n",
+ __func__, test_address, *test_ptr);
+
+ preempt_enable();
+}
+
+static void lkdtm_PPC_SLB_MULTIHIT(void)
+{
+ if (!radix_enabled()) {
+ pr_info("Injecting SLB multihit errors\n");
+ /*
+		 * These need not be separate tests, and they do pretty
+		 * much the same thing. In any case, we must recover from
+		 * the errors these functions introduce; the machine would
+		 * not survive these tests if they were mishandled.
+ */
+ inject_vmalloc_slb_multihit();
+ inject_kmalloc_slb_multihit();
+ insert_dup_slb_entry_0();
+ pr_info("Recovered from SLB multihit errors\n");
+ } else {
+		pr_err("XFAIL: This test is only for ppc64 with the hash MMU\n");
+ }
+}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(PPC_SLB_MULTIHIT),
+};
+
+struct crashtype_category powerpc_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c
new file mode 100644
index 0000000000..5cd488f54c
--- /dev/null
+++ b/drivers/misc/lkdtm/refcount.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to refcount bugs (e.g. overflow,
+ * underflow, reaching zero untested, etc).
+ */
+#include "lkdtm.h"
+#include <linux/refcount.h>
+
+static void overflow_check(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Overflow detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Overflow detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount wrapped to %d\n", refcount_read(ref));
+ }
+}
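+
+/*
+ * For reference (from <linux/refcount.h> at the time of writing):
+ * REFCOUNT_MAX is INT_MAX and REFCOUNT_SATURATED is INT_MIN / 2, so a
+ * saturated counter is parked far away from both zero and overflow.
+ */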
+
+/*
+ * A refcount_inc() above the maximum value of the refcount implementation,
+ * should at least saturate, and at most also WARN.
+ */
+static void lkdtm_REFCOUNT_INC_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
+
+ pr_info("attempting good refcount_inc() without overflow\n");
+ refcount_dec(&over);
+ refcount_inc(&over);
+
+ pr_info("attempting bad refcount_inc() overflow\n");
+ refcount_inc(&over);
+ refcount_inc(&over);
+
+ overflow_check(&over);
+}
+
+/* refcount_add() should behave just like refcount_inc() above. */
+static void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
+
+ pr_info("attempting good refcount_add() without overflow\n");
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_add(4, &over);
+
+ pr_info("attempting bad refcount_add() overflow\n");
+ refcount_add(4, &over);
+
+ overflow_check(&over);
+}
+
+/* refcount_inc_not_zero() should behave just like refcount_inc() above. */
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
+
+ pr_info("attempting bad refcount_inc_not_zero() overflow\n");
+ if (!refcount_inc_not_zero(&over))
+ pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
+
+ overflow_check(&over);
+}
+
+/* refcount_add_not_zero() should behave just like refcount_inc() above. */
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
+
+ pr_info("attempting bad refcount_add_not_zero() overflow\n");
+ if (!refcount_add_not_zero(6, &over))
+ pr_warn("Weird: refcount_add_not_zero() reported zero\n");
+
+ overflow_check(&over);
+}
+
+static void check_zero(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Zero detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Zero detected: unsafely reset to max\n");
+ break;
+ case 0:
+ pr_warn("Still at zero: refcount_inc/add() must not inc-from-0\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/*
+ * Unlike refcount_dec_and_test(), a refcount_dec() that hits zero should
+ * either saturate (when inc-from-zero isn't protected) or stay at zero
+ * (when inc-from-zero is protected), and should WARN in both cases.
+ */
+static void lkdtm_REFCOUNT_DEC_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(2);
+
+ pr_info("attempting good refcount_dec()\n");
+ refcount_dec(&zero);
+
+ pr_info("attempting bad refcount_dec() to zero\n");
+ refcount_dec(&zero);
+
+ check_zero(&zero);
+}
+
+static void check_negative(refcount_t *ref, int start)
+{
+ /*
+ * refcount_t refuses to move a refcount at all on an
+ * over-sub, so we have to track our starting position instead of
+ * looking only at zero-pinning.
+ */
+ if (refcount_read(ref) == start) {
+ pr_warn("Still at %d: refcount_inc/add() must not inc-from-0\n",
+ start);
+ return;
+ }
+
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Negative detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Negative detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/* A refcount_dec() going negative should saturate and may WARN. */
+static void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(0);
+
+ pr_info("attempting bad refcount_dec() below zero\n");
+ refcount_dec(&neg);
+
+ check_negative(&neg, 0);
+}
+
+/*
+ * A refcount_dec_and_test() should act like refcount_dec() above when
+ * going negative.
+ */
+static void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(0);
+
+ pr_info("attempting bad refcount_dec_and_test() below zero\n");
+ if (refcount_dec_and_test(&neg))
+ pr_warn("Weird: refcount_dec_and_test() reported zero\n");
+
+ check_negative(&neg, 0);
+}
+
+/*
+ * A refcount_sub_and_test() should act like refcount_dec_and_test()
+ * above when going negative.
+ */
+static void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(3);
+
+ pr_info("attempting bad refcount_sub_and_test() below zero\n");
+ if (refcount_sub_and_test(5, &neg))
+ pr_warn("Weird: refcount_sub_and_test() reported zero\n");
+
+ check_negative(&neg, 3);
+}
+
+static void check_from_zero(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case 0:
+ pr_info("Zero detected: stayed at zero\n");
+ break;
+ case REFCOUNT_SATURATED:
+ pr_info("Zero detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Zero detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_info("Fail: zero not detected, incremented to %d\n",
+ refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_inc() from zero should pin to zero or saturate and may WARN.
+ */
+static void lkdtm_REFCOUNT_INC_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(0);
+
+ pr_info("attempting safe refcount_inc_not_zero() from zero\n");
+ if (!refcount_inc_not_zero(&zero)) {
+ pr_info("Good: zero detected\n");
+ if (refcount_read(&zero) == 0)
+ pr_info("Correctly stayed at zero\n");
+ else
+ pr_err("Fail: refcount went past zero!\n");
+ } else {
+ pr_err("Fail: Zero not detected!?\n");
+ }
+
+ pr_info("attempting bad refcount_inc() from zero\n");
+ refcount_inc(&zero);
+
+ check_from_zero(&zero);
+}
+
+/*
+ * A refcount_add() should act like refcount_inc() above when starting
+ * at zero.
+ */
+static void lkdtm_REFCOUNT_ADD_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(0);
+
+ pr_info("attempting safe refcount_add_not_zero() from zero\n");
+ if (!refcount_add_not_zero(3, &zero)) {
+ pr_info("Good: zero detected\n");
+ if (refcount_read(&zero) == 0)
+ pr_info("Correctly stayed at zero\n");
+ else
+ pr_err("Fail: refcount went past zero\n");
+ } else {
+ pr_err("Fail: Zero not detected!?\n");
+ }
+
+ pr_info("attempting bad refcount_add() from zero\n");
+ refcount_add(3, &zero);
+
+ check_from_zero(&zero);
+}
+
+static void check_saturated(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Saturation detected: still saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Saturation detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_inc() from a saturated value should at most warn about
+ * being saturated already.
+ */
+static void lkdtm_REFCOUNT_INC_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_inc() from saturated\n");
+ refcount_inc(&sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+static void lkdtm_REFCOUNT_DEC_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_dec() from saturated\n");
+ refcount_dec(&sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+static void lkdtm_REFCOUNT_ADD_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+	pr_info("attempting bad refcount_add() from saturated\n");
+ refcount_add(8, &sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_inc_not_zero() from saturated\n");
+ if (!refcount_inc_not_zero(&sat))
+ pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_add_not_zero() from saturated\n");
+ if (!refcount_add_not_zero(7, &sat))
+ pr_warn("Weird: refcount_add_not_zero() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+static void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_dec_and_test() from saturated\n");
+ if (refcount_dec_and_test(&sat))
+ pr_warn("Weird: refcount_dec_and_test() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+static void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_sub_and_test() from saturated\n");
+ if (refcount_sub_and_test(8, &sat))
+ pr_warn("Weird: refcount_sub_and_test() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Used to time the existing atomic_t when used for reference counting */
+static void lkdtm_ATOMIC_TIMING(void)
+{
+ unsigned int i;
+ atomic_t count = ATOMIC_INIT(1);
+
+ for (i = 0; i < INT_MAX - 1; i++)
+ atomic_inc(&count);
+
+ for (i = INT_MAX; i > 0; i--)
+ if (atomic_dec_and_test(&count))
+ break;
+
+ if (i != 1)
+ pr_err("atomic timing: out of sync up/down cycle: %u\n", i - 1);
+ else
+ pr_info("atomic timing: done\n");
+}
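+
+/*
+ * The atomic_t baseline above can be measured the same way as the
+ * refcount_t version below, e.g.:
+ *   cd /sys/kernel/debug/provoke-crash
+ *   perf stat -B -- cat <(echo ATOMIC_TIMING) > DIRECT
+ */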
+
+/*
+ * This can be compared to ATOMIC_TIMING when implementing fast refcount
+ * protections. Looking at the number of CPU cycles tells the real story
+ * about performance. For example:
+ * cd /sys/kernel/debug/provoke-crash
+ * perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
+ */
+static void lkdtm_REFCOUNT_TIMING(void)
+{
+ unsigned int i;
+ refcount_t count = REFCOUNT_INIT(1);
+
+ for (i = 0; i < INT_MAX - 1; i++)
+ refcount_inc(&count);
+
+ for (i = INT_MAX; i > 0; i--)
+ if (refcount_dec_and_test(&count))
+ break;
+
+ if (i != 1)
+ pr_err("refcount: out of sync up/down cycle: %u\n", i - 1);
+ else
+ pr_info("refcount timing: done\n");
+}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(REFCOUNT_INC_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_DEC_ZERO),
+ CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_INC_ZERO),
+ CRASHTYPE(REFCOUNT_ADD_ZERO),
+ CRASHTYPE(REFCOUNT_INC_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_SATURATED),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
+ CRASHTYPE(ATOMIC_TIMING),
+ CRASHTYPE(REFCOUNT_TIMING),
+};
+
+struct crashtype_category refcount_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/rodata.c b/drivers/misc/lkdtm/rodata.c
new file mode 100644
index 0000000000..baacb876d1
--- /dev/null
+++ b/drivers/misc/lkdtm/rodata.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This includes functions that are meant to live entirely in .rodata
+ * (via objcopy tricks), to validate the non-executability of .rodata.
+ */
+#include "lkdtm.h"
+
+void noinstr lkdtm_rodata_do_nothing(void)
+{
+ /* Does nothing. We just want an architecture agnostic "return". */
+}
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
new file mode 100644
index 0000000000..f1d0221609
--- /dev/null
+++ b/drivers/misc/lkdtm/stackleak.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code tests that the current task stack is properly erased (filled
+ * with STACKLEAK_POISON).
+ *
+ * Authors:
+ * Alexander Popov <alex.popov@linux.com>
+ * Tycho Andersen <tycho@tycho.ws>
+ */
+
+#include "lkdtm.h"
+#include <linux/stackleak.h>
+
+#if defined(CONFIG_GCC_PLUGIN_STACKLEAK)
+/*
+ * Check that stackleak tracks the lowest stack pointer and erases the stack
+ * below this as expected.
+ *
+ * To prevent the lowest stack pointer changing during the test, IRQs are
+ * masked and instrumentation of this function is disabled. We assume that the
+ * compiler will create a fixed-size stack frame for this function.
+ *
+ * Any non-inlined function may make further use of the stack, altering the
+ * lowest stack pointer and/or clobbering poison values. To avoid spurious
+ * failures we must avoid printing until the end of the test or have already
+ * encountered a failure condition.
+ */
+static void noinstr check_stackleak_irqoff(void)
+{
+ const unsigned long task_stack_base = (unsigned long)task_stack_page(current);
+ const unsigned long task_stack_low = stackleak_task_low_bound(current);
+ const unsigned long task_stack_high = stackleak_task_high_bound(current);
+ const unsigned long current_sp = current_stack_pointer;
+ const unsigned long lowest_sp = current->lowest_stack;
+ unsigned long untracked_high;
+ unsigned long poison_high, poison_low;
+ bool test_failed = false;
+
+ /*
+ * Check that the current and lowest recorded stack pointer values fall
+ * within the expected task stack boundaries. These tests should never
+ * fail unless the boundaries are incorrect or we're clobbering the
+	 * STACK_END_MAGIC, and in either case something is seriously wrong.
+ */
+ if (current_sp < task_stack_low || current_sp >= task_stack_high) {
+ instrumentation_begin();
+ pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
+ current_sp, task_stack_low, task_stack_high - 1);
+ test_failed = true;
+ goto out;
+ }
+ if (lowest_sp < task_stack_low || lowest_sp >= task_stack_high) {
+ instrumentation_begin();
+ pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",
+ lowest_sp, task_stack_low, task_stack_high - 1);
+ test_failed = true;
+ goto out;
+ }
+
+ /*
+ * Depending on what has run prior to this test, the lowest recorded
+ * stack pointer could be above or below the current stack pointer.
+ * Start from the lowest of the two.
+ *
+ * Poison values are naturally-aligned unsigned longs. As the current
+ * stack pointer might not be sufficiently aligned, we must align
+ * downwards to find the lowest known stack pointer value. This is the
+ * high boundary for a portion of the stack which may have been used
+ * without being tracked, and has to be scanned for poison.
+ */
+ untracked_high = min(current_sp, lowest_sp);
+ untracked_high = ALIGN_DOWN(untracked_high, sizeof(unsigned long));
+
+ /*
+ * Find the top of the poison in the same way as the erasing code.
+ */
+ poison_high = stackleak_find_top_of_poison(task_stack_low, untracked_high);
+
+ /*
+ * Check whether the poisoned portion of the stack (if any) consists
+ * entirely of poison. This verifies the entries that
+ * stackleak_find_top_of_poison() should have checked.
+ */
+ poison_low = poison_high;
+ while (poison_low > task_stack_low) {
+ poison_low -= sizeof(unsigned long);
+
+ if (*(unsigned long *)poison_low == STACKLEAK_POISON)
+ continue;
+
+ instrumentation_begin();
+ pr_err("FAIL: non-poison value %lu bytes below poison boundary: 0x%lx\n",
+ poison_high - poison_low, *(unsigned long *)poison_low);
+ test_failed = true;
+ goto out;
+ }
+
+ instrumentation_begin();
+ pr_info("stackleak stack usage:\n"
+ " high offset: %lu bytes\n"
+ " current: %lu bytes\n"
+ " lowest: %lu bytes\n"
+ " tracked: %lu bytes\n"
+ " untracked: %lu bytes\n"
+ " poisoned: %lu bytes\n"
+ " low offset: %lu bytes\n",
+ task_stack_base + THREAD_SIZE - task_stack_high,
+ task_stack_high - current_sp,
+ task_stack_high - lowest_sp,
+ task_stack_high - untracked_high,
+ untracked_high - poison_high,
+ poison_high - task_stack_low,
+ task_stack_low - task_stack_base);
+
+out:
+ if (test_failed) {
+ pr_err("FAIL: the thread stack is NOT properly erased!\n");
+ } else {
+ pr_info("OK: the rest of the thread stack is properly erased\n");
+ }
+ instrumentation_end();
+}
+
+static void lkdtm_STACKLEAK_ERASING(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ check_stackleak_irqoff();
+ local_irq_restore(flags);
+}
+#else /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
+static void lkdtm_STACKLEAK_ERASING(void)
+{
+ if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) {
+ pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n");
+ } else {
+ pr_err("XFAIL: stackleak is not supported on this arch (HAVE_ARCH_STACKLEAK=n)\n");
+ }
+}
+#endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(STACKLEAK_ERASING),
+};
+
+struct crashtype_category stackleak_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
new file mode 100644
index 0000000000..67db57249a
--- /dev/null
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to copy_to_user() and copy_from_user()
+ * hardening.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Many of the tests here end up using const sizes, but those would
+ * normally be ignored by hardened usercopy, so force the compiler
+ * into choosing the non-const path to make sure we trigger the
+ * hardened usercopy checks by adding "unconst" to all the const copies
+ * and by making sure "cache_size" isn't optimized into a const.
+ */
+static volatile size_t unconst;
+static volatile size_t cache_size = 1024;
+static struct kmem_cache *whitelist_cache;
+
+static const unsigned char test_text[] = "This is a test.\n";
+
+/*
+ * Instead of adding -Wno-return-local-addr, just pass the stack address
+ * through a function to obfuscate it from the compiler.
+ */
+static noinline unsigned char *trick_compiler(unsigned char *stack)
+{
+ return stack + unconst;
+}
+
+static noinline unsigned char *do_usercopy_stack_callee(int value)
+{
+ unsigned char buf[128];
+ int i;
+
+ /* Exercise stack to avoid everything living in registers. */
+ for (i = 0; i < sizeof(buf); i++) {
+ buf[i] = value & 0xff;
+ }
+
+ /*
+ * Put the target buffer in the middle of stack allocation
+ * so that we don't step on future stack users regardless
+ * of stack growth direction.
+ */
+ return trick_compiler(&buf[(128/2)-32]);
+}
+
+static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
+{
+ unsigned long user_addr;
+ unsigned char good_stack[32];
+ unsigned char *bad_stack;
+ int i;
+
+ /* Exercise stack to avoid everything living in registers. */
+ for (i = 0; i < sizeof(good_stack); i++)
+ good_stack[i] = test_text[i % sizeof(test_text)];
+
+ /* This is a pointer to outside our current stack frame. */
+ if (bad_frame) {
+ bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
+ } else {
+ /* Put start address just inside stack. */
+ bad_stack = task_stack_page(current) + THREAD_SIZE;
+ bad_stack -= sizeof(unsigned long);
+ }
+
+#ifdef ARCH_HAS_CURRENT_STACK_POINTER
+ pr_info("stack : %px\n", (void *)current_stack_pointer);
+#endif
+ pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
+ pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user of local stack\n");
+ if (copy_to_user((void __user *)user_addr, good_stack,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user of distant stack\n");
+ if (copy_to_user((void __user *)user_addr, bad_stack,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ /*
+ * There isn't a safe way to not be protected by usercopy
+ * if we're going to write to another thread's stack.
+ */
+ if (!bad_frame)
+ goto free_user;
+
+ pr_info("attempting good copy_from_user of local stack\n");
+ if (copy_from_user(good_stack, (void __user *)user_addr,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user of distant stack\n");
+ if (copy_from_user(bad_stack, (void __user *)user_addr,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+/*
+ * This checks for whole-object size validation with hardened usercopy,
+ * with or without usercopy whitelisting.
+ */
+static void do_usercopy_slab_size(bool to_user)
+{
+ unsigned long user_addr;
+ unsigned char *one, *two;
+ void __user *test_user_addr;
+ void *test_kern_addr;
+ size_t size = unconst + 1024;
+
+ one = kmalloc(size, GFP_KERNEL);
+ two = kmalloc(size, GFP_KERNEL);
+ if (!one || !two) {
+ pr_warn("Failed to allocate kernel memory\n");
+ goto free_kernel;
+ }
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ goto free_kernel;
+ }
+
+ memset(one, 'A', size);
+ memset(two, 'B', size);
+
+ test_user_addr = (void __user *)(user_addr + 16);
+ test_kern_addr = one + 16;
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user of correct size\n");
+ if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user of too large size\n");
+ if (copy_to_user(test_user_addr, test_kern_addr, size)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ pr_info("attempting good copy_from_user of correct size\n");
+ if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user of too large size\n");
+ if (copy_from_user(test_kern_addr, test_user_addr, size)) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+ pr_err("FAIL: bad usercopy not detected!\n");
+ pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+free_kernel:
+ kfree(one);
+ kfree(two);
+}
+
+/*
+ * This checks for the specific whitelist window within an object. If this
+ * test passes, then do_usercopy_slab_size() tests will pass too.
+ */
+static void do_usercopy_slab_whitelist(bool to_user)
+{
+ unsigned long user_alloc;
+ unsigned char *buf = NULL;
+ unsigned char __user *user_addr;
+ size_t offset, size;
+
+ /* Make sure cache was prepared. */
+ if (!whitelist_cache) {
+ pr_warn("Failed to allocate kernel cache\n");
+ return;
+ }
+
+ /*
+ * Allocate a buffer with a whitelisted window in the buffer.
+ */
+ buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
+ if (!buf) {
+ pr_warn("Failed to allocate buffer from whitelist cache\n");
+ goto free_alloc;
+ }
+
+ /* Allocate user memory we'll poke at. */
+ user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_alloc >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ goto free_alloc;
+ }
+ user_addr = (void __user *)user_alloc;
+
+ memset(buf, 'B', cache_size);
+
+ /* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
+ offset = (cache_size / 4) + unconst;
+ size = (cache_size / 16) + unconst;
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user inside whitelist\n");
+ if (copy_to_user(user_addr, buf + offset, size)) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user outside whitelist\n");
+ if (copy_to_user(user_addr, buf + offset - 1, size)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ pr_info("attempting good copy_from_user inside whitelist\n");
+ if (copy_from_user(buf + offset, user_addr, size)) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user outside whitelist\n");
+ if (copy_from_user(buf + offset - 1, user_addr, size)) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+ pr_err("FAIL: bad usercopy not detected!\n");
+ pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+ vm_munmap(user_alloc, PAGE_SIZE);
+free_alloc:
+ if (buf)
+ kmem_cache_free(whitelist_cache, buf);
+}
+
+/* Callable tests. */
+static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
+{
+ do_usercopy_slab_size(true);
+}
+
+static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
+{
+ do_usercopy_slab_size(false);
+}
+
+static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
+{
+ do_usercopy_slab_whitelist(true);
+}
+
+static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
+{
+ do_usercopy_slab_whitelist(false);
+}
+
+static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+{
+ do_usercopy_stack(true, true);
+}
+
+static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+{
+ do_usercopy_stack(false, true);
+}
+
+static void lkdtm_USERCOPY_STACK_BEYOND(void)
+{
+ do_usercopy_stack(true, false);
+}
+
+static void lkdtm_USERCOPY_KERNEL(void)
+{
+ unsigned long user_addr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ pr_info("attempting good copy_to_user from kernel rodata: %px\n",
+ test_text);
+ if (copy_to_user((void __user *)user_addr, test_text,
+ unconst + sizeof(test_text))) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user from kernel text: %px\n",
+ vm_mmap);
+ if (copy_to_user((void __user *)user_addr, vm_mmap,
+ unconst + PAGE_SIZE)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ pr_err("FAIL: bad copy_to_user() not detected!\n");
+ pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+/*
+ * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
+ * a more complete test that would include copy_from_user() would risk
+ * memory corruption. Just test copy_to_user() here, as that exercises
+ * almost exactly the same code paths.
+ */
+static void do_usercopy_page_span(const char *name, void *kaddr)
+{
+ unsigned long uaddr;
+
+ uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (uaddr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ /* Initialize contents. */
+ memset(kaddr, 0xAA, PAGE_SIZE);
+
+ /* Bump the kaddr forward to detect a page-spanning overflow. */
+ kaddr += PAGE_SIZE / 2;
+
+ pr_info("attempting good copy_to_user() from kernel %s: %px\n",
+ name, kaddr);
+ if (copy_to_user((void __user *)uaddr, kaddr,
+ unconst + (PAGE_SIZE / 2))) {
+ pr_err("copy_to_user() failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
+ name, kaddr);
+ if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
+ pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
+ goto free_user;
+ }
+
+ pr_err("FAIL: bad copy_to_user() not detected!\n");
+ pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+ vm_munmap(uaddr, PAGE_SIZE);
+}
+
+static void lkdtm_USERCOPY_VMALLOC(void)
+{
+ void *addr;
+
+ addr = vmalloc(PAGE_SIZE);
+ if (!addr) {
+ pr_err("vmalloc() failed!?\n");
+ return;
+ }
+ do_usercopy_page_span("vmalloc", addr);
+ vfree(addr);
+}
+
+static void lkdtm_USERCOPY_FOLIO(void)
+{
+ struct folio *folio;
+ void *addr;
+
+ /*
+ * FIXME: Folio checking currently misses 0-order allocations, so
+ * allocate and bump forward to the last page.
+ */
+ folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!folio) {
+ pr_err("folio_alloc() failed!?\n");
+ return;
+ }
+ addr = folio_address(folio);
+ if (addr)
+ do_usercopy_page_span("folio", addr + PAGE_SIZE);
+ else
+ pr_err("folio_address() failed?!\n");
+ folio_put(folio);
+}
+
+void __init lkdtm_usercopy_init(void)
+{
+	/* Prepare a cache with a usercopy whitelist window. */
+ whitelist_cache =
+ kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
+ 0, 0,
+ cache_size / 4,
+ cache_size / 16,
+ NULL);
+}
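+
+/*
+ * With the default cache_size of 1024 above, the usercopy whitelist
+ * window created by kmem_cache_create_usercopy() spans bytes 256..319
+ * of each object (useroffset = cache_size / 4 = 256, usersize =
+ * cache_size / 16 = 64), matching the offset and size used by
+ * do_usercopy_slab_whitelist().
+ */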
+
+void __exit lkdtm_usercopy_exit(void)
+{
+ kmem_cache_destroy(whitelist_cache);
+}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
+ CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
+ CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
+ CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
+ CRASHTYPE(USERCOPY_STACK_FRAME_TO),
+ CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
+ CRASHTYPE(USERCOPY_STACK_BEYOND),
+ CRASHTYPE(USERCOPY_VMALLOC),
+ CRASHTYPE(USERCOPY_FOLIO),
+ CRASHTYPE(USERCOPY_KERNEL),
+};
+
+struct crashtype_category usercopy_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};