Diffstat (limited to 'drivers/misc/lkdtm')
-rw-r--r--  drivers/misc/lkdtm/Makefile      22
-rw-r--r--  drivers/misc/lkdtm/bugs.c       492
-rw-r--r--  drivers/misc/lkdtm/cfi.c         42
-rw-r--r--  drivers/misc/lkdtm/core.c       489
-rw-r--r--  drivers/misc/lkdtm/heap.c       221
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h      105
-rw-r--r--  drivers/misc/lkdtm/perms.c      229
-rw-r--r--  drivers/misc/lkdtm/refcount.c   392
-rw-r--r--  drivers/misc/lkdtm/rodata.c      11
-rw-r--r--  drivers/misc/lkdtm/stackleak.c   82
-rw-r--r--  drivers/misc/lkdtm/usercopy.c   353
11 files changed, 2438 insertions, 0 deletions
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
new file mode 100644
index 000000000..4405fb2bc
--- /dev/null
+++ b/drivers/misc/lkdtm/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LKDTM) += lkdtm.o
+
+lkdtm-$(CONFIG_LKDTM) += core.o
+lkdtm-$(CONFIG_LKDTM) += bugs.o
+lkdtm-$(CONFIG_LKDTM) += heap.o
+lkdtm-$(CONFIG_LKDTM) += perms.o
+lkdtm-$(CONFIG_LKDTM) += refcount.o
+lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o
+lkdtm-$(CONFIG_LKDTM) += usercopy.o
+lkdtm-$(CONFIG_LKDTM) += stackleak.o
+lkdtm-$(CONFIG_LKDTM) += cfi.o
+
+KASAN_SANITIZE_stackleak.o := n
+KCOV_INSTRUMENT_rodata.o := n
+
+OBJCOPYFLAGS :=
+OBJCOPYFLAGS_rodata_objcopy.o := \
+ --rename-section .noinstr.text=.rodata,alloc,readonly,load,contents
+targets += rodata.o rodata_objcopy.o
+$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
+ $(call if_changed,objcopy)
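The objcopy rule above is what lets the helper in rodata.c be compiled as ordinary code and then moved into the read-only data section. The $(call if_changed,objcopy) step expands to roughly the following invocation (illustrative only; the real command is assembled from the kernel's OBJCOPY build variables):

    objcopy --rename-section .noinstr.text=.rodata,alloc,readonly,load,contents \
            drivers/misc/lkdtm/rodata.o drivers/misc/lkdtm/rodata_objcopy.o

Because lkdtm_rodata_do_nothing() is marked noinstr it is emitted into .noinstr.text, which this rename places into .rodata, so the EXEC_RODATA test in perms.c can try to execute it and expect a fault wherever .rodata is mapped non-executable.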
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
new file mode 100644
index 000000000..d39b8139b
--- /dev/null
+++ b/drivers/misc/lkdtm/bugs.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to logic bugs (e.g. bad dereferences,
+ * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
+ * lockups) along with other things that don't fit well into existing LKDTM
+ * test source files.
+ */
+#include "lkdtm.h"
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/task_stack.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
+#include <asm/desc.h>
+#endif
+
+struct lkdtm_list {
+ struct list_head node;
+};
+
+/*
+ * Make sure our attempts to overrun the kernel stack don't trigger
+ * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
+ * recurse past the end of THREAD_SIZE by default.
+ */
+#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
+#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
+#else
+#define REC_STACK_SIZE (THREAD_SIZE / 8)
+#endif
+#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
+
+static int recur_count = REC_NUM_DEFAULT;
+
+static DEFINE_SPINLOCK(lock_me_up);
+
+/*
+ * Make sure compiler does not optimize this function or stack frame away:
+ * - function marked noinline
+ * - stack variables are marked volatile
+ * - stack variables are written (memset()) and read (pr_info())
+ * - function has external effects (pr_info())
+ */
+static int noinline recursive_loop(int remaining)
+{
+ volatile char buf[REC_STACK_SIZE];
+
+ memset((void *)buf, remaining & 0xFF, sizeof(buf));
+ pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
+ recur_count);
+ if (!remaining)
+ return 0;
+ else
+ return recursive_loop(remaining - 1);
+}
+
+/* If the depth is negative, use the default, otherwise keep parameter. */
+void __init lkdtm_bugs_init(int *recur_param)
+{
+ if (*recur_param < 0)
+ *recur_param = recur_count;
+ else
+ recur_count = *recur_param;
+}
+
+void lkdtm_PANIC(void)
+{
+ panic("dumptest");
+}
+
+void lkdtm_BUG(void)
+{
+ BUG();
+}
+
+static int warn_counter;
+
+void lkdtm_WARNING(void)
+{
+ WARN_ON(++warn_counter);
+}
+
+void lkdtm_WARNING_MESSAGE(void)
+{
+ WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
+}
+
+void lkdtm_EXCEPTION(void)
+{
+ *((volatile int *) 0) = 0;
+}
+
+void lkdtm_LOOP(void)
+{
+ for (;;)
+ ;
+}
+
+void lkdtm_EXHAUST_STACK(void)
+{
+ pr_info("Calling function with %lu frame size to depth %d ...\n",
+ REC_STACK_SIZE, recur_count);
+ recursive_loop(recur_count);
+ pr_info("FAIL: survived without exhausting stack?!\n");
+}
+
+static noinline void __lkdtm_CORRUPT_STACK(void *stack)
+{
+ memset(stack, '\xff', 64);
+}
+
+/* This should trip the stack canary, not corrupt the return address. */
+noinline void lkdtm_CORRUPT_STACK(void)
+{
+ /* Use default char array length that triggers stack protection. */
+ char data[8] __aligned(sizeof(void *));
+
+ pr_info("Corrupting stack containing char array ...\n");
+ __lkdtm_CORRUPT_STACK((void *)&data);
+}
+
+/* Same as above but will only get a canary with -fstack-protector-strong */
+noinline void lkdtm_CORRUPT_STACK_STRONG(void)
+{
+ union {
+ unsigned short shorts[4];
+ unsigned long *ptr;
+ } data __aligned(sizeof(void *));
+
+ pr_info("Corrupting stack containing union ...\n");
+ __lkdtm_CORRUPT_STACK((void *)&data);
+}
+
+void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+{
+ static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
+ u32 *p;
+ u32 val = 0x12345678;
+
+ p = (u32 *)(data + 1);
+ if (*p == 0)
+ val = 0x87654321;
+ *p = val;
+
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
+}
+
+void lkdtm_SOFTLOCKUP(void)
+{
+ preempt_disable();
+ for (;;)
+ cpu_relax();
+}
+
+void lkdtm_HARDLOCKUP(void)
+{
+ local_irq_disable();
+ for (;;)
+ cpu_relax();
+}
+
+void lkdtm_SPINLOCKUP(void)
+{
+ /* Must be called twice to trigger. */
+ spin_lock(&lock_me_up);
+ /* Let sparse know we intended to exit holding the lock. */
+ __release(&lock_me_up);
+}
+
+void lkdtm_HUNG_TASK(void)
+{
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+}
+
+volatile unsigned int huge = INT_MAX - 2;
+volatile unsigned int ignored;
+
+void lkdtm_OVERFLOW_SIGNED(void)
+{
+ int value;
+
+ value = huge;
+ pr_info("Normal signed addition ...\n");
+ value += 1;
+ ignored = value;
+
+ pr_info("Overflowing signed addition ...\n");
+ value += 4;
+ ignored = value;
+}
+
+
+void lkdtm_OVERFLOW_UNSIGNED(void)
+{
+ unsigned int value;
+
+ value = huge;
+ pr_info("Normal unsigned addition ...\n");
+ value += 1;
+ ignored = value;
+
+ pr_info("Overflowing unsigned addition ...\n");
+ value += 4;
+ ignored = value;
+}
+
+/* Intentionally using old-style flex array definition of 1 byte. */
+struct array_bounds_flex_array {
+ int one;
+ int two;
+ char data[1];
+};
+
+struct array_bounds {
+ int one;
+ int two;
+ char data[8];
+ int three;
+};
+
+void lkdtm_ARRAY_BOUNDS(void)
+{
+ struct array_bounds_flex_array *not_checked;
+ struct array_bounds *checked;
+ volatile int i;
+
+ not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
+ checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+ if (!not_checked || !checked) {
+ kfree(not_checked);
+ kfree(checked);
+ return;
+ }
+
+ pr_info("Array access within bounds ...\n");
+ /* For both, touch all bytes in the actual member size. */
+ for (i = 0; i < sizeof(checked->data); i++)
+ checked->data[i] = 'A';
+ /*
+ * For the uninstrumented flex array member, also touch 1 byte
+ * beyond to verify it is correctly uninstrumented.
+ */
+ for (i = 0; i < sizeof(not_checked->data) + 1; i++)
+ not_checked->data[i] = 'A';
+
+ pr_info("Array access beyond bounds ...\n");
+ for (i = 0; i < sizeof(checked->data) + 1; i++)
+ checked->data[i] = 'B';
+
+ kfree(not_checked);
+ kfree(checked);
+ pr_err("FAIL: survived array bounds overflow!\n");
+}
+
+void lkdtm_CORRUPT_LIST_ADD(void)
+{
+ /*
+ * Initially, an empty list via LIST_HEAD:
+ * test_head.next = &test_head
+ * test_head.prev = &test_head
+ */
+ LIST_HEAD(test_head);
+ struct lkdtm_list good, bad;
+ void *target[2] = { };
+ void *redirection = &target;
+
+ pr_info("attempting good list addition\n");
+
+ /*
+ * Adding to the list performs these actions:
+ * test_head.next->prev = &good.node
+ * good.node.next = test_head.next
+ * good.node.prev = &test_head
+ * test_head.next = &good.node
+ */
+ list_add(&good.node, &test_head);
+
+ pr_info("attempting corrupted list addition\n");
+ /*
+ * In simulating this "write what where" primitive, the "what" is
+ * the address of bad.node, and the "where" is the address held
+ * by "redirection".
+ */
+ test_head.next = redirection;
+ list_add(&bad.node, &test_head);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else
+ pr_err("list_add() corruption not detected!\n");
+}
+
+void lkdtm_CORRUPT_LIST_DEL(void)
+{
+ LIST_HEAD(test_head);
+ struct lkdtm_list item;
+ void *target[2] = { };
+ void *redirection = &target;
+
+ list_add(&item.node, &test_head);
+
+ pr_info("attempting good list removal\n");
+ list_del(&item.node);
+
+ pr_info("attempting corrupted list removal\n");
+ list_add(&item.node, &test_head);
+
+ /* As with the list_add() test above, this corrupts "next". */
+ item.node.next = redirection;
+ list_del(&item.node);
+
+ if (target[0] == NULL && target[1] == NULL)
+ pr_err("Overwrite did not happen, but no BUG?!\n");
+ else
+ pr_err("list_del() corruption not detected!\n");
+}
+
+/* Test that VMAP_STACK is actually allocating with a leading guard page */
+void lkdtm_STACK_GUARD_PAGE_LEADING(void)
+{
+ const unsigned char *stack = task_stack_page(current);
+ const unsigned char *ptr = stack - 1;
+ volatile unsigned char byte;
+
+ pr_info("attempting bad read from page below current stack\n");
+
+ byte = *ptr;
+
+ pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
+}
+
+/* Test that VMAP_STACK is actually allocating with a trailing guard page */
+void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
+{
+ const unsigned char *stack = task_stack_page(current);
+ const unsigned char *ptr = stack + THREAD_SIZE;
+ volatile unsigned char byte;
+
+ pr_info("attempting bad read from page above current stack\n");
+
+ byte = *ptr;
+
+ pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
+}
+
+void lkdtm_UNSET_SMEP(void)
+{
+#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
+#define MOV_CR4_DEPTH 64
+ void (*direct_write_cr4)(unsigned long val);
+ unsigned char *insn;
+ unsigned long cr4;
+ int i;
+
+ cr4 = native_read_cr4();
+
+ if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
+ pr_err("FAIL: SMEP not in use\n");
+ return;
+ }
+ cr4 &= ~(X86_CR4_SMEP);
+
+ pr_info("trying to clear SMEP normally\n");
+ native_write_cr4(cr4);
+ if (cr4 == native_read_cr4()) {
+ pr_err("FAIL: pinning SMEP failed!\n");
+ cr4 |= X86_CR4_SMEP;
+ pr_info("restoring SMEP\n");
+ native_write_cr4(cr4);
+ return;
+ }
+ pr_info("ok: SMEP did not get cleared\n");
+
+ /*
+ * To test the post-write pinning verification we need to call
+ * directly into the middle of native_write_cr4() where the
+ * cr4 write happens, skipping any pinning. This searches for
+ * the cr4 writing instruction.
+ */
+ insn = (unsigned char *)native_write_cr4;
+ for (i = 0; i < MOV_CR4_DEPTH; i++) {
+ /* mov %rdi, %cr4 */
+ if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
+ break;
+ /* mov %rdi,%rax; mov %rax, %cr4 */
+ if (insn[i] == 0x48 && insn[i+1] == 0x89 &&
+ insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
+ insn[i+4] == 0x22 && insn[i+5] == 0xe0)
+ break;
+ }
+ if (i >= MOV_CR4_DEPTH) {
+ pr_info("ok: cannot locate cr4 writing call gadget\n");
+ return;
+ }
+ direct_write_cr4 = (void *)(insn + i);
+
+ pr_info("trying to clear SMEP with call gadget\n");
+ direct_write_cr4(cr4);
+ if (native_read_cr4() & X86_CR4_SMEP) {
+ pr_info("ok: SMEP removal was reverted\n");
+ } else {
+ pr_err("FAIL: cleared SMEP not detected!\n");
+ cr4 |= X86_CR4_SMEP;
+ pr_info("restoring SMEP\n");
+ native_write_cr4(cr4);
+ }
+#else
+ pr_err("XFAIL: this test is x86_64-only\n");
+#endif
+}
+
+void lkdtm_DOUBLE_FAULT(void)
+{
+#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
+ /*
+ * Trigger #DF by setting the stack limit to zero. This clobbers
+ * a GDT TLS slot, which is okay because the current task will die
+ * anyway due to the double fault.
+ */
+ struct desc_struct d = {
+ .type = 3, /* expand-up, writable, accessed data */
+ .p = 1, /* present */
+ .d = 1, /* 32-bit */
+ .g = 0, /* limit in bytes */
+ .s = 1, /* not system */
+ };
+
+ local_irq_disable();
+ write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
+ GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);
+
+ /*
+ * Put our zero-limit segment in SS and then trigger a fault. The
+ * 4-byte access to (%esp) will fault with #SS, and the attempt to
+ * deliver the fault will recursively cause #SS and result in #DF.
+ * This whole process happens while NMIs and MCEs are blocked by the
+ * MOV SS window. This is nice because an NMI with an invalid SS
+ * would also double-fault, resulting in the NMI or MCE being lost.
+ */
+ asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
+ "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));
+
+ pr_err("FAIL: tried to double fault but didn't die\n");
+#else
+ pr_err("XFAIL: this test is ia32-only\n");
+#endif
+}
+
+#ifdef CONFIG_ARM64
+static noinline void change_pac_parameters(void)
+{
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) {
+ /* Reset the keys of current task */
+ ptrauth_thread_init_kernel(current);
+ ptrauth_thread_switch_kernel(current);
+ }
+}
+#endif
+
+noinline void lkdtm_CORRUPT_PAC(void)
+{
+#ifdef CONFIG_ARM64
+#define CORRUPT_PAC_ITERATE 10
+ int i;
+
+ if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
+ pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n");
+
+ if (!system_supports_address_auth()) {
+ pr_err("FAIL: CPU lacks pointer authentication feature\n");
+ return;
+ }
+
+ pr_info("changing PAC parameters to force function return failure...\n");
+ /*
+ * PAC is a hash value computed from input keys, return address and
+ * stack pointer. Since the PAC has relatively few bits, collisions are
+ * possible, so iterate a few times to reduce the collision probability.
+ */
+ for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
+ change_pac_parameters();
+
+ pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
+#else
+ pr_err("XFAIL: this test is arm64-only\n");
+#endif
+}
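The CORRUPT_LIST_ADD and CORRUPT_LIST_DEL tests above only print their FAIL messages if the bogus write actually reaches the target[] array, i.e. if nothing caught the redirected pointer first. They are meant to be run with CONFIG_DEBUG_LIST, whose sanity checks behave roughly like this sketch (simplified; the real checks live in lib/list_debug.c and WARN with much more detail):

static bool sketch_list_add_valid(struct list_head *new,
				  struct list_head *prev,
				  struct list_head *next)
{
	/* A redirected test_head.next fails the next->prev back-link check. */
	if (next->prev != prev || prev->next != next || new == prev || new == next)
		return false;		/* the list_add() is refused */
	return true;
}

static bool sketch_list_del_valid(struct list_head *entry)
{
	/* A redirected item.node.next fails the next->prev back-link check. */
	if (entry->prev->next != entry || entry->next->prev != entry)
		return false;		/* the list_del() is refused */
	return true;
}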
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
new file mode 100644
index 000000000..e73ebdbfa
--- /dev/null
+++ b/drivers/misc/lkdtm/cfi.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests relating directly to Control Flow Integrity.
+ */
+#include "lkdtm.h"
+
+static int called_count;
+
+/* Function taking one argument, without a return value. */
+static noinline void lkdtm_increment_void(int *counter)
+{
+ (*counter)++;
+}
+
+/* Function taking one argument, returning int. */
+static noinline int lkdtm_increment_int(int *counter)
+{
+ (*counter)++;
+
+ return *counter;
+}
+/*
+ * This tries to call an indirect function with a mismatched prototype.
+ */
+void lkdtm_CFI_FORWARD_PROTO(void)
+{
+ /*
+ * Matches lkdtm_increment_void()'s prototype, but not
+ * lkdtm_increment_int()'s prototype.
+ */
+ void (*func)(int *);
+
+ pr_info("Calling matched prototype ...\n");
+ func = lkdtm_increment_void;
+ func(&called_count);
+
+ pr_info("Calling mismatched prototype ...\n");
+ func = (void *)lkdtm_increment_int;
+ func(&called_count);
+
+ pr_info("Fail: survived mismatched prototype function call!\n");
+}
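CFI_FORWARD_PROTO depends on the forward-edge check a CFI-enabled build inserts at every indirect call site: the compiler tags each address-taken function with an identifier derived from its prototype, and the call site verifies that tag before branching. A loose, self-contained sketch of the idea (not how the real compiler instrumentation works):

struct cfi_sketch_target {
	void (*addr)(int *counter);
	unsigned long type_id;	/* stand-in for the compiler-generated prototype hash */
};

static bool cfi_sketch_call_allowed(const struct cfi_sketch_target *target,
				    unsigned long expected_type_id)
{
	/* lkdtm_increment_int() cast to void (*)(int *) carries the wrong id. */
	return target->type_id == expected_type_id;
}

With a check like this in place, the second func() call above is expected to trap instead of returning.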
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
new file mode 100644
index 000000000..32b3d7736
--- /dev/null
+++ b/drivers/misc/lkdtm/core.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linux Kernel Dump Test Module for testing kernel crash conditions:
+ * induces system failures at predefined crashpoints and under predefined
+ * operational conditions in order to evaluate the reliability of kernel
+ * sanity checking and crash dumps obtained using different dumping
+ * solutions.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Ankita Garg <ankita@in.ibm.com>
+ *
+ * It is adapted from the Linux Kernel Dump Test Tool by
+ * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
+ *
+ * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
+ *
+ * See Documentation/fault-injection/provoke-crashes.rst for instructions
+ */
+#include "lkdtm.h"
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/kprobes.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+
+#define DEFAULT_COUNT 10
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off);
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off);
+
+#ifdef CONFIG_KPROBES
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs);
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+ const char __user *user_buf,
+ size_t count, loff_t *off);
+# define CRASHPOINT_KPROBE(_symbol) \
+ .kprobe = { \
+ .symbol_name = (_symbol), \
+ .pre_handler = lkdtm_kprobe_handler, \
+ },
+# define CRASHPOINT_WRITE(_symbol) \
+ (_symbol) ? lkdtm_debugfs_entry : direct_entry
+#else
+# define CRASHPOINT_KPROBE(_symbol)
+# define CRASHPOINT_WRITE(_symbol) direct_entry
+#endif
+
+/* Crash points */
+struct crashpoint {
+ const char *name;
+ const struct file_operations fops;
+ struct kprobe kprobe;
+};
+
+#define CRASHPOINT(_name, _symbol) \
+ { \
+ .name = _name, \
+ .fops = { \
+ .read = lkdtm_debugfs_read, \
+ .llseek = generic_file_llseek, \
+ .open = lkdtm_debugfs_open, \
+ .write = CRASHPOINT_WRITE(_symbol) \
+ }, \
+ CRASHPOINT_KPROBE(_symbol) \
+ }
+
+/* Define the possible places where we can trigger a crash point. */
+static struct crashpoint crashpoints[] = {
+ CRASHPOINT("DIRECT", NULL),
+#ifdef CONFIG_KPROBES
+ CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"),
+ CRASHPOINT("INT_HW_IRQ_EN", "handle_irq_event"),
+ CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"),
+ CRASHPOINT("FS_DEVRW", "ll_rw_block"),
+ CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
+ CRASHPOINT("TIMERADD", "hrtimer_start"),
+ CRASHPOINT("SCSI_QUEUE_RQ", "scsi_queue_rq"),
+ CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
+#endif
+};
+
+
+/* Crash types. */
+struct crashtype {
+ const char *name;
+ void (*func)(void);
+};
+
+#define CRASHTYPE(_name) \
+ { \
+ .name = __stringify(_name), \
+ .func = lkdtm_ ## _name, \
+ }
+
+/* Define the possible types of crashes that can be triggered. */
+static const struct crashtype crashtypes[] = {
+ CRASHTYPE(PANIC),
+ CRASHTYPE(BUG),
+ CRASHTYPE(WARNING),
+ CRASHTYPE(WARNING_MESSAGE),
+ CRASHTYPE(EXCEPTION),
+ CRASHTYPE(LOOP),
+ CRASHTYPE(EXHAUST_STACK),
+ CRASHTYPE(CORRUPT_STACK),
+ CRASHTYPE(CORRUPT_STACK_STRONG),
+ CRASHTYPE(CORRUPT_LIST_ADD),
+ CRASHTYPE(CORRUPT_LIST_DEL),
+ CRASHTYPE(STACK_GUARD_PAGE_LEADING),
+ CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
+ CRASHTYPE(UNSET_SMEP),
+ CRASHTYPE(CORRUPT_PAC),
+ CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+ CRASHTYPE(OVERWRITE_ALLOCATION),
+ CRASHTYPE(WRITE_AFTER_FREE),
+ CRASHTYPE(READ_AFTER_FREE),
+ CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
+ CRASHTYPE(READ_BUDDY_AFTER_FREE),
+ CRASHTYPE(SLAB_FREE_DOUBLE),
+ CRASHTYPE(SLAB_FREE_CROSS),
+ CRASHTYPE(SLAB_FREE_PAGE),
+ CRASHTYPE(SOFTLOCKUP),
+ CRASHTYPE(HARDLOCKUP),
+ CRASHTYPE(SPINLOCKUP),
+ CRASHTYPE(HUNG_TASK),
+ CRASHTYPE(OVERFLOW_SIGNED),
+ CRASHTYPE(OVERFLOW_UNSIGNED),
+ CRASHTYPE(ARRAY_BOUNDS),
+ CRASHTYPE(EXEC_DATA),
+ CRASHTYPE(EXEC_STACK),
+ CRASHTYPE(EXEC_KMALLOC),
+ CRASHTYPE(EXEC_VMALLOC),
+ CRASHTYPE(EXEC_RODATA),
+ CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(EXEC_NULL),
+ CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(ACCESS_NULL),
+ CRASHTYPE(WRITE_RO),
+ CRASHTYPE(WRITE_RO_AFTER_INIT),
+ CRASHTYPE(WRITE_KERN),
+ CRASHTYPE(REFCOUNT_INC_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_DEC_ZERO),
+ CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_INC_ZERO),
+ CRASHTYPE(REFCOUNT_ADD_ZERO),
+ CRASHTYPE(REFCOUNT_INC_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_SATURATED),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
+ CRASHTYPE(REFCOUNT_TIMING),
+ CRASHTYPE(ATOMIC_TIMING),
+ CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
+ CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
+ CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
+ CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
+ CRASHTYPE(USERCOPY_STACK_FRAME_TO),
+ CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
+ CRASHTYPE(USERCOPY_STACK_BEYOND),
+ CRASHTYPE(USERCOPY_KERNEL),
+ CRASHTYPE(STACKLEAK_ERASING),
+ CRASHTYPE(CFI_FORWARD_PROTO),
+ CRASHTYPE(DOUBLE_FAULT),
+};
+
+
+/* Global kprobe entry and crashtype. */
+static struct kprobe *lkdtm_kprobe;
+static struct crashpoint *lkdtm_crashpoint;
+static const struct crashtype *lkdtm_crashtype;
+
+/* Module parameters */
+static int recur_count = -1;
+module_param(recur_count, int, 0644);
+MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
+
+static char* cpoint_name;
+module_param(cpoint_name, charp, 0444);
+MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
+
+static char* cpoint_type;
+module_param(cpoint_type, charp, 0444);
+MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
+ "hitting the crash point");
+
+static int cpoint_count = DEFAULT_COUNT;
+module_param(cpoint_count, int, 0644);
+MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
+ "crash point is to be hit to trigger action");
+
+
+/* Return the crashtype entry or NULL if the name is invalid */
+static const struct crashtype *find_crashtype(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
+ if (!strcmp(name, crashtypes[i].name))
+ return &crashtypes[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * This is forced noinline just so it distinctly shows up in the stackdump
+ * which makes validation of expected lkdtm crashes easier.
+ */
+static noinline void lkdtm_do_action(const struct crashtype *crashtype)
+{
+ if (WARN_ON(!crashtype || !crashtype->func))
+ return;
+ crashtype->func();
+}
+
+static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
+ const struct crashtype *crashtype)
+{
+ int ret;
+
+ /* If this doesn't have a symbol, just call immediately. */
+ if (!crashpoint->kprobe.symbol_name) {
+ lkdtm_do_action(crashtype);
+ return 0;
+ }
+
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
+
+ lkdtm_crashpoint = crashpoint;
+ lkdtm_crashtype = crashtype;
+ lkdtm_kprobe = &crashpoint->kprobe;
+ ret = register_kprobe(lkdtm_kprobe);
+ if (ret < 0) {
+ pr_info("Couldn't register kprobe %s\n",
+ crashpoint->kprobe.symbol_name);
+ lkdtm_kprobe = NULL;
+ lkdtm_crashpoint = NULL;
+ lkdtm_crashtype = NULL;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_KPROBES
+/* Global crash counter and spinlock. */
+static int crash_count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(crash_count_lock);
+
+/* Called by kprobe entry points. */
+static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs)
+{
+ unsigned long flags;
+ bool do_it = false;
+
+ if (WARN_ON(!lkdtm_crashpoint || !lkdtm_crashtype))
+ return 0;
+
+ spin_lock_irqsave(&crash_count_lock, flags);
+ crash_count--;
+ pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
+ lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count);
+
+ if (crash_count == 0) {
+ do_it = true;
+ crash_count = cpoint_count;
+ }
+ spin_unlock_irqrestore(&crash_count_lock, flags);
+
+ if (do_it)
+ lkdtm_do_action(lkdtm_crashtype);
+
+ return 0;
+}
+
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+ const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ struct crashpoint *crashpoint = file_inode(f)->i_private;
+ const struct crashtype *crashtype = NULL;
+ char *buf;
+ int err;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NUL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ crashtype = find_crashtype(buf);
+ free_page((unsigned long)buf);
+
+ if (!crashtype)
+ return -EINVAL;
+
+ err = lkdtm_register_cpoint(crashpoint, crashtype);
+ if (err < 0)
+ return err;
+
+ *off += count;
+
+ return count;
+}
+#endif
+
+/* Generic read callback that just prints out the available crash types */
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ char *buf;
+ int i, n, out;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
+ for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
+ n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
+ crashtypes[i].name);
+ }
+ buf[n] = '\0';
+
+ out = simple_read_from_buffer(user_buf, count, off,
+ buf, n);
+ free_page((unsigned long) buf);
+
+ return out;
+}
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/* Special entry to just crash directly. Available without KPROBEs */
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ const struct crashtype *crashtype;
+ char *buf;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+ if (count < 1)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NUL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ crashtype = find_crashtype(buf);
+ free_page((unsigned long) buf);
+ if (!crashtype)
+ return -EINVAL;
+
+ pr_info("Performing direct entry %s\n", crashtype->name);
+ lkdtm_do_action(crashtype);
+ *off += count;
+
+ return count;
+}
+
+static struct dentry *lkdtm_debugfs_root;
+
+static int __init lkdtm_module_init(void)
+{
+ struct crashpoint *crashpoint = NULL;
+ const struct crashtype *crashtype = NULL;
+ int ret;
+ int i;
+
+ /* Neither or both of these need to be set */
+ if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) {
+ pr_err("Need both cpoint_type and cpoint_name or neither\n");
+ return -EINVAL;
+ }
+
+ if (cpoint_type) {
+ crashtype = find_crashtype(cpoint_type);
+ if (!crashtype) {
+ pr_err("Unknown crashtype '%s'\n", cpoint_type);
+ return -EINVAL;
+ }
+ }
+
+ if (cpoint_name) {
+ for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+ if (!strcmp(cpoint_name, crashpoints[i].name))
+ crashpoint = &crashpoints[i];
+ }
+
+ /* Refuse unknown crashpoints. */
+ if (!crashpoint) {
+ pr_err("Invalid crashpoint %s\n", cpoint_name);
+ return -EINVAL;
+ }
+ }
+
+#ifdef CONFIG_KPROBES
+ /* Set crash count. */
+ crash_count = cpoint_count;
+#endif
+
+ /* Handle test-specific initialization. */
+ lkdtm_bugs_init(&recur_count);
+ lkdtm_perms_init();
+ lkdtm_usercopy_init();
+ lkdtm_heap_init();
+
+ /* Register debugfs interface */
+ lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
+
+ /* Install debugfs trigger files. */
+ for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+ struct crashpoint *cur = &crashpoints[i];
+
+ debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root, cur,
+ &cur->fops);
+ }
+
+ /* Install crashpoint if one was selected. */
+ if (crashpoint) {
+ ret = lkdtm_register_cpoint(crashpoint, crashtype);
+ if (ret < 0) {
+ pr_info("Invalid crashpoint %s\n", crashpoint->name);
+ goto out_err;
+ }
+ pr_info("Crash point %s of type %s registered\n",
+ crashpoint->name, cpoint_type);
+ } else {
+ pr_info("No crash points registered, enable through debugfs\n");
+ }
+
+ return 0;
+
+out_err:
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+ return ret;
+}
+
+static void __exit lkdtm_module_exit(void)
+{
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+
+ /* Handle test-specific clean-up. */
+ lkdtm_heap_exit();
+ lkdtm_usercopy_exit();
+
+ if (lkdtm_kprobe != NULL)
+ unregister_kprobe(lkdtm_kprobe);
+
+ pr_info("Crash point unregistered\n");
+}
+
+module_init(lkdtm_module_init);
+module_exit(lkdtm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kernel crash testing module");
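For reference, the debugfs interface created by this file is driven the same way the timing comment in refcount.c shows. The commands below assume debugfs is mounted at /sys/kernel/debug and, for the modprobe form, that LKDTM was built as a module:

    # list the available crash types
    cat /sys/kernel/debug/provoke-crash/DIRECT

    # trigger one immediately
    echo EXCEPTION > /sys/kernel/debug/provoke-crash/DIRECT

    # arm a kprobe-based crash point at load time instead
    modprobe lkdtm cpoint_name=INT_HARDWARE_ENTRY cpoint_type=BUG cpoint_count=10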
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
new file mode 100644
index 000000000..1323bc16f
--- /dev/null
+++ b/drivers/misc/lkdtm/heap.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests relating directly to heap memory, including
+ * page allocation and slab allocations.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+static struct kmem_cache *double_free_cache;
+static struct kmem_cache *a_cache;
+static struct kmem_cache *b_cache;
+
+/*
+ * This tries to stay within the next largest power-of-2 kmalloc cache
+ * to avoid actually overwriting anything important if it's not detected
+ * correctly.
+ */
+void lkdtm_OVERWRITE_ALLOCATION(void)
+{
+ size_t len = 1020;
+ u32 *data = kmalloc(len, GFP_KERNEL);
+ if (!data)
+ return;
+
+ data[1024 / sizeof(u32)] = 0x12345678;
+ kfree(data);
+}
+
+void lkdtm_WRITE_AFTER_FREE(void)
+{
+ int *base, *again;
+ size_t len = 1024;
+ /*
+ * The slub allocator uses the first word to store the free
+ * pointer in some configurations. Use the middle of the
+ * allocation to avoid running into the freelist
+ */
+ size_t offset = (len / sizeof(*base)) / 2;
+
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base)
+ return;
+ pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
+ pr_info("Attempting bad write to freed memory at %p\n",
+ &base[offset]);
+ kfree(base);
+ base[offset] = 0x0abcdef0;
+ /* Attempt to notice the overwrite. */
+ again = kmalloc(len, GFP_KERNEL);
+ kfree(again);
+ if (again != base)
+ pr_info("Hmm, didn't get the same memory range.\n");
+}
+
+void lkdtm_READ_AFTER_FREE(void)
+{
+ int *base, *val, saw;
+ size_t len = 1024;
+ /*
+ * The slub allocator will use either the first word or
+ * the middle of the allocation to store the free pointer,
+ * depending on configuration. Store in the second word to
+ * avoid running into the freelist.
+ */
+ size_t offset = sizeof(*base);
+
+ base = kmalloc(len, GFP_KERNEL);
+ if (!base) {
+ pr_info("Unable to allocate base memory.\n");
+ return;
+ }
+
+ val = kmalloc(len, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate val memory.\n");
+ kfree(base);
+ return;
+ }
+
+ *val = 0x12345678;
+ base[offset] = *val;
+ pr_info("Value in memory before free: %x\n", base[offset]);
+
+ kfree(base);
+
+ pr_info("Attempting bad read from freed memory\n");
+ saw = base[offset];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Memory was not poisoned\n");
+
+ kfree(val);
+}
+
+void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ if (!p) {
+ pr_info("Unable to allocate free page\n");
+ return;
+ }
+
+ pr_info("Writing to the buddy page before free\n");
+ memset((void *)p, 0x3, PAGE_SIZE);
+ free_page(p);
+ schedule();
+ pr_info("Attempting bad write to the buddy page after free\n");
+ memset((void *)p, 0x78, PAGE_SIZE);
+ /* Attempt to notice the overwrite. */
+ p = __get_free_page(GFP_KERNEL);
+ free_page(p);
+ schedule();
+}
+
+void lkdtm_READ_BUDDY_AFTER_FREE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+ int saw, *val;
+ int *base;
+
+ if (!p) {
+ pr_info("Unable to allocate free page\n");
+ return;
+ }
+
+ val = kmalloc(1024, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate val memory.\n");
+ free_page(p);
+ return;
+ }
+
+ base = (int *)p;
+
+ *val = 0x12345678;
+ base[0] = *val;
+ pr_info("Value in memory before free: %x\n", base[0]);
+ free_page(p);
+ pr_info("Attempting to read from freed memory\n");
+ saw = base[0];
+ if (saw != *val) {
+ /* Good! Poisoning happened, so declare a win. */
+ pr_info("Memory correctly poisoned (%x)\n", saw);
+ BUG();
+ }
+ pr_info("Buddy page was not poisoned\n");
+
+ kfree(val);
+}
+
+void lkdtm_SLAB_FREE_DOUBLE(void)
+{
+ int *val;
+
+ val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate double_free_cache memory.\n");
+ return;
+ }
+
+ /* Just make sure we got real memory. */
+ *val = 0x12345678;
+ pr_info("Attempting double slab free ...\n");
+ kmem_cache_free(double_free_cache, val);
+ kmem_cache_free(double_free_cache, val);
+}
+
+void lkdtm_SLAB_FREE_CROSS(void)
+{
+ int *val;
+
+ val = kmem_cache_alloc(a_cache, GFP_KERNEL);
+ if (!val) {
+ pr_info("Unable to allocate a_cache memory.\n");
+ return;
+ }
+
+ /* Just make sure we got real memory. */
+ *val = 0x12345679;
+ pr_info("Attempting cross-cache slab free ...\n");
+ kmem_cache_free(b_cache, val);
+}
+
+void lkdtm_SLAB_FREE_PAGE(void)
+{
+ unsigned long p = __get_free_page(GFP_KERNEL);
+
+ pr_info("Attempting non-Slab slab free ...\n");
+ kmem_cache_free(NULL, (void *)p);
+ free_page(p);
+}
+
+/*
+ * We have constructors to keep the caches distinctly separated without
+ * needing to boot with "slab_nomerge".
+ */
+static void ctor_double_free(void *region)
+{ }
+static void ctor_a(void *region)
+{ }
+static void ctor_b(void *region)
+{ }
+
+void __init lkdtm_heap_init(void)
+{
+ double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
+ 64, 0, 0, ctor_double_free);
+ a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
+ b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
+}
+
+void __exit lkdtm_heap_exit(void)
+{
+ kmem_cache_destroy(double_free_cache);
+ kmem_cache_destroy(a_cache);
+ kmem_cache_destroy(b_cache);
+}
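SLAB_FREE_DOUBLE above frees the same object twice in a row, which is the pattern a hardened freelist is designed to notice: the object being freed must not already be at the head of the freelist. A simplified sketch of that check (assuming CONFIG_SLAB_FREELIST_HARDENED-style behaviour, not the allocator's literal code):

/* Called on every free; a back-to-back double free trips this immediately. */
static bool sketch_is_double_free(const void *object, const void *freelist_head)
{
	return object == freelist_head;
}

SLAB_FREE_CROSS and SLAB_FREE_PAGE instead rely on the allocator noticing that the pointer does not belong to the cache (or to any slab at all) it is being returned to.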
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
new file mode 100644
index 000000000..6dec4c9b4
--- /dev/null
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LKDTM_H
+#define __LKDTM_H
+
+#define pr_fmt(fmt) "lkdtm: " fmt
+
+#include <linux/kernel.h>
+
+/* lkdtm_bugs.c */
+void __init lkdtm_bugs_init(int *recur_param);
+void lkdtm_PANIC(void);
+void lkdtm_BUG(void);
+void lkdtm_WARNING(void);
+void lkdtm_WARNING_MESSAGE(void);
+void lkdtm_EXCEPTION(void);
+void lkdtm_LOOP(void);
+void lkdtm_EXHAUST_STACK(void);
+void lkdtm_CORRUPT_STACK(void);
+void lkdtm_CORRUPT_STACK_STRONG(void);
+void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
+void lkdtm_SOFTLOCKUP(void);
+void lkdtm_HARDLOCKUP(void);
+void lkdtm_SPINLOCKUP(void);
+void lkdtm_HUNG_TASK(void);
+void lkdtm_OVERFLOW_SIGNED(void);
+void lkdtm_OVERFLOW_UNSIGNED(void);
+void lkdtm_ARRAY_BOUNDS(void);
+void lkdtm_CORRUPT_LIST_ADD(void);
+void lkdtm_CORRUPT_LIST_DEL(void);
+void lkdtm_STACK_GUARD_PAGE_LEADING(void);
+void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
+void lkdtm_UNSET_SMEP(void);
+void lkdtm_DOUBLE_FAULT(void);
+void lkdtm_CORRUPT_PAC(void);
+
+/* lkdtm_heap.c */
+void __init lkdtm_heap_init(void);
+void __exit lkdtm_heap_exit(void);
+void lkdtm_OVERWRITE_ALLOCATION(void);
+void lkdtm_WRITE_AFTER_FREE(void);
+void lkdtm_READ_AFTER_FREE(void);
+void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
+void lkdtm_READ_BUDDY_AFTER_FREE(void);
+void lkdtm_SLAB_FREE_DOUBLE(void);
+void lkdtm_SLAB_FREE_CROSS(void);
+void lkdtm_SLAB_FREE_PAGE(void);
+
+/* lkdtm_perms.c */
+void __init lkdtm_perms_init(void);
+void lkdtm_WRITE_RO(void);
+void lkdtm_WRITE_RO_AFTER_INIT(void);
+void lkdtm_WRITE_KERN(void);
+void lkdtm_EXEC_DATA(void);
+void lkdtm_EXEC_STACK(void);
+void lkdtm_EXEC_KMALLOC(void);
+void lkdtm_EXEC_VMALLOC(void);
+void lkdtm_EXEC_RODATA(void);
+void lkdtm_EXEC_USERSPACE(void);
+void lkdtm_EXEC_NULL(void);
+void lkdtm_ACCESS_USERSPACE(void);
+void lkdtm_ACCESS_NULL(void);
+
+/* lkdtm_refcount.c */
+void lkdtm_REFCOUNT_INC_OVERFLOW(void);
+void lkdtm_REFCOUNT_ADD_OVERFLOW(void);
+void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void);
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void);
+void lkdtm_REFCOUNT_DEC_ZERO(void);
+void lkdtm_REFCOUNT_DEC_NEGATIVE(void);
+void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void);
+void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void);
+void lkdtm_REFCOUNT_INC_ZERO(void);
+void lkdtm_REFCOUNT_ADD_ZERO(void);
+void lkdtm_REFCOUNT_INC_SATURATED(void);
+void lkdtm_REFCOUNT_DEC_SATURATED(void);
+void lkdtm_REFCOUNT_ADD_SATURATED(void);
+void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void);
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void);
+void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void);
+void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void);
+void lkdtm_REFCOUNT_TIMING(void);
+void lkdtm_ATOMIC_TIMING(void);
+
+/* lkdtm_rodata.c */
+void lkdtm_rodata_do_nothing(void);
+
+/* lkdtm_usercopy.c */
+void __init lkdtm_usercopy_init(void);
+void __exit lkdtm_usercopy_exit(void);
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
+void lkdtm_USERCOPY_STACK_FRAME_TO(void);
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
+void lkdtm_USERCOPY_STACK_BEYOND(void);
+void lkdtm_USERCOPY_KERNEL(void);
+
+/* lkdtm_stackleak.c */
+void lkdtm_STACKLEAK_ERASING(void);
+
+/* cfi.c */
+void lkdtm_CFI_FORWARD_PROTO(void);
+
+#endif
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
new file mode 100644
index 000000000..2dede2ef6
--- /dev/null
+++ b/drivers/misc/lkdtm/perms.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to validating kernel memory
+ * permissions: non-executable regions, non-writable regions, and
+ * even non-readable regions.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+/* Whether or not to fill the target memory area with do_nothing(). */
+#define CODE_WRITE true
+#define CODE_AS_IS false
+
+/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
+#define EXEC_SIZE 64
+
+/* This is non-const, so it will end up in the .data section. */
+static u8 data_area[EXEC_SIZE];
+
+/* This is const, so it will end up in the .rodata section. */
+static const unsigned long rodata = 0xAA55AA55;
+
+/* This is marked __ro_after_init, so it should ultimately be .rodata. */
+static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
+
+/*
+ * This just returns to the caller. It is designed to be copied into
+ * non-executable memory regions.
+ */
+static void do_nothing(void)
+{
+ return;
+}
+
+/* Must immediately follow do_nothing for size calculations to work out. */
+static void do_overwritten(void)
+{
+ pr_info("do_overwritten wasn't overwritten!\n");
+ return;
+}
+
+static noinline void execute_location(void *dst, bool write)
+{
+ void (*func)(void) = dst;
+
+ pr_info("attempting ok execution at %px\n", do_nothing);
+ do_nothing();
+
+ if (write == CODE_WRITE) {
+ memcpy(dst, do_nothing, EXEC_SIZE);
+ flush_icache_range((unsigned long)dst,
+ (unsigned long)dst + EXEC_SIZE);
+ }
+ pr_info("attempting bad execution at %px\n", func);
+ func();
+ pr_err("FAIL: func returned\n");
+}
+
+static void execute_user_location(void *dst)
+{
+ int copied;
+
+ /* Intentionally crossing kernel/user memory boundary. */
+ void (*func)(void) = dst;
+
+ pr_info("attempting ok execution at %px\n", do_nothing);
+ do_nothing();
+
+ copied = access_process_vm(current, (unsigned long)dst, do_nothing,
+ EXEC_SIZE, FOLL_WRITE);
+ if (copied < EXEC_SIZE)
+ return;
+ pr_info("attempting bad execution at %px\n", func);
+ func();
+ pr_err("FAIL: func returned\n");
+}
+
+void lkdtm_WRITE_RO(void)
+{
+ /* Explicitly cast away "const" for the test and make volatile. */
+ volatile unsigned long *ptr = (unsigned long *)&rodata;
+
+ pr_info("attempting bad rodata write at %px\n", ptr);
+ *ptr ^= 0xabcd1234;
+ pr_err("FAIL: survived bad write\n");
+}
+
+void lkdtm_WRITE_RO_AFTER_INIT(void)
+{
+ volatile unsigned long *ptr = &ro_after_init;
+
+ /*
+ * Verify we were written to during init. Since an Oops
+ * is considered a "success", a failure is to just skip the
+ * real test.
+ */
+ if ((*ptr & 0xAA) != 0xAA) {
+ pr_info("%p was NOT written during init!?\n", ptr);
+ return;
+ }
+
+ pr_info("attempting bad ro_after_init write at %px\n", ptr);
+ *ptr ^= 0xabcd1234;
+ pr_err("FAIL: survived bad write\n");
+}
+
+void lkdtm_WRITE_KERN(void)
+{
+ size_t size;
+ volatile unsigned char *ptr;
+
+ size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
+ ptr = (unsigned char *)do_overwritten;
+
+ pr_info("attempting bad %zu byte write at %px\n", size, ptr);
+ memcpy((void *)ptr, (unsigned char *)do_nothing, size);
+ flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
+ pr_err("FAIL: survived bad write\n");
+
+ do_overwritten();
+}
+
+void lkdtm_EXEC_DATA(void)
+{
+ execute_location(data_area, CODE_WRITE);
+}
+
+void lkdtm_EXEC_STACK(void)
+{
+ u8 stack_area[EXEC_SIZE];
+ execute_location(stack_area, CODE_WRITE);
+}
+
+void lkdtm_EXEC_KMALLOC(void)
+{
+ u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
+ execute_location(kmalloc_area, CODE_WRITE);
+ kfree(kmalloc_area);
+}
+
+void lkdtm_EXEC_VMALLOC(void)
+{
+ u32 *vmalloc_area = vmalloc(EXEC_SIZE);
+ execute_location(vmalloc_area, CODE_WRITE);
+ vfree(vmalloc_area);
+}
+
+void lkdtm_EXEC_RODATA(void)
+{
+ execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS);
+}
+
+void lkdtm_EXEC_USERSPACE(void)
+{
+ unsigned long user_addr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+ execute_user_location((void *)user_addr);
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void lkdtm_EXEC_NULL(void)
+{
+ execute_location(NULL, CODE_AS_IS);
+}
+
+void lkdtm_ACCESS_USERSPACE(void)
+{
+ unsigned long user_addr, tmp = 0;
+ unsigned long *ptr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
+ pr_warn("copy_to_user failed\n");
+ vm_munmap(user_addr, PAGE_SIZE);
+ return;
+ }
+
+ ptr = (unsigned long *)user_addr;
+
+ pr_info("attempting bad read at %px\n", ptr);
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+ pr_err("FAIL: survived bad read\n");
+
+ pr_info("attempting bad write at %px\n", ptr);
+ *ptr = tmp;
+ pr_err("FAIL: survived bad write\n");
+
+ vm_munmap(user_addr, PAGE_SIZE);
+}
+
+void lkdtm_ACCESS_NULL(void)
+{
+ unsigned long tmp;
+ volatile unsigned long *ptr = (unsigned long *)NULL;
+
+ pr_info("attempting bad read at %px\n", ptr);
+ tmp = *ptr;
+ tmp += 0xc0dec0de;
+ pr_err("FAIL: survived bad read\n");
+
+ pr_info("attempting bad write at %px\n", ptr);
+ *ptr = tmp;
+ pr_err("FAIL: survived bad write\n");
+}
+
+void __init lkdtm_perms_init(void)
+{
+ /* Make sure we can write to __ro_after_init values during __init */
+ ro_after_init |= 0xAA;
+}
diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c
new file mode 100644
index 000000000..de7c5ab52
--- /dev/null
+++ b/drivers/misc/lkdtm/refcount.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to refcount bugs (e.g. overflow,
+ * underflow, reaching zero untested, etc).
+ */
+#include "lkdtm.h"
+#include <linux/refcount.h>
+
+static void overflow_check(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Overflow detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Overflow detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount wrapped to %d\n", refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_inc() above the maximum value of the refcount implementation
+ * should at least saturate, and at most also WARN.
+ */
+void lkdtm_REFCOUNT_INC_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
+
+ pr_info("attempting good refcount_inc() without overflow\n");
+ refcount_dec(&over);
+ refcount_inc(&over);
+
+ pr_info("attempting bad refcount_inc() overflow\n");
+ refcount_inc(&over);
+ refcount_inc(&over);
+
+ overflow_check(&over);
+}
+
+/* refcount_add() should behave just like refcount_inc() above. */
+void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
+
+ pr_info("attempting good refcount_add() without overflow\n");
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_dec(&over);
+ refcount_add(4, &over);
+
+ pr_info("attempting bad refcount_add() overflow\n");
+ refcount_add(4, &over);
+
+ overflow_check(&over);
+}
+
+/* refcount_inc_not_zero() should behave just like refcount_inc() above. */
+void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
+
+ pr_info("attempting bad refcount_inc_not_zero() overflow\n");
+ if (!refcount_inc_not_zero(&over))
+ pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
+
+ overflow_check(&over);
+}
+
+/* refcount_add_not_zero() should behave just like refcount_inc() above. */
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
+{
+ refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
+
+ pr_info("attempting bad refcount_add_not_zero() overflow\n");
+ if (!refcount_add_not_zero(6, &over))
+ pr_warn("Weird: refcount_add_not_zero() reported zero\n");
+
+ overflow_check(&over);
+}
+
+static void check_zero(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Zero detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Zero detected: unsafely reset to max\n");
+ break;
+ case 0:
+ pr_warn("Still at zero: refcount_inc/add() must not inc-from-0\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_dec(), as opposed to a refcount_dec_and_test(), when it
+ * hits zero should either saturate (when inc-from-zero isn't protected)
+ * or stay at zero (when inc-from-zero is protected), and should WARN in both cases.
+ */
+void lkdtm_REFCOUNT_DEC_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(2);
+
+ pr_info("attempting good refcount_dec()\n");
+ refcount_dec(&zero);
+
+ pr_info("attempting bad refcount_dec() to zero\n");
+ refcount_dec(&zero);
+
+ check_zero(&zero);
+}
+
+static void check_negative(refcount_t *ref, int start)
+{
+ /*
+ * refcount_t refuses to move a refcount at all on an
+ * over-sub, so we have to track our starting position instead of
+ * looking only at zero-pinning.
+ */
+ if (refcount_read(ref) == start) {
+ pr_warn("Still at %d: refcount_inc/add() must not inc-from-0\n",
+ start);
+ return;
+ }
+
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Negative detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Negative detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/* A refcount_dec() going negative should saturate and may WARN. */
+void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(0);
+
+ pr_info("attempting bad refcount_dec() below zero\n");
+ refcount_dec(&neg);
+
+ check_negative(&neg, 0);
+}
+
+/*
+ * A refcount_dec_and_test() should act like refcount_dec() above when
+ * going negative.
+ */
+void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(0);
+
+ pr_info("attempting bad refcount_dec_and_test() below zero\n");
+ if (refcount_dec_and_test(&neg))
+ pr_warn("Weird: refcount_dec_and_test() reported zero\n");
+
+ check_negative(&neg, 0);
+}
+
+/*
+ * A refcount_sub_and_test() should act like refcount_dec_and_test()
+ * above when going negative.
+ */
+void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
+{
+ refcount_t neg = REFCOUNT_INIT(3);
+
+ pr_info("attempting bad refcount_sub_and_test() below zero\n");
+ if (refcount_sub_and_test(5, &neg))
+ pr_warn("Weird: refcount_sub_and_test() reported zero\n");
+
+ check_negative(&neg, 3);
+}
+
+static void check_from_zero(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case 0:
+ pr_info("Zero detected: stayed at zero\n");
+ break;
+ case REFCOUNT_SATURATED:
+ pr_info("Zero detected: saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Zero detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_info("Fail: zero not detected, incremented to %d\n",
+ refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_inc() from zero should pin to zero or saturate and may WARN.
+ */
+void lkdtm_REFCOUNT_INC_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(0);
+
+ pr_info("attempting safe refcount_inc_not_zero() from zero\n");
+ if (!refcount_inc_not_zero(&zero)) {
+ pr_info("Good: zero detected\n");
+ if (refcount_read(&zero) == 0)
+ pr_info("Correctly stayed at zero\n");
+ else
+ pr_err("Fail: refcount went past zero!\n");
+ } else {
+ pr_err("Fail: Zero not detected!?\n");
+ }
+
+ pr_info("attempting bad refcount_inc() from zero\n");
+ refcount_inc(&zero);
+
+ check_from_zero(&zero);
+}
+
+/*
+ * A refcount_add() should act like refcount_inc() above when starting
+ * at zero.
+ */
+void lkdtm_REFCOUNT_ADD_ZERO(void)
+{
+ refcount_t zero = REFCOUNT_INIT(0);
+
+ pr_info("attempting safe refcount_add_not_zero() from zero\n");
+ if (!refcount_add_not_zero(3, &zero)) {
+ pr_info("Good: zero detected\n");
+ if (refcount_read(&zero) == 0)
+ pr_info("Correctly stayed at zero\n");
+ else
+ pr_err("Fail: refcount went past zero\n");
+ } else {
+ pr_err("Fail: Zero not detected!?\n");
+ }
+
+ pr_info("attempting bad refcount_add() from zero\n");
+ refcount_add(3, &zero);
+
+ check_from_zero(&zero);
+}
+
+static void check_saturated(refcount_t *ref)
+{
+ switch (refcount_read(ref)) {
+ case REFCOUNT_SATURATED:
+ pr_info("Saturation detected: still saturated\n");
+ break;
+ case REFCOUNT_MAX:
+ pr_warn("Saturation detected: unsafely reset to max\n");
+ break;
+ default:
+ pr_err("Fail: refcount went crazy: %d\n", refcount_read(ref));
+ }
+}
+
+/*
+ * A refcount_inc() from a saturated value should at most warn about
+ * being saturated already.
+ */
+void lkdtm_REFCOUNT_INC_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_inc() from saturated\n");
+ refcount_inc(&sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_DEC_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_dec() from saturated\n");
+ refcount_dec(&sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_ADD_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_dec() from saturated\n");
+ refcount_add(8, &sat);
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_inc_not_zero() from saturated\n");
+ if (!refcount_inc_not_zero(&sat))
+ pr_warn("Weird: refcount_inc_not_zero() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_add_not_zero() from saturated\n");
+ if (!refcount_add_not_zero(7, &sat))
+ pr_warn("Weird: refcount_add_not_zero() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_dec_and_test() from saturated\n");
+ if (refcount_dec_and_test(&sat))
+ pr_warn("Weird: refcount_dec_and_test() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Should act like refcount_inc() above from saturated. */
+void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
+{
+ refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
+
+ pr_info("attempting bad refcount_sub_and_test() from saturated\n");
+ if (refcount_sub_and_test(8, &sat))
+ pr_warn("Weird: refcount_sub_and_test() reported zero\n");
+
+ check_saturated(&sat);
+}
+
+/* Used to time the existing atomic_t when used for reference counting */
+void lkdtm_ATOMIC_TIMING(void)
+{
+ unsigned int i;
+ atomic_t count = ATOMIC_INIT(1);
+
+ for (i = 0; i < INT_MAX - 1; i++)
+ atomic_inc(&count);
+
+ for (i = INT_MAX; i > 0; i--)
+ if (atomic_dec_and_test(&count))
+ break;
+
+ if (i != 1)
+ pr_err("atomic timing: out of sync up/down cycle: %u\n", i - 1);
+ else
+ pr_info("atomic timing: done\n");
+}
+
+/*
+ * This can be compared to ATOMIC_TIMING when implementing fast refcount
+ * protections. Looking at the number of CPU cycles tells the real story
+ * about performance. For example:
+ * cd /sys/kernel/debug/provoke-crash
+ * perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
+ */
+void lkdtm_REFCOUNT_TIMING(void)
+{
+ unsigned int i;
+ refcount_t count = REFCOUNT_INIT(1);
+
+ for (i = 0; i < INT_MAX - 1; i++)
+ refcount_inc(&count);
+
+ for (i = INT_MAX; i > 0; i--)
+ if (refcount_dec_and_test(&count))
+ break;
+
+ if (i != 1)
+ pr_err("refcount: out of sync up/down cycle: %u\n", i - 1);
+ else
+ pr_info("refcount timing: done\n");
+}
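All of the tests above deliberately drive refcount_t outside its contract. For contrast, the usage pattern the API protects looks like this minimal sketch (the struct and function names are hypothetical, not part of LKDTM):

struct example_obj {
	refcount_t ref;		/* set to 1 with refcount_set() when the object is created */
};

static struct example_obj *example_get(struct example_obj *obj)
{
	refcount_inc(&obj->ref);	/* only legal while a reference is already held */
	return obj;
}

static void example_put(struct example_obj *obj)
{
	if (refcount_dec_and_test(&obj->ref))	/* true only when the last reference drops */
		kfree(obj);
}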
diff --git a/drivers/misc/lkdtm/rodata.c b/drivers/misc/lkdtm/rodata.c
new file mode 100644
index 000000000..baacb876d
--- /dev/null
+++ b/drivers/misc/lkdtm/rodata.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This includes functions that are meant to live entirely in .rodata
+ * (via objcopy tricks), to validate the non-executability of .rodata.
+ */
+#include "lkdtm.h"
+
+void noinstr lkdtm_rodata_do_nothing(void)
+{
+ /* Does nothing. We just want an architecture agnostic "return". */
+}
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
new file mode 100644
index 000000000..d1a5c0705
--- /dev/null
+++ b/drivers/misc/lkdtm/stackleak.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code tests that the current task stack is properly erased (filled
+ * with STACKLEAK_POISON).
+ *
+ * Authors:
+ * Alexander Popov <alex.popov@linux.com>
+ * Tycho Andersen <tycho@tycho.ws>
+ */
+
+#include "lkdtm.h"
+#include <linux/stackleak.h>
+
+void lkdtm_STACKLEAK_ERASING(void)
+{
+ unsigned long *sp, left, found, i;
+ const unsigned long check_depth =
+ STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+ bool test_failed = false;
+
+ /*
+ * For the details about the alignment of the poison values, see
+ * the comment in stackleak_track_stack().
+ */
+ sp = PTR_ALIGN(&i, sizeof(unsigned long));
+
+ left = ((unsigned long)sp & (THREAD_SIZE - 1)) / sizeof(unsigned long);
+ sp--;
+
+ /*
+ * One 'long int' at the bottom of the thread stack is reserved
+ * and not poisoned.
+ */
+ if (left > 1) {
+ left--;
+ } else {
+ pr_err("FAIL: not enough stack space for the test\n");
+ test_failed = true;
+ goto end;
+ }
+
+ pr_info("checking unused part of the thread stack (%lu bytes)...\n",
+ left * sizeof(unsigned long));
+
+ /*
+ * Search for 'check_depth' poison values in a row (just like
+ * stackleak_erase() does).
+ */
+ for (i = 0, found = 0; i < left && found <= check_depth; i++) {
+ if (*(sp - i) == STACKLEAK_POISON)
+ found++;
+ else
+ found = 0;
+ }
+
+ if (found <= check_depth) {
+ pr_err("FAIL: the erased part is not found (checked %lu bytes)\n",
+ i * sizeof(unsigned long));
+ test_failed = true;
+ goto end;
+ }
+
+ pr_info("the erased part begins after %lu not poisoned bytes\n",
+ (i - found) * sizeof(unsigned long));
+
+ /* The rest of thread stack should be erased */
+ for (; i < left; i++) {
+ if (*(sp - i) != STACKLEAK_POISON) {
+ pr_err("FAIL: bad value number %lu in the erased part: 0x%lx\n",
+ i, *(sp - i));
+ test_failed = true;
+ }
+ }
+
+end:
+ if (test_failed) {
+ pr_err("FAIL: the thread stack is NOT properly erased\n");
+ dump_stack();
+ } else {
+ pr_info("OK: the rest of the thread stack is properly erased\n");
+ }
+}
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
new file mode 100644
index 000000000..cde265548
--- /dev/null
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This is for all the tests related to copy_to_user() and copy_from_user()
+ * hardening.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Many of the tests here end up using const sizes, but those would
+ * normally be ignored by hardened usercopy, so force the compiler
+ * into choosing the non-const path to make sure we trigger the
+ * hardened usercopy checks: add "unconst" to all the const copies,
+ * and make sure "cache_size" isn't optimized into a const.
+ */
+static volatile size_t unconst;
+static volatile size_t cache_size = 1024;
+static struct kmem_cache *whitelist_cache;
+
+static const unsigned char test_text[] = "This is a test.\n";
+
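To illustrate the "unconst" trick described above, a minimal sketch (not part of the patch; example_unconst_copy() is hypothetical): with a literal size the copy can be validated entirely at compile time, so the runtime hardened usercopy checks are only reliably exercised when the size is routed through the volatile.

static void example_unconst_copy(void __user *to)
{
	char buf[64];

	memset(buf, 0xAA, sizeof(buf));
	/* Constant size: may be folded and checked purely at compile time. */
	if (copy_to_user(to, buf, sizeof(buf)))
		pr_warn("constant-size copy failed\n");
	/* "unconst" defeats the folding and forces the runtime checks. */
	if (copy_to_user(to, buf, unconst + sizeof(buf)))
		pr_warn("runtime-checked copy failed\n");
}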
+/*
+ * Instead of adding -Wno-return-local-addr, just pass the stack address
+ * through a function to obfuscate it from the compiler.
+ */
+static noinline unsigned char *trick_compiler(unsigned char *stack)
+{
+ return stack + unconst;
+}
+
+static noinline unsigned char *do_usercopy_stack_callee(int value)
+{
+ unsigned char buf[128];
+ int i;
+
+ /* Exercise stack to avoid everything living in registers. */
+ for (i = 0; i < sizeof(buf); i++) {
+ buf[i] = value & 0xff;
+ }
+
+ /*
+	 * Put the target buffer in the middle of the stack allocation
+ * so that we don't step on future stack users regardless
+ * of stack growth direction.
+ */
+ return trick_compiler(&buf[(128/2)-32]);
+}
+
+static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
+{
+ unsigned long user_addr;
+ unsigned char good_stack[32];
+ unsigned char *bad_stack;
+ int i;
+
+ /* Exercise stack to avoid everything living in registers. */
+ for (i = 0; i < sizeof(good_stack); i++)
+ good_stack[i] = test_text[i % sizeof(test_text)];
+
+ /* This is a pointer to outside our current stack frame. */
+ if (bad_frame) {
+ bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
+ } else {
+ /* Put start address just inside stack. */
+ bad_stack = task_stack_page(current) + THREAD_SIZE;
+ bad_stack -= sizeof(unsigned long);
+ }
+
+#ifdef ARCH_HAS_CURRENT_STACK_POINTER
+ pr_info("stack : %px\n", (void *)current_stack_pointer);
+#endif
+ pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
+ pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user of local stack\n");
+ if (copy_to_user((void __user *)user_addr, good_stack,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user of distant stack\n");
+ if (copy_to_user((void __user *)user_addr, bad_stack,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+		/*
+		 * Skip the copy_from_user() variant of the "beyond" case:
+		 * if hardened usercopy does not catch it, the write runs
+		 * past the end of this stack and could corrupt whatever
+		 * is adjacent (e.g. another thread's stack).
+		 */
+ if (!bad_frame)
+ goto free_user;
+
+ pr_info("attempting good copy_from_user of local stack\n");
+ if (copy_from_user(good_stack, (void __user *)user_addr,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user of distant stack\n");
+ if (copy_from_user(bad_stack, (void __user *)user_addr,
+ unconst + sizeof(good_stack))) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+}
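For the stack cases above, a conceptual sketch (not part of the patch; example_within_task_stack() is hypothetical) of the containment rule hardened usercopy enforces: an object claimed to live on the stack must fit entirely within the current task's stack, and the STACK_FRAME_* cases additionally rely on an arch-specific check that it stays within valid frames.

static bool example_within_task_stack(const void *ptr, size_t n)
{
	unsigned long stack = (unsigned long)task_stack_page(current);
	unsigned long obj = (unsigned long)ptr;

	return obj >= stack && obj + n <= stack + THREAD_SIZE;
}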
+
+/*
+ * This checks for whole-object size validation with hardened usercopy,
+ * with or without usercopy whitelisting.
+ */
+static void do_usercopy_heap_size(bool to_user)
+{
+ unsigned long user_addr;
+ unsigned char *one, *two;
+ void __user *test_user_addr;
+ void *test_kern_addr;
+ size_t size = unconst + 1024;
+
+ one = kmalloc(size, GFP_KERNEL);
+ two = kmalloc(size, GFP_KERNEL);
+ if (!one || !two) {
+ pr_warn("Failed to allocate kernel memory\n");
+ goto free_kernel;
+ }
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ goto free_kernel;
+ }
+
+ memset(one, 'A', size);
+ memset(two, 'B', size);
+
+ test_user_addr = (void __user *)(user_addr + 16);
+ test_kern_addr = one + 16;
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user of correct size\n");
+ if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user of too large size\n");
+ if (copy_to_user(test_user_addr, test_kern_addr, size)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ pr_info("attempting good copy_from_user of correct size\n");
+ if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user of too large size\n");
+ if (copy_from_user(test_kern_addr, test_user_addr, size)) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+free_kernel:
+ kfree(one);
+ kfree(two);
+}
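Conceptually (not part of the patch; example_within_object() is hypothetical), the whole-object check exercised above rejects any copy that would run past the end of the containing slab object:

/* A copy of "n" bytes starting "off" bytes into an "obj_size"-byte object. */
static bool example_within_object(size_t off, size_t n, size_t obj_size)
{
	return off + n <= obj_size;
}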
+
+/*
+ * This checks for the specific whitelist window within an object. If this
+ * test passes, then do_usercopy_heap_size() tests will pass too.
+ */
+static void do_usercopy_heap_whitelist(bool to_user)
+{
+ unsigned long user_alloc;
+ unsigned char *buf = NULL;
+ unsigned char __user *user_addr;
+ size_t offset, size;
+
+ /* Make sure cache was prepared. */
+ if (!whitelist_cache) {
+ pr_warn("Failed to allocate kernel cache\n");
+ return;
+ }
+
+	/* Allocate a buffer that contains a whitelisted window. */
+ buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
+ if (!buf) {
+ pr_warn("Failed to allocate buffer from whitelist cache\n");
+ goto free_alloc;
+ }
+
+ /* Allocate user memory we'll poke at. */
+ user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_alloc >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ goto free_alloc;
+ }
+ user_addr = (void __user *)user_alloc;
+
+ memset(buf, 'B', cache_size);
+
+ /* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
+ offset = (cache_size / 4) + unconst;
+ size = (cache_size / 16) + unconst;
+
+ if (to_user) {
+ pr_info("attempting good copy_to_user inside whitelist\n");
+ if (copy_to_user(user_addr, buf + offset, size)) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user outside whitelist\n");
+ if (copy_to_user(user_addr, buf + offset - 1, size)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ } else {
+ pr_info("attempting good copy_from_user inside whitelist\n");
+ if (copy_from_user(buf + offset, user_addr, size)) {
+ pr_warn("copy_from_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_from_user outside whitelist\n");
+ if (copy_from_user(buf + offset - 1, user_addr, size)) {
+ pr_warn("copy_from_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ }
+
+free_user:
+ vm_munmap(user_alloc, PAGE_SIZE);
+free_alloc:
+ if (buf)
+ kmem_cache_free(whitelist_cache, buf);
+}
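Conceptually (not part of the patch; example_within_whitelist() is hypothetical), the whitelist check is stricter: the copy must also fall entirely inside the useroffset/usersize window declared at cache creation, which is why the "offset - 1" copies above are expected to Oops:

static bool example_within_whitelist(size_t off, size_t n,
				     size_t useroffset, size_t usersize)
{
	return off >= useroffset && off + n <= useroffset + usersize;
}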
+
+/* Callable tests. */
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+{
+ do_usercopy_heap_size(true);
+}
+
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+{
+ do_usercopy_heap_size(false);
+}
+
+void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
+{
+ do_usercopy_heap_whitelist(true);
+}
+
+void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
+{
+ do_usercopy_heap_whitelist(false);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+{
+ do_usercopy_stack(true, true);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+{
+ do_usercopy_stack(false, true);
+}
+
+void lkdtm_USERCOPY_STACK_BEYOND(void)
+{
+ do_usercopy_stack(true, false);
+}
+
+void lkdtm_USERCOPY_KERNEL(void)
+{
+ unsigned long user_addr;
+
+ user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (user_addr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ pr_info("attempting good copy_to_user from kernel rodata: %px\n",
+ test_text);
+ if (copy_to_user((void __user *)user_addr, test_text,
+ unconst + sizeof(test_text))) {
+ pr_warn("copy_to_user failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user from kernel text: %px\n",
+ vm_mmap);
+ if (copy_to_user((void __user *)user_addr, vm_mmap,
+ unconst + PAGE_SIZE)) {
+ pr_warn("copy_to_user failed, but lacked Oops\n");
+ goto free_user;
+ }
+ pr_err("FAIL: survived bad copy_to_user()\n");
+
+free_user:
+ vm_munmap(user_addr, PAGE_SIZE);
+}
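For the test above, a conceptual sketch (not part of the patch; example_overlaps_kernel_text() is hypothetical and assumes <asm/sections.h> for _stext/_etext): hardened usercopy refuses to expose kernel text, rejecting any source range that overlaps it, which is why copying from vm_mmap's code should Oops.

static bool example_overlaps_kernel_text(unsigned long ptr, size_t n)
{
	return ptr < (unsigned long)_etext && ptr + n > (unsigned long)_stext;
}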
+
+void __init lkdtm_usercopy_init(void)
+{
+	/* Prepare a cache with a usercopy whitelist window. */
+ whitelist_cache =
+ kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
+ 0, 0,
+ cache_size / 4,
+ cache_size / 16,
+ NULL);
+}
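For readability (not part of the patch): the arguments above follow kmem_cache_create_usercopy(name, size, align, flags, useroffset, usersize, ctor), so the whitelisted window is cache_size / 16 bytes starting at offset cache_size / 4, i.e. the same window exercised by do_usercopy_heap_whitelist().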
+
+void __exit lkdtm_usercopy_exit(void)
+{
+ kmem_cache_destroy(whitelist_cache);
+}