path: root/drivers/misc/lkdtm
author    Linus Torvalds <torvalds@linux-foundation.org> 2022-06-03 11:36:34 -0700
committer Linus Torvalds <torvalds@linux-foundation.org> 2022-06-03 11:36:34 -0700
commit    6f9b5ed8caddfbc94af8307c557ed57a8ec5c65c (patch)
tree      dda90fb07a35fc07cf0cedc22f0f6be61b29cf53 /drivers/misc/lkdtm
parent    54c2cc79194c961a213c1d375fe3aa4165664cc4 (diff)
parent    90de6805267f8c79cd2b1a36805071e257c39b5c (diff)
download  linux-6f9b5ed8caddfbc94af8307c557ed57a8ec5c65c.tar.gz
Merge tag 'char-misc-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char / misc / other smaller driver subsystem updates from Greg KH:
 "Here is the large set of char, misc, and other driver subsystem
  updates for 5.19-rc1.

  The merge request for this has been delayed as I wanted to get lots of
  linux-next testing due to some late arrivals of changes for the
  habanalabs driver.

  Highlights of this merge are:

   - habanalabs driver updates for new hardware types and fixes and
     other updates

   - IIO driver tree merge which includes loads of new IIO drivers and
     cleanups and additions

   - PHY driver tree merge with new drivers and small updates to
     existing ones

   - interconnect driver tree merge with fixes and updates

   - soundwire driver tree merge with some small fixes

   - coresight driver tree merge with small fixes and updates

   - mhi bus driver tree merge with lots of updates and new device
     support

   - firmware driver updates

   - fpga driver updates

   - lkdtm driver updates (with a merge conflict, more on that below)

   - extcon driver tree merge with small updates

   - lots of other tiny driver updates and fixes and cleanups, full
     details in the shortlog.

  All of these have been in linux-next for almost 2 weeks with no
  reported problems"

* tag 'char-misc-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (387 commits)
  habanalabs: use separate structure info for each error collect data
  habanalabs: fix missing handle shift during mmap
  habanalabs: remove hdev from hl_ctx_get args
  habanalabs: do MMU prefetch as deferred work
  habanalabs: order memory manager messages
  habanalabs: return -EFAULT on copy_to_user error
  habanalabs: use NULL for eventfd
  habanalabs: update firmware header
  habanalabs: add support for notification via eventfd
  habanalabs: add topic to memory manager buffer
  habanalabs: handle race in driver fini
  habanalabs: add device memory scrub ability through debugfs
  habanalabs: use unified memory manager for CB flow
  habanalabs: unified memory manager new code for CB flow
  habanalabs/gaudi: set arbitration timeout to a high value
  habanalabs: add put by handle method to memory manager
  habanalabs: hide memory manager page shift
  habanalabs: Add separate poll interval value for protocol
  habanalabs: use get_task_pid() to take PID
  habanalabs: add prefetch flag to the MAP operation
  ...
Diffstat (limited to 'drivers/misc/lkdtm')
-rw-r--r--  drivers/misc/lkdtm/bugs.c        96
-rw-r--r--  drivers/misc/lkdtm/cfi.c        145
-rw-r--r--  drivers/misc/lkdtm/core.c       138
-rw-r--r--  drivers/misc/lkdtm/fortify.c     17
-rw-r--r--  drivers/misc/lkdtm/heap.c        48
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h      142
-rw-r--r--  drivers/misc/lkdtm/perms.c       47
-rw-r--r--  drivers/misc/lkdtm/powerpc.c     11
-rw-r--r--  drivers/misc/lkdtm/refcount.c    65
-rw-r--r--  drivers/misc/lkdtm/stackleak.c   13
-rw-r--r--  drivers/misc/lkdtm/usercopy.c   146
11 files changed, 560 insertions, 308 deletions
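
The lkdtm portion of this pull reorganizes the crash tests: each test function
becomes static to its source file, each file publishes its tests through a
struct crashtype_category, and core.c keeps only a list of category pointers
that it walks for lookup and for the debugfs listing. The following is a
minimal stand-alone userspace sketch of that registration and lookup pattern,
assuming stand-in handlers; the struct, macro, and function names mirror the
patch below, but this is an illustration, not the kernel code itself.

/*
 * Sketch of the per-file crashtype registration pattern from this series.
 * The lkdtm_* handlers here are stand-ins that only print a message.
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define __stringify(x) #x

struct crashtype {
	const char *name;
	void (*func)(void);
};

#define CRASHTYPE(_name) \
	{ .name = __stringify(_name), .func = lkdtm_ ## _name, }

struct crashtype_category {
	struct crashtype *crashtypes;
	size_t len;
};

/* Stand-ins for the real tests, which live one category per source file. */
static void lkdtm_PANIC(void)    { puts("would panic here"); }
static void lkdtm_WARNING(void)  { puts("would WARN here"); }
static void lkdtm_WRITE_RO(void) { puts("would write to rodata here"); }

static struct crashtype bugs_tests[] = {
	CRASHTYPE(PANIC),
	CRASHTYPE(WARNING),
};
static struct crashtype perms_tests[] = {
	CRASHTYPE(WRITE_RO),
};

static struct crashtype_category bugs_crashtypes = {
	.crashtypes = bugs_tests, .len = ARRAY_SIZE(bugs_tests),
};
static struct crashtype_category perms_crashtypes = {
	.crashtypes = perms_tests, .len = ARRAY_SIZE(perms_tests),
};

/* core.c now only aggregates the per-file categories. */
static const struct crashtype_category *crashtype_categories[] = {
	&bugs_crashtypes,
	&perms_crashtypes,
};

/* Same nested-loop lookup shape as find_crashtype() in core.c. */
static const struct crashtype *find_crashtype(const char *name)
{
	size_t cat, idx;

	for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++)
		for (idx = 0; idx < crashtype_categories[cat]->len; idx++)
			if (!strcmp(name, crashtype_categories[cat]->crashtypes[idx].name))
				return &crashtype_categories[cat]->crashtypes[idx];
	return NULL;
}

int main(void)
{
	const struct crashtype *t = find_crashtype("WRITE_RO");

	if (t)
		t->func();
	return 0;
}

With this shape, adding a test only touches the file that implements it plus
its local crashtypes[] array; core.c no longer needs a central list edit.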
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index f21854ac5cc2b..009239ad1d8a8 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -68,40 +68,40 @@ void __init lkdtm_bugs_init(int *recur_param)
recur_count = *recur_param;
}
-void lkdtm_PANIC(void)
+static void lkdtm_PANIC(void)
{
panic("dumptest");
}
-void lkdtm_BUG(void)
+static void lkdtm_BUG(void)
{
BUG();
}
static int warn_counter;
-void lkdtm_WARNING(void)
+static void lkdtm_WARNING(void)
{
WARN_ON(++warn_counter);
}
-void lkdtm_WARNING_MESSAGE(void)
+static void lkdtm_WARNING_MESSAGE(void)
{
WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}
-void lkdtm_EXCEPTION(void)
+static void lkdtm_EXCEPTION(void)
{
*((volatile int *) 0) = 0;
}
-void lkdtm_LOOP(void)
+static void lkdtm_LOOP(void)
{
for (;;)
;
}
-void lkdtm_EXHAUST_STACK(void)
+static void lkdtm_EXHAUST_STACK(void)
{
pr_info("Calling function with %lu frame size to depth %d ...\n",
REC_STACK_SIZE, recur_count);
@@ -115,7 +115,7 @@ static noinline void __lkdtm_CORRUPT_STACK(void *stack)
}
/* This should trip the stack canary, not corrupt the return address. */
-noinline void lkdtm_CORRUPT_STACK(void)
+static noinline void lkdtm_CORRUPT_STACK(void)
{
/* Use default char array length that triggers stack protection. */
char data[8] __aligned(sizeof(void *));
@@ -125,7 +125,7 @@ noinline void lkdtm_CORRUPT_STACK(void)
}
/* Same as above but will only get a canary with -fstack-protector-strong */
-noinline void lkdtm_CORRUPT_STACK_STRONG(void)
+static noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
union {
unsigned short shorts[4];
@@ -139,7 +139,7 @@ noinline void lkdtm_CORRUPT_STACK_STRONG(void)
static pid_t stack_pid;
static unsigned long stack_addr;
-void lkdtm_REPORT_STACK(void)
+static void lkdtm_REPORT_STACK(void)
{
volatile uintptr_t magic;
pid_t pid = task_pid_nr(current);
@@ -222,7 +222,7 @@ static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
}
}
-void lkdtm_REPORT_STACK_CANARY(void)
+static void lkdtm_REPORT_STACK_CANARY(void)
{
/* Use default char array length that triggers stack protection. */
char data[8] __aligned(sizeof(void *)) = { };
@@ -230,7 +230,7 @@ void lkdtm_REPORT_STACK_CANARY(void)
__lkdtm_REPORT_STACK_CANARY((void *)&data);
}
-void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+static void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
u32 *p;
@@ -245,21 +245,21 @@ void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
pr_err("XFAIL: arch has CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS\n");
}
-void lkdtm_SOFTLOCKUP(void)
+static void lkdtm_SOFTLOCKUP(void)
{
preempt_disable();
for (;;)
cpu_relax();
}
-void lkdtm_HARDLOCKUP(void)
+static void lkdtm_HARDLOCKUP(void)
{
local_irq_disable();
for (;;)
cpu_relax();
}
-void lkdtm_SPINLOCKUP(void)
+static void lkdtm_SPINLOCKUP(void)
{
/* Must be called twice to trigger. */
spin_lock(&lock_me_up);
@@ -267,7 +267,7 @@ void lkdtm_SPINLOCKUP(void)
__release(&lock_me_up);
}
-void lkdtm_HUNG_TASK(void)
+static void lkdtm_HUNG_TASK(void)
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
@@ -276,7 +276,7 @@ void lkdtm_HUNG_TASK(void)
volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;
-void lkdtm_OVERFLOW_SIGNED(void)
+static void lkdtm_OVERFLOW_SIGNED(void)
{
int value;
@@ -291,7 +291,7 @@ void lkdtm_OVERFLOW_SIGNED(void)
}
-void lkdtm_OVERFLOW_UNSIGNED(void)
+static void lkdtm_OVERFLOW_UNSIGNED(void)
{
unsigned int value;
@@ -319,7 +319,7 @@ struct array_bounds {
int three;
};
-void lkdtm_ARRAY_BOUNDS(void)
+static void lkdtm_ARRAY_BOUNDS(void)
{
struct array_bounds_flex_array *not_checked;
struct array_bounds *checked;
@@ -327,6 +327,11 @@ void lkdtm_ARRAY_BOUNDS(void)
not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+ if (!not_checked || !checked) {
+ kfree(not_checked);
+ kfree(checked);
+ return;
+ }
pr_info("Array access within bounds ...\n");
/* For both, touch all bytes in the actual member size. */
@@ -346,10 +351,13 @@ void lkdtm_ARRAY_BOUNDS(void)
kfree(not_checked);
kfree(checked);
pr_err("FAIL: survived array bounds overflow!\n");
- pr_expected_config(CONFIG_UBSAN_BOUNDS);
+ if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
+ pr_expected_config(CONFIG_UBSAN_TRAP);
+ else
+ pr_expected_config(CONFIG_UBSAN_BOUNDS);
}
-void lkdtm_CORRUPT_LIST_ADD(void)
+static void lkdtm_CORRUPT_LIST_ADD(void)
{
/*
* Initially, an empty list via LIST_HEAD:
@@ -389,7 +397,7 @@ void lkdtm_CORRUPT_LIST_ADD(void)
}
}
-void lkdtm_CORRUPT_LIST_DEL(void)
+static void lkdtm_CORRUPT_LIST_DEL(void)
{
LIST_HEAD(test_head);
struct lkdtm_list item;
@@ -417,7 +425,7 @@ void lkdtm_CORRUPT_LIST_DEL(void)
}
/* Test that VMAP_STACK is actually allocating with a leading guard page */
-void lkdtm_STACK_GUARD_PAGE_LEADING(void)
+static void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
const unsigned char *stack = task_stack_page(current);
const unsigned char *ptr = stack - 1;
@@ -431,7 +439,7 @@ void lkdtm_STACK_GUARD_PAGE_LEADING(void)
}
/* Test that VMAP_STACK is actually allocating with a trailing guard page */
-void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
+static void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
const unsigned char *stack = task_stack_page(current);
const unsigned char *ptr = stack + THREAD_SIZE;
@@ -444,7 +452,7 @@ void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}
-void lkdtm_UNSET_SMEP(void)
+static void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH 64
@@ -510,7 +518,7 @@ void lkdtm_UNSET_SMEP(void)
#endif
}
-void lkdtm_DOUBLE_FAULT(void)
+static void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
/*
@@ -558,7 +566,7 @@ static noinline void change_pac_parameters(void)
}
#endif
-noinline void lkdtm_CORRUPT_PAC(void)
+static noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE 10
@@ -586,3 +594,37 @@ noinline void lkdtm_CORRUPT_PAC(void)
pr_err("XFAIL: this test is arm64-only\n");
#endif
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(PANIC),
+ CRASHTYPE(BUG),
+ CRASHTYPE(WARNING),
+ CRASHTYPE(WARNING_MESSAGE),
+ CRASHTYPE(EXCEPTION),
+ CRASHTYPE(LOOP),
+ CRASHTYPE(EXHAUST_STACK),
+ CRASHTYPE(CORRUPT_STACK),
+ CRASHTYPE(CORRUPT_STACK_STRONG),
+ CRASHTYPE(REPORT_STACK),
+ CRASHTYPE(REPORT_STACK_CANARY),
+ CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+ CRASHTYPE(SOFTLOCKUP),
+ CRASHTYPE(HARDLOCKUP),
+ CRASHTYPE(SPINLOCKUP),
+ CRASHTYPE(HUNG_TASK),
+ CRASHTYPE(OVERFLOW_SIGNED),
+ CRASHTYPE(OVERFLOW_UNSIGNED),
+ CRASHTYPE(ARRAY_BOUNDS),
+ CRASHTYPE(CORRUPT_LIST_ADD),
+ CRASHTYPE(CORRUPT_LIST_DEL),
+ CRASHTYPE(STACK_GUARD_PAGE_LEADING),
+ CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
+ CRASHTYPE(UNSET_SMEP),
+ CRASHTYPE(DOUBLE_FAULT),
+ CRASHTYPE(CORRUPT_PAC),
+};
+
+struct crashtype_category bugs_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c
index c9aeddef1044f..666a7f4bc1375 100644
--- a/drivers/misc/lkdtm/cfi.c
+++ b/drivers/misc/lkdtm/cfi.c
@@ -3,6 +3,7 @@
* This is for all the tests relating directly to Control Flow Integrity.
*/
#include "lkdtm.h"
+#include <asm/page.h>
static int called_count;
@@ -22,7 +23,7 @@ static noinline int lkdtm_increment_int(int *counter)
/*
* This tries to call an indirect function with a mismatched prototype.
*/
-void lkdtm_CFI_FORWARD_PROTO(void)
+static void lkdtm_CFI_FORWARD_PROTO(void)
{
/*
* Matches lkdtm_increment_void()'s prototype, but not
@@ -41,3 +42,145 @@ void lkdtm_CFI_FORWARD_PROTO(void)
pr_err("FAIL: survived mismatched prototype function call!\n");
pr_expected_config(CONFIG_CFI_CLANG);
}
+
+/*
+ * This can stay local to LKDTM, as there should not be a production reason
+ * to disable PAC && SCS.
+ */
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+# ifdef CONFIG_ARM64_BTI_KERNEL
+# define __no_pac "branch-protection=bti"
+# else
+# define __no_pac "branch-protection=none"
+# endif
+# define __no_ret_protection __noscs __attribute__((__target__(__no_pac)))
+#else
+# define __no_ret_protection __noscs
+#endif
+
+#define no_pac_addr(addr) \
+ ((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
+
+/* The ultimate ROP gadget. */
+static noinline __no_ret_protection
+void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
+{
+ /* Use of volatile is to make sure final write isn't seen as a dead store. */
+ unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+
+ /* Make sure we've found the right place on the stack before writing it. */
+ if (no_pac_addr(*ret_addr) == expected)
+ *ret_addr = (addr);
+ else
+ /* Check architecture, stack layout, or compiler behavior... */
+ pr_warn("Eek: return address mismatch! %px != %px\n",
+ *ret_addr, addr);
+}
+
+static noinline
+void set_return_addr(unsigned long *expected, unsigned long *addr)
+{
+ /* Use of volatile is to make sure final write isn't seen as a dead store. */
+ unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+
+ /* Make sure we've found the right place on the stack before writing it. */
+ if (no_pac_addr(*ret_addr) == expected)
+ *ret_addr = (addr);
+ else
+ /* Check architecture, stack layout, or compiler behavior... */
+ pr_warn("Eek: return address mismatch! %px != %px\n",
+ *ret_addr, addr);
+}
+
+static volatile int force_check;
+
+static void lkdtm_CFI_BACKWARD(void)
+{
+ /* Use calculated gotos to keep labels addressable. */
+ void *labels[] = {0, &&normal, &&redirected, &&check_normal, &&check_redirected};
+
+ pr_info("Attempting unchecked stack return address redirection ...\n");
+
+ /* Always false */
+ if (force_check) {
+ /*
+ * Prepare to call with NULLs to avoid parameters being treated as
+ * constants in -02.
+ */
+ set_return_addr_unchecked(NULL, NULL);
+ set_return_addr(NULL, NULL);
+ if (force_check)
+ goto *labels[1];
+ if (force_check)
+ goto *labels[2];
+ if (force_check)
+ goto *labels[3];
+ if (force_check)
+ goto *labels[4];
+ return;
+ }
+
+ /*
+ * Use fallthrough switch case to keep basic block ordering between
+ * set_return_addr*() and the label after it.
+ */
+ switch (force_check) {
+ case 0:
+ set_return_addr_unchecked(&&normal, &&redirected);
+ fallthrough;
+ case 1:
+normal:
+ /* Always true */
+ if (!force_check) {
+ pr_err("FAIL: stack return address manipulation failed!\n");
+ /* If we can't redirect "normally", we can't test mitigations. */
+ return;
+ }
+ break;
+ default:
+redirected:
+ pr_info("ok: redirected stack return address.\n");
+ break;
+ }
+
+ pr_info("Attempting checked stack return address redirection ...\n");
+
+ switch (force_check) {
+ case 0:
+ set_return_addr(&&check_normal, &&check_redirected);
+ fallthrough;
+ case 1:
+check_normal:
+ /* Always true */
+ if (!force_check) {
+ pr_info("ok: control flow unchanged.\n");
+ return;
+ }
+
+check_redirected:
+ pr_err("FAIL: stack return address was redirected!\n");
+ break;
+ }
+
+ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL)) {
+ pr_expected_config(CONFIG_ARM64_PTR_AUTH_KERNEL);
+ return;
+ }
+ if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK)) {
+ pr_expected_config(CONFIG_SHADOW_CALL_STACK);
+ return;
+ }
+ pr_warn("This is probably expected, since this %s was built *without* %s=y nor %s=y\n",
+ lkdtm_kernel_info,
+ "CONFIG_ARM64_PTR_AUTH_KERNEL", "CONFIG_SHADOW_CALL_STACK");
+}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(CFI_FORWARD_PROTO),
+ CRASHTYPE(CFI_BACKWARD),
+};
+
+struct crashtype_category cfi_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index e2228b6fc09bb..b4712ff196b4e 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -86,109 +86,21 @@ static struct crashpoint crashpoints[] = {
#endif
};
-
-/* Crash types. */
-struct crashtype {
- const char *name;
- void (*func)(void);
-};
-
-#define CRASHTYPE(_name) \
- { \
- .name = __stringify(_name), \
- .func = lkdtm_ ## _name, \
- }
-
-/* Define the possible types of crashes that can be triggered. */
-static const struct crashtype crashtypes[] = {
- CRASHTYPE(PANIC),
- CRASHTYPE(BUG),
- CRASHTYPE(WARNING),
- CRASHTYPE(WARNING_MESSAGE),
- CRASHTYPE(EXCEPTION),
- CRASHTYPE(LOOP),
- CRASHTYPE(EXHAUST_STACK),
- CRASHTYPE(CORRUPT_STACK),
- CRASHTYPE(CORRUPT_STACK_STRONG),
- CRASHTYPE(REPORT_STACK),
- CRASHTYPE(REPORT_STACK_CANARY),
- CRASHTYPE(CORRUPT_LIST_ADD),
- CRASHTYPE(CORRUPT_LIST_DEL),
- CRASHTYPE(STACK_GUARD_PAGE_LEADING),
- CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
- CRASHTYPE(UNSET_SMEP),
- CRASHTYPE(CORRUPT_PAC),
- CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
- CRASHTYPE(SLAB_LINEAR_OVERFLOW),
- CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
- CRASHTYPE(WRITE_AFTER_FREE),
- CRASHTYPE(READ_AFTER_FREE),
- CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
- CRASHTYPE(READ_BUDDY_AFTER_FREE),
- CRASHTYPE(SLAB_INIT_ON_ALLOC),
- CRASHTYPE(BUDDY_INIT_ON_ALLOC),
- CRASHTYPE(SLAB_FREE_DOUBLE),
- CRASHTYPE(SLAB_FREE_CROSS),
- CRASHTYPE(SLAB_FREE_PAGE),
- CRASHTYPE(SOFTLOCKUP),
- CRASHTYPE(HARDLOCKUP),
- CRASHTYPE(SPINLOCKUP),
- CRASHTYPE(HUNG_TASK),
- CRASHTYPE(OVERFLOW_SIGNED),
- CRASHTYPE(OVERFLOW_UNSIGNED),
- CRASHTYPE(ARRAY_BOUNDS),
- CRASHTYPE(EXEC_DATA),
- CRASHTYPE(EXEC_STACK),
- CRASHTYPE(EXEC_KMALLOC),
- CRASHTYPE(EXEC_VMALLOC),
- CRASHTYPE(EXEC_RODATA),
- CRASHTYPE(EXEC_USERSPACE),
- CRASHTYPE(EXEC_NULL),
- CRASHTYPE(ACCESS_USERSPACE),
- CRASHTYPE(ACCESS_NULL),
- CRASHTYPE(WRITE_RO),
- CRASHTYPE(WRITE_RO_AFTER_INIT),
- CRASHTYPE(WRITE_KERN),
- CRASHTYPE(WRITE_OPD),
- CRASHTYPE(REFCOUNT_INC_OVERFLOW),
- CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
- CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
- CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
- CRASHTYPE(REFCOUNT_DEC_ZERO),
- CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
- CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
- CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
- CRASHTYPE(REFCOUNT_INC_ZERO),
- CRASHTYPE(REFCOUNT_ADD_ZERO),
- CRASHTYPE(REFCOUNT_INC_SATURATED),
- CRASHTYPE(REFCOUNT_DEC_SATURATED),
- CRASHTYPE(REFCOUNT_ADD_SATURATED),
- CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
- CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
- CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
- CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
- CRASHTYPE(REFCOUNT_TIMING),
- CRASHTYPE(ATOMIC_TIMING),
- CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
- CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
- CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO),
- CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM),
- CRASHTYPE(USERCOPY_STACK_FRAME_TO),
- CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
- CRASHTYPE(USERCOPY_STACK_BEYOND),
- CRASHTYPE(USERCOPY_KERNEL),
- CRASHTYPE(STACKLEAK_ERASING),
- CRASHTYPE(CFI_FORWARD_PROTO),
- CRASHTYPE(FORTIFIED_OBJECT),
- CRASHTYPE(FORTIFIED_SUBOBJECT),
- CRASHTYPE(FORTIFIED_STRSCPY),
- CRASHTYPE(DOUBLE_FAULT),
+/* List of possible types for crashes that can be triggered. */
+static const struct crashtype_category *crashtype_categories[] = {
+ &bugs_crashtypes,
+ &heap_crashtypes,
+ &perms_crashtypes,
+ &refcount_crashtypes,
+ &usercopy_crashtypes,
+ &stackleak_crashtypes,
+ &cfi_crashtypes,
+ &fortify_crashtypes,
#ifdef CONFIG_PPC_64S_HASH_MMU
- CRASHTYPE(PPC_SLB_MULTIHIT),
+ &powerpc_crashtypes,
#endif
};
-
/* Global kprobe entry and crashtype. */
static struct kprobe *lkdtm_kprobe;
static struct crashpoint *lkdtm_crashpoint;
@@ -223,11 +135,16 @@ char *lkdtm_kernel_info;
/* Return the crashtype number or NULL if the name is invalid */
static const struct crashtype *find_crashtype(const char *name)
{
- int i;
+ int cat, idx;
+
+ for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
+ for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
+ struct crashtype *crashtype;
- for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
- if (!strcmp(name, crashtypes[i].name))
- return &crashtypes[i];
+ crashtype = &crashtype_categories[cat]->crashtypes[idx];
+ if (!strcmp(name, crashtype->name))
+ return crashtype;
+ }
}
return NULL;
@@ -347,17 +264,24 @@ static ssize_t lkdtm_debugfs_entry(struct file *f,
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
size_t count, loff_t *off)
{
+ int n, cat, idx;
+ ssize_t out;
char *buf;
- int i, n, out;
buf = (char *)__get_free_page(GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
n = scnprintf(buf, PAGE_SIZE, "Available crash types:\n");
- for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
- n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
- crashtypes[i].name);
+
+ for (cat = 0; cat < ARRAY_SIZE(crashtype_categories); cat++) {
+ for (idx = 0; idx < crashtype_categories[cat]->len; idx++) {
+ struct crashtype *crashtype;
+
+ crashtype = &crashtype_categories[cat]->crashtypes[idx];
+ n += scnprintf(buf + n, PAGE_SIZE - n, "%s\n",
+ crashtype->name);
+ }
}
buf[n] = '\0';
diff --git a/drivers/misc/lkdtm/fortify.c b/drivers/misc/lkdtm/fortify.c
index ab33bb5e2e7af..080293fa3c521 100644
--- a/drivers/misc/lkdtm/fortify.c
+++ b/drivers/misc/lkdtm/fortify.c
@@ -10,7 +10,7 @@
static volatile int fortify_scratch_space;
-void lkdtm_FORTIFIED_OBJECT(void)
+static void lkdtm_FORTIFIED_OBJECT(void)
{
struct target {
char a[10];
@@ -31,7 +31,7 @@ void lkdtm_FORTIFIED_OBJECT(void)
pr_expected_config(CONFIG_FORTIFY_SOURCE);
}
-void lkdtm_FORTIFIED_SUBOBJECT(void)
+static void lkdtm_FORTIFIED_SUBOBJECT(void)
{
struct target {
char a[10];
@@ -67,7 +67,7 @@ void lkdtm_FORTIFIED_SUBOBJECT(void)
* strscpy and generate a panic because there is a write overflow (i.e. src
* length is greater than dst length).
*/
-void lkdtm_FORTIFIED_STRSCPY(void)
+static void lkdtm_FORTIFIED_STRSCPY(void)
{
char *src;
char dst[5];
@@ -134,3 +134,14 @@ void lkdtm_FORTIFIED_STRSCPY(void)
kfree(src);
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(FORTIFIED_OBJECT),
+ CRASHTYPE(FORTIFIED_SUBOBJECT),
+ CRASHTYPE(FORTIFIED_STRSCPY),
+};
+
+struct crashtype_category fortify_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 8a92f5a800faa..62516078a619f 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -22,8 +22,11 @@ static volatile int __offset = 1;
/*
* If there aren't guard pages, it's likely that a consecutive allocation will
* let us overflow into the second allocation without overwriting something real.
+ *
+ * This should always be caught because there is an unconditional unmapped
+ * page after vmap allocations.
*/
-void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
+static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
char *one, *two;
@@ -41,8 +44,11 @@ void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
* This tries to stay within the next largest power-of-2 kmalloc cache
* to avoid actually overwriting anything important if it's not detected
* correctly.
+ *
+ * This should get caught by either memory tagging, KASan, or by using
+ * CONFIG_SLUB_DEBUG=y and slub_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
*/
-void lkdtm_SLAB_LINEAR_OVERFLOW(void)
+static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
size_t len = 1020;
u32 *data = kmalloc(len, GFP_KERNEL);
@@ -50,11 +56,12 @@ void lkdtm_SLAB_LINEAR_OVERFLOW(void)
return;
pr_info("Attempting slab linear overflow ...\n");
+ OPTIMIZER_HIDE_VAR(data);
data[1024 / sizeof(u32)] = 0x12345678;
kfree(data);
}
-void lkdtm_WRITE_AFTER_FREE(void)
+static void lkdtm_WRITE_AFTER_FREE(void)
{
int *base, *again;
size_t len = 1024;
@@ -80,7 +87,7 @@ void lkdtm_WRITE_AFTER_FREE(void)
pr_info("Hmm, didn't get the same memory range.\n");
}
-void lkdtm_READ_AFTER_FREE(void)
+static void lkdtm_READ_AFTER_FREE(void)
{
int *base, *val, saw;
size_t len = 1024;
@@ -124,7 +131,7 @@ void lkdtm_READ_AFTER_FREE(void)
kfree(val);
}
-void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
+static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
if (!p) {
@@ -144,7 +151,7 @@ void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
schedule();
}
-void lkdtm_READ_BUDDY_AFTER_FREE(void)
+static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
int saw, *val;
@@ -181,7 +188,7 @@ void lkdtm_READ_BUDDY_AFTER_FREE(void)
kfree(val);
}
-void lkdtm_SLAB_INIT_ON_ALLOC(void)
+static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
u8 *first;
u8 *val;
@@ -213,7 +220,7 @@ void lkdtm_SLAB_INIT_ON_ALLOC(void)
kfree(val);
}
-void lkdtm_BUDDY_INIT_ON_ALLOC(void)
+static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
u8 *first;
u8 *val;
@@ -246,7 +253,7 @@ void lkdtm_BUDDY_INIT_ON_ALLOC(void)
free_page((unsigned long)val);
}
-void lkdtm_SLAB_FREE_DOUBLE(void)
+static void lkdtm_SLAB_FREE_DOUBLE(void)
{
int *val;
@@ -263,7 +270,7 @@ void lkdtm_SLAB_FREE_DOUBLE(void)
kmem_cache_free(double_free_cache, val);
}
-void lkdtm_SLAB_FREE_CROSS(void)
+static void lkdtm_SLAB_FREE_CROSS(void)
{
int *val;
@@ -279,7 +286,7 @@ void lkdtm_SLAB_FREE_CROSS(void)
kmem_cache_free(b_cache, val);
}
-void lkdtm_SLAB_FREE_PAGE(void)
+static void lkdtm_SLAB_FREE_PAGE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
@@ -313,3 +320,22 @@ void __exit lkdtm_heap_exit(void)
kmem_cache_destroy(a_cache);
kmem_cache_destroy(b_cache);
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(SLAB_LINEAR_OVERFLOW),
+ CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
+ CRASHTYPE(WRITE_AFTER_FREE),
+ CRASHTYPE(READ_AFTER_FREE),
+ CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
+ CRASHTYPE(READ_BUDDY_AFTER_FREE),
+ CRASHTYPE(SLAB_INIT_ON_ALLOC),
+ CRASHTYPE(BUDDY_INIT_ON_ALLOC),
+ CRASHTYPE(SLAB_FREE_DOUBLE),
+ CRASHTYPE(SLAB_FREE_CROSS),
+ CRASHTYPE(SLAB_FREE_PAGE),
+};
+
+struct crashtype_category heap_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 305fc2ec3f256..015e0484026b2 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -9,19 +9,19 @@
extern char *lkdtm_kernel_info;
#define pr_expected_config(kconfig) \
-{ \
+do { \
if (IS_ENABLED(kconfig)) \
pr_err("Unexpected! This %s was built with " #kconfig "=y\n", \
lkdtm_kernel_info); \
else \
pr_warn("This is probably expected, since this %s was built *without* " #kconfig "=y\n", \
lkdtm_kernel_info); \
-}
+} while (0)
#ifndef MODULE
int lkdtm_check_bool_cmdline(const char *param);
#define pr_expected_config_param(kconfig, param) \
-{ \
+do { \
if (IS_ENABLED(kconfig)) { \
switch (lkdtm_check_bool_cmdline(param)) { \
case 0: \
@@ -52,119 +52,49 @@ int lkdtm_check_bool_cmdline(const char *param);
break; \
} \
} \
-}
+} while (0)
#else
#define pr_expected_config_param(kconfig, param) pr_expected_config(kconfig)
#endif
-/* bugs.c */
+/* Crash types. */
+struct crashtype {
+ const char *name;
+ void (*func)(void);
+};
+
+#define CRASHTYPE(_name) \
+ { \
+ .name = __stringify(_name), \
+ .func = lkdtm_ ## _name, \
+ }
+
+/* Category's collection of crashtypes. */
+struct crashtype_category {
+ struct crashtype *crashtypes;
+ size_t len;
+};
+
+/* Each category's crashtypes list. */
+extern struct crashtype_category bugs_crashtypes;
+extern struct crashtype_category heap_crashtypes;
+extern struct crashtype_category perms_crashtypes;
+extern struct crashtype_category refcount_crashtypes;
+extern struct crashtype_category usercopy_crashtypes;
+extern struct crashtype_category stackleak_crashtypes;
+extern struct crashtype_category cfi_crashtypes;
+extern struct crashtype_category fortify_crashtypes;
+extern struct crashtype_category powerpc_crashtypes;
+
+/* Each category's init/exit routines. */
void __init lkdtm_bugs_init(int *recur_param);
-void lkdtm_PANIC(void);
-void lkdtm_BUG(void);
-void lkdtm_WARNING(void);
-void lkdtm_WARNING_MESSAGE(void);
-void lkdtm_EXCEPTION(void);
-void lkdtm_LOOP(void);
-void lkdtm_EXHAUST_STACK(void);
-void lkdtm_CORRUPT_STACK(void);
-void lkdtm_CORRUPT_STACK_STRONG(void);
-void lkdtm_REPORT_STACK(void);
-void lkdtm_REPORT_STACK_CANARY(void);
-void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
-void lkdtm_SOFTLOCKUP(void);
-void lkdtm_HARDLOCKUP(void);
-void lkdtm_SPINLOCKUP(void);
-void lkdtm_HUNG_TASK(void);
-void lkdtm_OVERFLOW_SIGNED(void);
-void lkdtm_OVERFLOW_UNSIGNED(void);
-void lkdtm_ARRAY_BOUNDS(void);
-void lkdtm_CORRUPT_LIST_ADD(void);
-void lkdtm_CORRUPT_LIST_DEL(void);
-void lkdtm_STACK_GUARD_PAGE_LEADING(void);
-void lkdtm_STACK_GUARD_PAGE_TRAILING(void);
-void lkdtm_UNSET_SMEP(void);
-void lkdtm_DOUBLE_FAULT(void);
-void lkdtm_CORRUPT_PAC(void);
-
-/* heap.c */
void __init lkdtm_heap_init(void);
void __exit lkdtm_heap_exit(void);
-void lkdtm_VMALLOC_LINEAR_OVERFLOW(void);
-void lkdtm_SLAB_LINEAR_OVERFLOW(void);
-void lkdtm_WRITE_AFTER_FREE(void);
-void lkdtm_READ_AFTER_FREE(void);
-void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
-void lkdtm_READ_BUDDY_AFTER_FREE(void);
-void lkdtm_SLAB_INIT_ON_ALLOC(void);
-void lkdtm_BUDDY_INIT_ON_ALLOC(void);
-void lkdtm_SLAB_FREE_DOUBLE(void);
-void lkdtm_SLAB_FREE_CROSS(void);
-void lkdtm_SLAB_FREE_PAGE(void);
-
-/* perms.c */
void __init lkdtm_perms_init(void);
-void lkdtm_WRITE_RO(void);
-void lkdtm_WRITE_RO_AFTER_INIT(void);
-void lkdtm_WRITE_KERN(void);
-void lkdtm_WRITE_OPD(void);
-void lkdtm_EXEC_DATA(void);
-void lkdtm_EXEC_STACK(void);
-void lkdtm_EXEC_KMALLOC(void);
-void lkdtm_EXEC_VMALLOC(void);
-void lkdtm_EXEC_RODATA(void);
-void lkdtm_EXEC_USERSPACE(void);
-void lkdtm_EXEC_NULL(void);
-void lkdtm_ACCESS_USERSPACE(void);
-void lkdtm_ACCESS_NULL(void);
-
-/* refcount.c */
-void lkdtm_REFCOUNT_INC_OVERFLOW(void);
-void lkdtm_REFCOUNT_ADD_OVERFLOW(void);
-void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void);
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void);
-void lkdtm_REFCOUNT_DEC_ZERO(void);
-void lkdtm_REFCOUNT_DEC_NEGATIVE(void);
-void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void);
-void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void);
-void lkdtm_REFCOUNT_INC_ZERO(void);
-void lkdtm_REFCOUNT_ADD_ZERO(void);
-void lkdtm_REFCOUNT_INC_SATURATED(void);
-void lkdtm_REFCOUNT_DEC_SATURATED(void);
-void lkdtm_REFCOUNT_ADD_SATURATED(void);
-void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void);
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void);
-void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void);
-void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void);
-void lkdtm_REFCOUNT_TIMING(void);
-void lkdtm_ATOMIC_TIMING(void);
-
-/* rodata.c */
-void lkdtm_rodata_do_nothing(void);
-
-/* usercopy.c */
void __init lkdtm_usercopy_init(void);
void __exit lkdtm_usercopy_exit(void);
-void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
-void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
-void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void);
-void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void);
-void lkdtm_USERCOPY_STACK_FRAME_TO(void);
-void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
-void lkdtm_USERCOPY_STACK_BEYOND(void);
-void lkdtm_USERCOPY_KERNEL(void);
-
-/* stackleak.c */
-void lkdtm_STACKLEAK_ERASING(void);
-
-/* cfi.c */
-void lkdtm_CFI_FORWARD_PROTO(void);
-/* fortify.c */
-void lkdtm_FORTIFIED_OBJECT(void);
-void lkdtm_FORTIFIED_SUBOBJECT(void);
-void lkdtm_FORTIFIED_STRSCPY(void);
-
-/* powerpc.c */
-void lkdtm_PPC_SLB_MULTIHIT(void);
+/* Special declaration for function-in-rodata. */
+void lkdtm_rodata_do_nothing(void);
#endif
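
The lkdtm.h hunk above also converts pr_expected_config() and
pr_expected_config_param() from bare { } blocks into do { } while (0). A
minimal stand-alone C illustration of the pitfall that avoids is below; the
REPORT_* macros are hypothetical and exist only for this example.

/*
 * With a bare { } macro body, "MACRO(x);" expands to "{ ... };" - the
 * stray semicolon ends an if-statement early and orphans a following
 * else.  Wrapping the body in do { } while (0) makes the expansion a
 * single statement that composes safely with if/else.
 */
#include <stdio.h>

#define REPORT_BRACES(x)  { printf("checking %d\n", (x)); printf("done\n"); }
#define REPORT_DOWHILE(x) do { printf("checking %d\n", (x)); printf("done\n"); } while (0)

static void check(int enabled)
{
	/*
	 * Using REPORT_BRACES(enabled); here would not compile: the "};"
	 * terminates the if, leaving the else with no matching if.
	 */
	if (enabled)
		REPORT_DOWHILE(enabled);	/* expands to one statement */
	else
		printf("feature disabled\n");
}

int main(void)
{
	check(1);
	check(0);
	return 0;
}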
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index 2c6aba3ff32b9..b93404d656509 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -103,7 +103,7 @@ static void execute_user_location(void *dst)
pr_err("FAIL: func returned\n");
}
-void lkdtm_WRITE_RO(void)
+static void lkdtm_WRITE_RO(void)
{
/* Explicitly cast away "const" for the test and make volatile. */
volatile unsigned long *ptr = (unsigned long *)&rodata;
@@ -113,7 +113,7 @@ void lkdtm_WRITE_RO(void)
pr_err("FAIL: survived bad write\n");
}
-void lkdtm_WRITE_RO_AFTER_INIT(void)
+static void lkdtm_WRITE_RO_AFTER_INIT(void)
{
volatile unsigned long *ptr = &ro_after_init;
@@ -132,7 +132,7 @@ void lkdtm_WRITE_RO_AFTER_INIT(void)
pr_err("FAIL: survived bad write\n");
}
-void lkdtm_WRITE_KERN(void)
+static void lkdtm_WRITE_KERN(void)
{
size_t size;
volatile unsigned char *ptr;
@@ -149,7 +149,7 @@ void lkdtm_WRITE_KERN(void)
do_overwritten();
}
-void lkdtm_WRITE_OPD(void)
+static void lkdtm_WRITE_OPD(void)
{
size_t size = sizeof(func_desc_t);
void (*func)(void) = do_nothing;
@@ -166,38 +166,38 @@ void lkdtm_WRITE_OPD(void)
func();
}
-void lkdtm_EXEC_DATA(void)
+static void lkdtm_EXEC_DATA(void)
{
execute_location(data_area, CODE_WRITE);
}
-void lkdtm_EXEC_STACK(void)
+static void lkdtm_EXEC_STACK(void)
{
u8 stack_area[EXEC_SIZE];
execute_location(stack_area, CODE_WRITE);
}
-void lkdtm_EXEC_KMALLOC(void)
+static void lkdtm_EXEC_KMALLOC(void)
{
u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
execute_location(kmalloc_area, CODE_WRITE);
kfree(kmalloc_area);
}
-void lkdtm_EXEC_VMALLOC(void)
+static void lkdtm_EXEC_VMALLOC(void)
{
u32 *vmalloc_area = vmalloc(EXEC_SIZE);
execute_location(vmalloc_area, CODE_WRITE);
vfree(vmalloc_area);
}
-void lkdtm_EXEC_RODATA(void)
+static void lkdtm_EXEC_RODATA(void)
{
execute_location(dereference_function_descriptor(lkdtm_rodata_do_nothing),
CODE_AS_IS);
}
-void lkdtm_EXEC_USERSPACE(void)
+static void lkdtm_EXEC_USERSPACE(void)
{
unsigned long user_addr;
@@ -212,12 +212,12 @@ void lkdtm_EXEC_USERSPACE(void)
vm_munmap(user_addr, PAGE_SIZE);
}
-void lkdtm_EXEC_NULL(void)
+static void lkdtm_EXEC_NULL(void)
{
execute_location(NULL, CODE_AS_IS);
}
-void lkdtm_ACCESS_USERSPACE(void)
+static void lkdtm_ACCESS_USERSPACE(void)
{
unsigned long user_addr, tmp = 0;
unsigned long *ptr;
@@ -250,7 +250,7 @@ void lkdtm_ACCESS_USERSPACE(void)
vm_munmap(user_addr, PAGE_SIZE);
}
-void lkdtm_ACCESS_NULL(void)
+static void lkdtm_ACCESS_NULL(void)
{
unsigned long tmp;
volatile unsigned long *ptr = (unsigned long *)NULL;
@@ -270,3 +270,24 @@ void __init lkdtm_perms_init(void)
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(WRITE_RO),
+ CRASHTYPE(WRITE_RO_AFTER_INIT),
+ CRASHTYPE(WRITE_KERN),
+ CRASHTYPE(WRITE_OPD),
+ CRASHTYPE(EXEC_DATA),
+ CRASHTYPE(EXEC_STACK),
+ CRASHTYPE(EXEC_KMALLOC),
+ CRASHTYPE(EXEC_VMALLOC),
+ CRASHTYPE(EXEC_RODATA),
+ CRASHTYPE(EXEC_USERSPACE),
+ CRASHTYPE(EXEC_NULL),
+ CRASHTYPE(ACCESS_USERSPACE),
+ CRASHTYPE(ACCESS_NULL),
+};
+
+struct crashtype_category perms_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/powerpc.c b/drivers/misc/lkdtm/powerpc.c
index 077c9f9ed8d08..be385449911a8 100644
--- a/drivers/misc/lkdtm/powerpc.c
+++ b/drivers/misc/lkdtm/powerpc.c
@@ -100,7 +100,7 @@ static void insert_dup_slb_entry_0(void)
preempt_enable();
}
-void lkdtm_PPC_SLB_MULTIHIT(void)
+static void lkdtm_PPC_SLB_MULTIHIT(void)
{
if (!radix_enabled()) {
pr_info("Injecting SLB multihit errors\n");
@@ -118,3 +118,12 @@ void lkdtm_PPC_SLB_MULTIHIT(void)
pr_err("XFAIL: This test is for ppc64 and with hash mode MMU only\n");
}
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(PPC_SLB_MULTIHIT),
+};
+
+struct crashtype_category powerpc_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/refcount.c b/drivers/misc/lkdtm/refcount.c
index de7c5ab528d9e..5cd488f54cfa5 100644
--- a/drivers/misc/lkdtm/refcount.c
+++ b/drivers/misc/lkdtm/refcount.c
@@ -24,7 +24,7 @@ static void overflow_check(refcount_t *ref)
* A refcount_inc() above the maximum value of the refcount implementation,
* should at least saturate, and at most also WARN.
*/
-void lkdtm_REFCOUNT_INC_OVERFLOW(void)
+static void lkdtm_REFCOUNT_INC_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
@@ -40,7 +40,7 @@ void lkdtm_REFCOUNT_INC_OVERFLOW(void)
}
/* refcount_add() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
+static void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX - 1);
@@ -58,7 +58,7 @@ void lkdtm_REFCOUNT_ADD_OVERFLOW(void)
}
/* refcount_inc_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
@@ -70,7 +70,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_OVERFLOW(void)
}
/* refcount_add_not_zero() should behave just like refcount_inc() above. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_OVERFLOW(void)
{
refcount_t over = REFCOUNT_INIT(REFCOUNT_MAX);
@@ -103,7 +103,7 @@ static void check_zero(refcount_t *ref)
* zero it should either saturate (when inc-from-zero isn't protected)
* or stay at zero (when inc-from-zero is protected) and should WARN for both.
*/
-void lkdtm_REFCOUNT_DEC_ZERO(void)
+static void lkdtm_REFCOUNT_DEC_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(2);
@@ -142,7 +142,7 @@ static void check_negative(refcount_t *ref, int start)
}
/* A refcount_dec() going negative should saturate and may WARN. */
-void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
+static void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(0);
@@ -156,7 +156,7 @@ void lkdtm_REFCOUNT_DEC_NEGATIVE(void)
* A refcount_dec_and_test() should act like refcount_dec() above when
* going negative.
*/
-void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
+static void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(0);
@@ -171,7 +171,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_NEGATIVE(void)
* A refcount_sub_and_test() should act like refcount_dec_and_test()
* above when going negative.
*/
-void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
+static void lkdtm_REFCOUNT_SUB_AND_TEST_NEGATIVE(void)
{
refcount_t neg = REFCOUNT_INIT(3);
@@ -203,7 +203,7 @@ static void check_from_zero(refcount_t *ref)
/*
* A refcount_inc() from zero should pin to zero or saturate and may WARN.
*/
-void lkdtm_REFCOUNT_INC_ZERO(void)
+static void lkdtm_REFCOUNT_INC_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(0);
@@ -228,7 +228,7 @@ void lkdtm_REFCOUNT_INC_ZERO(void)
* A refcount_add() should act like refcount_inc() above when starting
* at zero.
*/
-void lkdtm_REFCOUNT_ADD_ZERO(void)
+static void lkdtm_REFCOUNT_ADD_ZERO(void)
{
refcount_t zero = REFCOUNT_INIT(0);
@@ -267,7 +267,7 @@ static void check_saturated(refcount_t *ref)
* A refcount_inc() from a saturated value should at most warn about
* being saturated already.
*/
-void lkdtm_REFCOUNT_INC_SATURATED(void)
+static void lkdtm_REFCOUNT_INC_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -278,7 +278,7 @@ void lkdtm_REFCOUNT_INC_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_SATURATED(void)
+static void lkdtm_REFCOUNT_DEC_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -289,7 +289,7 @@ void lkdtm_REFCOUNT_DEC_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_SATURATED(void)
+static void lkdtm_REFCOUNT_ADD_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -300,7 +300,7 @@ void lkdtm_REFCOUNT_ADD_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
+static void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -312,7 +312,7 @@ void lkdtm_REFCOUNT_INC_NOT_ZERO_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
+static void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -324,7 +324,7 @@ void lkdtm_REFCOUNT_ADD_NOT_ZERO_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
+static void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -336,7 +336,7 @@ void lkdtm_REFCOUNT_DEC_AND_TEST_SATURATED(void)
}
/* Should act like refcount_inc() above from saturated. */
-void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
+static void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
{
refcount_t sat = REFCOUNT_INIT(REFCOUNT_SATURATED);
@@ -348,7 +348,7 @@ void lkdtm_REFCOUNT_SUB_AND_TEST_SATURATED(void)
}
/* Used to time the existing atomic_t when used for reference counting */
-void lkdtm_ATOMIC_TIMING(void)
+static void lkdtm_ATOMIC_TIMING(void)
{
unsigned int i;
atomic_t count = ATOMIC_INIT(1);
@@ -373,7 +373,7 @@ void lkdtm_ATOMIC_TIMING(void)
* cd /sys/kernel/debug/provoke-crash
* perf stat -B -- cat <(echo REFCOUNT_TIMING) > DIRECT
*/
-void lkdtm_REFCOUNT_TIMING(void)
+static void lkdtm_REFCOUNT_TIMING(void)
{
unsigned int i;
refcount_t count = REFCOUNT_INIT(1);
@@ -390,3 +390,30 @@ void lkdtm_REFCOUNT_TIMING(void)
else
pr_info("refcount timing: done\n");
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(REFCOUNT_INC_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_OVERFLOW),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_OVERFLOW),
+ CRASHTYPE(REFCOUNT_DEC_ZERO),
+ CRASHTYPE(REFCOUNT_DEC_NEGATIVE),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_NEGATIVE),
+ CRASHTYPE(REFCOUNT_INC_ZERO),
+ CRASHTYPE(REFCOUNT_ADD_ZERO),
+ CRASHTYPE(REFCOUNT_INC_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_SATURATED),
+ CRASHTYPE(REFCOUNT_INC_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_ADD_NOT_ZERO_SATURATED),
+ CRASHTYPE(REFCOUNT_DEC_AND_TEST_SATURATED),
+ CRASHTYPE(REFCOUNT_SUB_AND_TEST_SATURATED),
+ CRASHTYPE(ATOMIC_TIMING),
+ CRASHTYPE(REFCOUNT_TIMING),
+};
+
+struct crashtype_category refcount_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
index 82369c6f889e2..025b133297a6b 100644
--- a/drivers/misc/lkdtm/stackleak.c
+++ b/drivers/misc/lkdtm/stackleak.c
@@ -115,7 +115,7 @@ out:
}
}
-void lkdtm_STACKLEAK_ERASING(void)
+static void lkdtm_STACKLEAK_ERASING(void)
{
unsigned long flags;
@@ -124,7 +124,7 @@ void lkdtm_STACKLEAK_ERASING(void)
local_irq_restore(flags);
}
#else /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
-void lkdtm_STACKLEAK_ERASING(void)
+static void lkdtm_STACKLEAK_ERASING(void)
{
if (IS_ENABLED(CONFIG_HAVE_ARCH_STACKLEAK)) {
pr_err("XFAIL: stackleak is not enabled (CONFIG_GCC_PLUGIN_STACKLEAK=n)\n");
@@ -133,3 +133,12 @@ void lkdtm_STACKLEAK_ERASING(void)
}
}
#endif /* defined(CONFIG_GCC_PLUGIN_STACKLEAK) */
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(STACKLEAK_ERASING),
+};
+
+struct crashtype_category stackleak_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 9161ce7ed47a6..6215ec995cd3c 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -5,6 +5,7 @@
*/
#include "lkdtm.h"
#include <linux/slab.h>
+#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
@@ -30,12 +31,12 @@ static const unsigned char test_text[] = "This is a test.\n";
*/
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
- return stack + 0;
+ return stack + unconst;
}
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
- unsigned char buf[32];
+ unsigned char buf[128];
int i;
/* Exercise stack to avoid everything living in registers. */
@@ -43,7 +44,12 @@ static noinline unsigned char *do_usercopy_stack_callee(int value)
buf[i] = value & 0xff;
}
- return trick_compiler(buf);
+ /*
+ * Put the target buffer in the middle of stack allocation
+ * so that we don't step on future stack users regardless
+ * of stack growth direction.
+ */
+ return trick_compiler(&buf[(128/2)-32]);
}
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
@@ -66,6 +72,12 @@ static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
bad_stack -= sizeof(unsigned long);
}
+#ifdef ARCH_HAS_CURRENT_STACK_POINTER
+ pr_info("stack : %px\n", (void *)current_stack_pointer);
+#endif
+ pr_info("good_stack: %px-%px\n", good_stack, good_stack + sizeof(good_stack));
+ pr_info("bad_stack : %px-%px\n", bad_stack, bad_stack + sizeof(good_stack));
+
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
@@ -119,7 +131,7 @@ free_user:
* This checks for whole-object size validation with hardened usercopy,
* with or without usercopy whitelisting.
*/
-static void do_usercopy_heap_size(bool to_user)
+static void do_usercopy_slab_size(bool to_user)
{
unsigned long user_addr;
unsigned char *one, *two;
@@ -185,9 +197,9 @@ free_kernel:
/*
* This checks for the specific whitelist window within an object. If this
- * test passes, then do_usercopy_heap_size() tests will pass too.
+ * test passes, then do_usercopy_slab_size() tests will pass too.
*/
-static void do_usercopy_heap_whitelist(bool to_user)
+static void do_usercopy_slab_whitelist(bool to_user)
{
unsigned long user_alloc;
unsigned char *buf = NULL;
@@ -261,42 +273,42 @@ free_alloc:
}
/* Callable tests. */
-void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_TO(void)
{
- do_usercopy_heap_size(true);
+ do_usercopy_slab_size(true);
}
-void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+static void lkdtm_USERCOPY_SLAB_SIZE_FROM(void)
{
- do_usercopy_heap_size(false);
+ do_usercopy_slab_size(false);
}
-void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_TO(void)
{
- do_usercopy_heap_whitelist(true);
+ do_usercopy_slab_whitelist(true);
}
-void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
+static void lkdtm_USERCOPY_SLAB_WHITELIST_FROM(void)
{
- do_usercopy_heap_whitelist(false);
+ do_usercopy_slab_whitelist(false);
}
-void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+static void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
do_usercopy_stack(true, true);
}
-void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+static void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
do_usercopy_stack(false, true);
}
-void lkdtm_USERCOPY_STACK_BEYOND(void)
+static void lkdtm_USERCOPY_STACK_BEYOND(void)
{
do_usercopy_stack(true, false);
}
-void lkdtm_USERCOPY_KERNEL(void)
+static void lkdtm_USERCOPY_KERNEL(void)
{
unsigned long user_addr;
@@ -330,6 +342,86 @@ free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
+/*
+ * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
+ * a more complete test that would include copy_from_user() would risk
+ * memory corruption. Just test copy_to_user() here, as that exercises
+ * almost exactly the same code paths.
+ */
+static void do_usercopy_page_span(const char *name, void *kaddr)
+{
+ unsigned long uaddr;
+
+ uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (uaddr >= TASK_SIZE) {
+ pr_warn("Failed to allocate user memory\n");
+ return;
+ }
+
+ /* Initialize contents. */
+ memset(kaddr, 0xAA, PAGE_SIZE);
+
+ /* Bump the kaddr forward to detect a page-spanning overflow. */
+ kaddr += PAGE_SIZE / 2;
+
+ pr_info("attempting good copy_to_user() from kernel %s: %px\n",
+ name, kaddr);
+ if (copy_to_user((void __user *)uaddr, kaddr,
+ unconst + (PAGE_SIZE / 2))) {
+ pr_err("copy_to_user() failed unexpectedly?!\n");
+ goto free_user;
+ }
+
+ pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
+ name, kaddr);
+ if (copy_to_user((void __user *)uaddr, kaddr, unconst + PAGE_SIZE)) {
+ pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
+ goto free_user;
+ }
+
+ pr_err("FAIL: bad copy_to_user() not detected!\n");
+ pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+ vm_munmap(uaddr, PAGE_SIZE);
+}
+
+static void lkdtm_USERCOPY_VMALLOC(void)
+{
+ void *addr;
+
+ addr = vmalloc(PAGE_SIZE);
+ if (!addr) {
+ pr_err("vmalloc() failed!?\n");
+ return;
+ }
+ do_usercopy_page_span("vmalloc", addr);
+ vfree(addr);
+}
+
+static void lkdtm_USERCOPY_FOLIO(void)
+{
+ struct folio *folio;
+ void *addr;
+
+ /*
+ * FIXME: Folio checking currently misses 0-order allocations, so
+ * allocate and bump forward to the last page.
+ */
+ folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
+ if (!folio) {
+ pr_err("folio_alloc() failed!?\n");
+ return;
+ }
+ addr = folio_address(folio);
+ if (addr)
+ do_usercopy_page_span("folio", addr + PAGE_SIZE);
+ else
+ pr_err("folio_address() failed?!\n");
+ folio_put(folio);
+}
+
void __init lkdtm_usercopy_init(void)
{
/* Prepare cache that lacks SLAB_USERCOPY flag. */
@@ -345,3 +437,21 @@ void __exit lkdtm_usercopy_exit(void)
{
kmem_cache_destroy(whitelist_cache);
}
+
+static struct crashtype crashtypes[] = {
+ CRASHTYPE(USERCOPY_SLAB_SIZE_TO),
+ CRASHTYPE(USERCOPY_SLAB_SIZE_FROM),
+ CRASHTYPE(USERCOPY_SLAB_WHITELIST_TO),
+ CRASHTYPE(USERCOPY_SLAB_WHITELIST_FROM),
+ CRASHTYPE(USERCOPY_STACK_FRAME_TO),
+ CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
+ CRASHTYPE(USERCOPY_STACK_BEYOND),
+ CRASHTYPE(USERCOPY_VMALLOC),
+ CRASHTYPE(USERCOPY_FOLIO),
+ CRASHTYPE(USERCOPY_KERNEL),
+};
+
+struct crashtype_category usercopy_crashtypes = {
+ .crashtypes = crashtypes,
+ .len = ARRAY_SIZE(crashtypes),
+};