Diffstat
-rw-r--r--   mm/kmsan/core.c        | 127
-rw-r--r--   mm/kmsan/kmsan_test.c  | 109
-rw-r--r--   mm/kmsan/shadow.c      |   9
3 files changed, 109 insertions, 136 deletions
diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c
index 3adb4c1d3b..c19f47af04 100644
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -83,131 +83,66 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
 /* Copy the metadata following the memmove() behavior. */
 void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n)
 {
+        depot_stack_handle_t prev_old_origin = 0, prev_new_origin = 0;
+        int i, iter, step, src_off, dst_off, oiter_src, oiter_dst;
         depot_stack_handle_t old_origin = 0, new_origin = 0;
-        int src_slots, dst_slots, i, iter, step, skip_bits;
         depot_stack_handle_t *origin_src, *origin_dst;
-        void *shadow_src, *shadow_dst;
-        u32 *align_shadow_src, shadow;
+        u8 *shadow_src, *shadow_dst;
+        u32 *align_shadow_dst;
         bool backwards;
 
         shadow_dst = kmsan_get_metadata(dst, KMSAN_META_SHADOW);
         if (!shadow_dst)
                 return;
         KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(dst, n));
+        align_shadow_dst =
+                (u32 *)ALIGN_DOWN((u64)shadow_dst, KMSAN_ORIGIN_SIZE);
 
         shadow_src = kmsan_get_metadata(src, KMSAN_META_SHADOW);
         if (!shadow_src) {
-                /*
-                 * @src is untracked: zero out destination shadow, ignore the
-                 * origins, we're done.
-                 */
-                __memset(shadow_dst, 0, n);
+                /* @src is untracked: mark @dst as initialized. */
+                kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
                 return;
         }
         KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(src, n));
 
-        __memmove(shadow_dst, shadow_src, n);
-
         origin_dst = kmsan_get_metadata(dst, KMSAN_META_ORIGIN);
         origin_src = kmsan_get_metadata(src, KMSAN_META_ORIGIN);
         KMSAN_WARN_ON(!origin_dst || !origin_src);
-        src_slots = (ALIGN((u64)src + n, KMSAN_ORIGIN_SIZE) -
-                     ALIGN_DOWN((u64)src, KMSAN_ORIGIN_SIZE)) /
-                    KMSAN_ORIGIN_SIZE;
-        dst_slots = (ALIGN((u64)dst + n, KMSAN_ORIGIN_SIZE) -
-                     ALIGN_DOWN((u64)dst, KMSAN_ORIGIN_SIZE)) /
-                    KMSAN_ORIGIN_SIZE;
-        KMSAN_WARN_ON((src_slots < 1) || (dst_slots < 1));
-        KMSAN_WARN_ON((src_slots - dst_slots > 1) ||
-                      (dst_slots - src_slots < -1));
 
         backwards = dst > src;
-        i = backwards ? min(src_slots, dst_slots) - 1 : 0;
-        iter = backwards ? -1 : 1;
-
-        align_shadow_src =
-                (u32 *)ALIGN_DOWN((u64)shadow_src, KMSAN_ORIGIN_SIZE);
-        for (step = 0; step < min(src_slots, dst_slots); step++, i += iter) {
-                KMSAN_WARN_ON(i < 0);
-                shadow = align_shadow_src[i];
-                if (i == 0) {
-                        /*
-                         * If @src isn't aligned on KMSAN_ORIGIN_SIZE, don't
-                         * look at the first @src % KMSAN_ORIGIN_SIZE bytes
-                         * of the first shadow slot.
-                         */
-                        skip_bits = ((u64)src % KMSAN_ORIGIN_SIZE) * 8;
-                        shadow = (shadow >> skip_bits) << skip_bits;
+        step = backwards ? -1 : 1;
+        iter = backwards ? n - 1 : 0;
+        src_off = (u64)src % KMSAN_ORIGIN_SIZE;
+        dst_off = (u64)dst % KMSAN_ORIGIN_SIZE;
+
+        /* Copy shadow bytes one by one, updating the origins if necessary. */
+        for (i = 0; i < n; i++, iter += step) {
+                oiter_src = (iter + src_off) / KMSAN_ORIGIN_SIZE;
+                oiter_dst = (iter + dst_off) / KMSAN_ORIGIN_SIZE;
+                if (!shadow_src[iter]) {
+                        shadow_dst[iter] = 0;
+                        if (!align_shadow_dst[oiter_dst])
+                                origin_dst[oiter_dst] = 0;
+                        continue;
                 }
-                if (i == src_slots - 1) {
-                        /*
-                         * If @src + n isn't aligned on
-                         * KMSAN_ORIGIN_SIZE, don't look at the last
-                         * (@src + n) % KMSAN_ORIGIN_SIZE bytes of the
-                         * last shadow slot.
-                         */
-                        skip_bits = (((u64)src + n) % KMSAN_ORIGIN_SIZE) * 8;
-                        shadow = (shadow << skip_bits) >> skip_bits;
-                }
-                /*
-                 * Overwrite the origin only if the corresponding
-                 * shadow is nonempty.
-                 */
-                if (origin_src[i] && (origin_src[i] != old_origin) && shadow) {
-                        old_origin = origin_src[i];
-                        new_origin = kmsan_internal_chain_origin(old_origin);
+                shadow_dst[iter] = shadow_src[iter];
+                old_origin = origin_src[oiter_src];
+                if (old_origin == prev_old_origin)
+                        new_origin = prev_new_origin;
+                else {
                         /*
                          * kmsan_internal_chain_origin() may return
                          * NULL, but we don't want to lose the previous
                          * origin value.
                          */
+                        new_origin = kmsan_internal_chain_origin(old_origin);
                         if (!new_origin)
                                 new_origin = old_origin;
                 }
-                if (shadow)
-                        origin_dst[i] = new_origin;
-                else
-                        origin_dst[i] = 0;
-        }
-        /*
-         * If dst_slots is greater than src_slots (i.e.
-         * dst_slots == src_slots + 1), there is an extra origin slot at the
-         * beginning or end of the destination buffer, for which we take the
-         * origin from the previous slot.
-         * This is only done if the part of the source shadow corresponding to
-         * slot is non-zero.
-         *
-         * E.g. if we copy 8 aligned bytes that are marked as uninitialized
-         * and have origins o111 and o222, to an unaligned buffer with offset 1,
-         * these two origins are copied to three origin slots, so one of then
-         * needs to be duplicated, depending on the copy direction (@backwards)
-         *
-         *   src shadow: |uuuu|uuuu|....|
-         *   src origin: |o111|o222|....|
-         *
-         * backwards = 0:
-         *   dst shadow: |.uuu|uuuu|u...|
-         *   dst origin: |....|o111|o222| - fill the empty slot with o111
-         * backwards = 1:
-         *   dst shadow: |.uuu|uuuu|u...|
-         *   dst origin: |o111|o222|....| - fill the empty slot with o222
-         */
-        if (src_slots < dst_slots) {
-                if (backwards) {
-                        shadow = align_shadow_src[src_slots - 1];
-                        skip_bits = (((u64)dst + n) % KMSAN_ORIGIN_SIZE) * 8;
-                        shadow = (shadow << skip_bits) >> skip_bits;
-                        if (shadow)
-                                /* src_slots > 0, therefore dst_slots is at least 2 */
-                                origin_dst[dst_slots - 1] =
-                                        origin_dst[dst_slots - 2];
-                } else {
-                        shadow = align_shadow_src[0];
-                        skip_bits = ((u64)dst % KMSAN_ORIGIN_SIZE) * 8;
-                        shadow = (shadow >> skip_bits) << skip_bits;
-                        if (shadow)
-                                origin_dst[0] = origin_dst[1];
-                }
+                origin_dst[oiter_dst] = new_origin;
+                prev_new_origin = new_origin;
+                prev_old_origin = old_origin;
         }
 }
diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
index 312989aa28..07d3a3a5a9 100644
--- a/mm/kmsan/kmsan_test.c
+++ b/mm/kmsan/kmsan_test.c
@@ -67,6 +67,17 @@ static bool report_available(void)
         return READ_ONCE(observed.available);
 }
 
+/* Reset observed.available, so that the test can trigger another report. */
+static void report_reset(void)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&observed.lock, flags);
+        WRITE_ONCE(observed.available, false);
+        observed.ignore = false;
+        spin_unlock_irqrestore(&observed.lock, flags);
+}
+
 /* Information we expect in a report. */
 struct expect_report {
         const char *error_type; /* Error type. */
@@ -407,33 +418,25 @@ static void test_printk(struct kunit *test)
         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
 
-/*
- * Prevent the compiler from optimizing @var away. Without this, Clang may
- * notice that @var is uninitialized and drop memcpy() calls that use it.
- *
- * There is OPTIMIZER_HIDE_VAR() in linux/compier.h that we cannot use here,
- * because it is implemented as inline assembly receiving @var as a parameter
- * and will enforce a KMSAN check. Same is true for e.g. barrier_data(var).
- */
-#define DO_NOT_OPTIMIZE(var) barrier()
+/* Prevent the compiler from inlining a memcpy() call. */
+static noinline void *memcpy_noinline(volatile void *dst,
+                                      const volatile void *src, size_t size)
+{
+        return memcpy((void *)dst, (const void *)src, size);
+}
 
-/*
- * Test case: ensure that memcpy() correctly copies initialized values.
- * Also serves as a regression test to ensure DO_NOT_OPTIMIZE() does not cause
- * extra checks.
- */
+/* Test case: ensure that memcpy() correctly copies initialized values. */
 static void test_init_memcpy(struct kunit *test)
 {
         EXPECTATION_NO_REPORT(expect);
-        volatile int src;
-        volatile int dst = 0;
+        volatile long long src;
+        volatile long long dst = 0;
 
-        DO_NOT_OPTIMIZE(src);
         src = 1;
         kunit_info(
                 test,
                 "memcpy()ing aligned initialized src to aligned dst (no reports)\n");
-        memcpy((void *)&dst, (void *)&src, sizeof(src));
+        memcpy_noinline((void *)&dst, (void *)&src, sizeof(src));
         kmsan_check_memory((void *)&dst, sizeof(dst));
         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
@@ -451,8 +454,7 @@ static void test_memcpy_aligned_to_aligned(struct kunit *test)
         kunit_info(
                 test,
                 "memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
-        DO_NOT_OPTIMIZE(uninit_src);
-        memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
+        memcpy_noinline((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
         kmsan_check_memory((void *)&dst, sizeof(dst));
         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
@@ -463,7 +465,7 @@ static void test_memcpy_aligned_to_aligned(struct kunit *test)
  *
  * Copying aligned 4-byte value to an unaligned one leads to touching two
  * aligned 4-byte values. This test case checks that KMSAN correctly reports an
- * error on the first of the two values.
+ * error on the mentioned two values.
  */
 static void test_memcpy_aligned_to_unaligned(struct kunit *test)
 {
@@ -474,33 +476,65 @@ static void test_memcpy_aligned_to_unaligned(struct kunit *test)
         kunit_info(
                 test,
                 "memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
-        DO_NOT_OPTIMIZE(uninit_src);
-        memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
+        kmsan_check_memory((void *)&uninit_src, sizeof(uninit_src));
+        memcpy_noinline((void *)&dst[1], (void *)&uninit_src,
+                        sizeof(uninit_src));
         kmsan_check_memory((void *)dst, 4);
         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+        report_reset();
+        kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
+        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
 
 /*
- * Test case: ensure that memcpy() correctly copies uninitialized values between
- * aligned `src` and unaligned `dst`.
+ * Test case: ensure that origin slots do not accidentally get overwritten with
+ * zeroes during memcpy().
  *
- * Copying aligned 4-byte value to an unaligned one leads to touching two
- * aligned 4-byte values. This test case checks that KMSAN correctly reports an
- * error on the second of the two values.
+ * Previously, when copying memory from an aligned buffer to an unaligned one,
+ * if there were zero origins corresponding to zero shadow values in the source
+ * buffer, they could have ended up being copied to nonzero shadow values in the
+ * destination buffer:
+ *
+ *   memcpy(0xffff888080a00000, 0xffff888080900002, 8)
+ *
+ *   src (0xffff888080900002): ..xx .... xx..
+ *   src origins:              o111 0000 o222
+ *   dst (0xffff888080a00000): xx.. ..xx
+ *   dst origins:              o111 0000
+ *                        (or  0000 o222)
+ *
+ * (here . stands for an initialized byte, and x for an uninitialized one.
+ *
+ * Ensure that this does not happen anymore, and for both destination bytes
+ * the origin is nonzero (i.e. KMSAN reports an error).
  */
-static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
+static void test_memcpy_initialized_gap(struct kunit *test)
 {
-        EXPECTATION_UNINIT_VALUE_FN(expect,
-                                    "test_memcpy_aligned_to_unaligned2");
-        volatile int uninit_src;
+        EXPECTATION_UNINIT_VALUE_FN(expect, "test_memcpy_initialized_gap");
+        volatile char uninit_src[12];
         volatile char dst[8] = { 0 };
 
         kunit_info(
                 test,
-                "memcpy()ing aligned uninit src to unaligned dst - part 2 (UMR report)\n");
-        DO_NOT_OPTIMIZE(uninit_src);
-        memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
-        kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
+                "unaligned 4-byte initialized value gets a nonzero origin after memcpy() - (2 UMR reports)\n");
+
+        uninit_src[0] = 42;
+        uninit_src[1] = 42;
+        uninit_src[4] = 42;
+        uninit_src[5] = 42;
+        uninit_src[6] = 42;
+        uninit_src[7] = 42;
+        uninit_src[10] = 42;
+        uninit_src[11] = 42;
+        memcpy_noinline((void *)&dst[0], (void *)&uninit_src[2], 8);
+
+        kmsan_check_memory((void *)&dst[0], 4);
+        KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+        report_reset();
+        kmsan_check_memory((void *)&dst[2], 4);
+        KUNIT_EXPECT_FALSE(test, report_matches(&expect));
+        report_reset();
+        kmsan_check_memory((void *)&dst[4], 4);
         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
 }
 
@@ -513,7 +547,6 @@ static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
                                                                 \
                 kunit_info(test,                                \
                            "memset" #size "() should initialize memory\n"); \
-                DO_NOT_OPTIMIZE(uninit);                        \
                 memset##size((uint##size##_t *)&uninit, 0, 1);  \
                 kmsan_check_memory((void *)&uninit, sizeof(uninit)); \
                 KUNIT_EXPECT_TRUE(test, report_matches(&expect)); \
@@ -598,7 +631,7 @@ static struct kunit_case kmsan_test_cases[] = {
         KUNIT_CASE(test_init_memcpy),
         KUNIT_CASE(test_memcpy_aligned_to_aligned),
         KUNIT_CASE(test_memcpy_aligned_to_unaligned),
-        KUNIT_CASE(test_memcpy_aligned_to_unaligned2),
+        KUNIT_CASE(test_memcpy_initialized_gap),
         KUNIT_CASE(test_memset16),
         KUNIT_CASE(test_memset32),
         KUNIT_CASE(test_memset64),
diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c
index 87318f9170..b9d05aff31 100644
--- a/mm/kmsan/shadow.c
+++ b/mm/kmsan/shadow.c
@@ -285,12 +285,17 @@ void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
         size = PAGE_ALIGN((u64)end - (u64)start);
         shadow = memblock_alloc(size, PAGE_SIZE);
         origin = memblock_alloc(size, PAGE_SIZE);
+
+        if (!shadow || !origin)
+                panic("%s: Failed to allocate metadata memory for early boot range of size %llu",
+                      __func__, size);
+
         for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
                 page = virt_to_page_or_null((char *)start + addr);
-                shadow_p = virt_to_page_or_null((char *)shadow + addr);
+                shadow_p = virt_to_page((char *)shadow + addr);
                 set_no_shadow_origin_page(shadow_p);
                 shadow_page_for(page) = shadow_p;
-                origin_p = virt_to_page_or_null((char *)origin + addr);
+                origin_p = virt_to_page((char *)origin + addr);
                 set_no_shadow_origin_page(origin_p);
                 origin_page_for(page) = origin_p;
         }
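
For readers who want to experiment with the new copying scheme outside the kernel, below is a minimal userspace C sketch of the byte-by-byte metadata copy that kmsan_internal_memmove_metadata() now performs. The flat shadow[]/origin[] arrays, the chain_origin() helper, dst_slot_clean() and the fixed 4-byte ORIGIN_SIZE are simplified stand-ins for the kernel's real metadata machinery (kmsan_get_metadata(), kmsan_internal_chain_origin(), KMSAN_ORIGIN_SIZE); only the forward (!backwards) direction is modeled, and the setup in main() mirrors the layout exercised by test_memcpy_initialized_gap().

#include <stdint.h>
#include <stdio.h>

#define ORIGIN_SIZE 4                  /* one origin slot covers 4 aligned bytes */
#define MEM_SIZE    64

static uint8_t  shadow[MEM_SIZE];                /* nonzero byte = uninitialized */
static uint32_t origin[MEM_SIZE / ORIGIN_SIZE];  /* one origin id per aligned slot */

/* Stand-in for kmsan_internal_chain_origin(): derive a new nonzero id. */
static uint32_t chain_origin(uint32_t old)
{
        return old ? old + 0x1000 : 0;
}

/* True if all shadow bytes of the destination origin slot are zero. */
static int dst_slot_clean(size_t oi_dst)
{
        for (size_t b = 0; b < ORIGIN_SIZE; b++)
                if (shadow[oi_dst * ORIGIN_SIZE + b])
                        return 0;
        return 1;
}

/* Forward-only model of the new per-byte copy loop (no @backwards handling). */
static void memmove_metadata(size_t dst, size_t src, size_t n)
{
        uint32_t prev_old = 0, prev_new = 0, old, cur;

        for (size_t i = 0; i < n; i++) {
                size_t oi_src = (src + i) / ORIGIN_SIZE;
                size_t oi_dst = (dst + i) / ORIGIN_SIZE;

                if (!shadow[src + i]) {
                        /* Initialized source byte: clear the destination shadow,
                         * but only wipe the destination origin if the whole slot
                         * is now clean -- the behavior this patch introduces. */
                        shadow[dst + i] = 0;
                        if (dst_slot_clean(oi_dst))
                                origin[oi_dst] = 0;
                        continue;
                }
                shadow[dst + i] = shadow[src + i];
                old = origin[oi_src];
                if (old == prev_old) {
                        cur = prev_new;       /* reuse the last chained origin */
                } else {
                        cur = chain_origin(old);
                        if (!cur)
                                cur = old;
                }
                origin[oi_dst] = cur;
                prev_new = cur;
                prev_old = old;
        }
}

int main(void)
{
        /* Mirror test_memcpy_initialized_gap(): source bytes 2..3 and 8..9 are
         * uninitialized, 4..7 are initialized; copy 8 bytes from offset 2 to an
         * aligned destination at offset 32. */
        shadow[2] = shadow[3] = 0xff;
        shadow[8] = shadow[9] = 0xff;
        origin[0] = 0x111;      /* covers source bytes 0..3 */
        origin[2] = 0x222;      /* covers source bytes 8..11 */

        memmove_metadata(32, 2, 8);

        for (size_t s = 32 / ORIGIN_SIZE; s < 32 / ORIGIN_SIZE + 2; s++)
                printf("dst slot %zu: origin %#x\n", s, (unsigned)origin[s]);
        return 0;
}

Running the sketch prints a nonzero origin for both destination slots, matching the two UMR reports the test expects: the initialized bytes that land in the middle of the destination no longer wipe the origins of the uninitialized bytes around them.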