From e54def4ad8144ab15f826416e2e0f290ef1901b4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 19 Jun 2024 23:00:30 +0200
Subject: Adding upstream version 6.9.2.

Signed-off-by: Daniel Baumann
---
 mm/kasan/common.c            |  2 +-
 mm/kasan/generic.c           | 22 ++++--------
 mm/kasan/kasan_test.c        | 79 ++++++++++++++++++++++++++++++++++++++++++++
 mm/kasan/kasan_test_module.c |  4 +--
 mm/kasan/report.c            |  2 +-
 mm/kasan/shadow.c            | 11 ++----
 6 files changed, 91 insertions(+), 29 deletions(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6ca63e8dd..e7c9a4dc8 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -55,7 +55,7 @@ void kasan_set_track(struct kasan_track *track, depot_stack_handle_t stack)
 	u64 ts_nsec = local_clock();
 
 	track->cpu = cpu;
-	track->timestamp = ts_nsec >> 3;
+	track->timestamp = ts_nsec >> 9;
 #endif /* CONFIG_KASAN_EXTRA_INFO */
 	track->pid = current->pid;
 	track->stack = stack;
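
The common.c hunk above coarsens the stored timestamp: the local_clock() value is right-shifted by 9 bits (roughly 512 ns granularity) instead of 3 (roughly 8 ns) before being packed into the kasan_track, presumably so the same field can cover a longer time span, and the report.c hunk further down mirrors this by shifting left by 9 when printing. A minimal userspace sketch of that round-trip (plain C, not kernel code; the sample value is invented):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t ts_nsec = 123456789123ULL;	/* pretend local_clock() output */
		uint64_t stored = ts_nsec >> 9;		/* what kasan_set_track() saves */
		uint64_t shown = stored << 9;		/* what print_track() reconstructs */

		/* The reconstructed value is at most 2^9 - 1 = 511 ns low. */
		printf("saved %llu ns as %llu, report shows %llu (lost %llu ns)\n",
		       (unsigned long long)ts_nsec,
		       (unsigned long long)stored,
		       (unsigned long long)shown,
		       (unsigned long long)(ts_nsec - shown));
		return 0;
	}
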
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 1900f8576..6310a1802 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -334,14 +334,6 @@ DEFINE_ASAN_SET_SHADOW(f3);
 DEFINE_ASAN_SET_SHADOW(f5);
 DEFINE_ASAN_SET_SHADOW(f8);
 
-/* Only allow cache merging when no per-object metadata is present. */
-slab_flags_t kasan_never_merge(void)
-{
-	if (!kasan_requires_meta())
-		return 0;
-	return SLAB_KASAN;
-}
-
 /*
  * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
  * For larger allocations larger redzones are used.
@@ -370,15 +362,13 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
 		return;
 
 	/*
-	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN
-	 * and that thus have per-object metadata.
-	 * Currently this flag is used in two places:
-	 * 1. In slab_ksize() to account for per-object metadata when
-	 *    calculating the size of the accessible memory within the object.
-	 * 2. In slab_common.c via kasan_never_merge() to prevent merging of
-	 *    caches with per-object metadata.
+	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN and
+	 * that thus have per-object metadata. Currently, this flag is used in
+	 * slab_ksize() to account for per-object metadata when calculating the
+	 * size of the accessible memory within the object. Additionally, we use
+	 * SLAB_NO_MERGE to prevent merging of caches with per-object metadata.
 	 */
-	*flags |= SLAB_KASAN;
+	*flags |= SLAB_KASAN | SLAB_NO_MERGE;
 
 	ok_size = *size;
 
diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test.c
index e26a2583a..7b32be2a3 100644
--- a/mm/kasan/kasan_test.c
+++ b/mm/kasan/kasan_test.c
@@ -698,6 +698,84 @@ static void kmalloc_uaf3(struct kunit *test)
 	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
 }
 
+static void kasan_atomics_helper(struct kunit *test, void *unsafe, void *safe)
+{
+	int *i_unsafe = unsafe;
+
+	KUNIT_EXPECT_KASAN_FAIL(test, READ_ONCE(*i_unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, WRITE_ONCE(*i_unsafe, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, smp_load_acquire(i_unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, smp_store_release(i_unsafe, 42));
+
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_read(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_set(unsafe, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_and(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_andnot(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_or(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xor(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_xchg(unsafe, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_cmpxchg(unsafe, 21, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(unsafe, safe, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_try_cmpxchg(safe, unsafe, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_sub_and_test(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_and_test(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_and_test(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_negative(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_add_unless(unsafe, 21, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_not_zero(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_inc_unless_negative(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_unless_positive(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_dec_if_positive(unsafe));
+
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_read(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_set(unsafe, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_and(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_andnot(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_or(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xor(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_xchg(unsafe, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_cmpxchg(unsafe, 21, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(unsafe, safe, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_try_cmpxchg(safe, unsafe, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_sub_and_test(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_and_test(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_and_test(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_negative(42, unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_add_unless(unsafe, 21, 42));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_not_zero(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_inc_unless_negative(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_unless_positive(unsafe));
+	KUNIT_EXPECT_KASAN_FAIL(test, atomic_long_dec_if_positive(unsafe));
+}
+
+static void kasan_atomics(struct kunit *test)
+{
+	void *a1, *a2;
+
+	/*
+	 * Just as with kasan_bitops_tags(), we allocate 48 bytes of memory such
+	 * that the following 16 bytes will make up the redzone.
+	 */
+	a1 = kzalloc(48, GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a1);
+	a2 = kzalloc(sizeof(atomic_long_t), GFP_KERNEL);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, a2);
+
+	/* Use atomics to access the redzone. */
+	kasan_atomics_helper(test, a1 + 48, a2);
+
+	kfree(a1);
+	kfree(a2);
+}
+
 static void kmalloc_double_kzfree(struct kunit *test)
 {
 	char *ptr;
@@ -1884,6 +1962,7 @@ static struct kunit_case kasan_kunit_test_cases[] = {
 	KUNIT_CASE(kasan_strings),
 	KUNIT_CASE(kasan_bitops_generic),
 	KUNIT_CASE(kasan_bitops_tags),
+	KUNIT_CASE(kasan_atomics),
 	KUNIT_CASE(vmalloc_helpers_tags),
 	KUNIT_CASE(vmalloc_oob),
 	KUNIT_CASE(vmap_tags),
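
The new kasan_atomics test leans on kmalloc size-class rounding: a 48-byte object is served from the 64-byte cache, so the 16 bytes at a1 + 48 are KASAN redzone, and every atomic operation aimed there must trigger a report. Note that atomic_try_cmpxchg() is driven in both argument orders because it dereferences both the atomic pointer and the expected-value pointer. The same pattern can be reproduced in userspace under AddressSanitizer; a hedged analogue of the idea, not the kernel test itself:

	/*
	 * Build and run with: cc -std=c11 -fsanitize=address atomics_oob.c && ./a.out
	 * The atomic RMW below lands in the heap redzone just past the 48-byte
	 * object, so ASan aborts with a heap-buffer-overflow report, mirroring
	 * the KUNIT_EXPECT_KASAN_FAIL expectations above.
	 */
	#include <stdatomic.h>
	#include <stdlib.h>

	int main(void)
	{
		char *obj = calloc(48, 1);
		if (!obj)
			return 1;

		/* First byte past the object: redzone, like a1 + 48 in the test. */
		atomic_long *unsafe = (atomic_long *)(obj + 48);

		atomic_fetch_add(unsafe, 42);	/* expected to be caught */

		free(obj);
		return 0;
	}
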
diff --git a/mm/kasan/kasan_test_module.c b/mm/kasan/kasan_test_module.c
index 8b7b3ea2c..27ec22767 100644
--- a/mm/kasan/kasan_test_module.c
+++ b/mm/kasan/kasan_test_module.c
@@ -62,7 +62,7 @@ static noinline void __init copy_user_test(void)
 	kfree(kmem);
 }
 
-static int __init test_kasan_module_init(void)
+static int __init kasan_test_module_init(void)
 {
 	/*
 	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
@@ -77,5 +77,5 @@
 	return -EAGAIN;
 }
 
-module_init(test_kasan_module_init);
+module_init(kasan_test_module_init);
 MODULE_LICENSE("GPL");
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 7afa4feb0..b48c768ac 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -267,7 +267,7 @@ static void print_track(struct kasan_track *track, const char *prefix)
 	u64 ts_nsec = track->timestamp;
 	unsigned long rem_usec;
 
-	ts_nsec <<= 3;
+	ts_nsec <<= 9;
 	rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000;
 
 	pr_err("%s by task %u on cpu %d at %lu.%06lus:\n",
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 9ef84f318..d6210ca48 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -199,19 +199,12 @@ static bool shadow_mapped(unsigned long addr)
 	pud = pud_offset(p4d, addr);
 	if (pud_none(*pud))
 		return false;
-
-	/*
-	 * We can't use pud_large() or pud_huge(), the first one is
-	 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
-	 * pud_bad(), if pud is bad then it's bad because it's huge.
-	 */
-	if (pud_bad(*pud))
+	if (pud_leaf(*pud))
 		return true;
 	pmd = pmd_offset(pud, addr);
 	if (pmd_none(*pmd))
 		return false;
-
-	if (pmd_bad(*pmd))
+	if (pmd_leaf(*pmd))
 		return true;
 	pte = pte_offset_kernel(pmd, addr);
 	return !pte_none(ptep_get(pte));
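
The shadow.c hunk retires a long-standing trick: pud_bad()/pmd_bad() were abused to detect huge mappings because, as the removed comment explains, pud_large() was arch-specific and pud_huge() depended on HUGETLB_PAGE. With pud_leaf()/pmd_leaf() available generically by this kernel version, the walk can ask the real question: does this entry map memory directly at this level? A toy model of that control flow, using invented stand-in types rather than the kernel's page-table API:

	#include <stdbool.h>
	#include <stdio.h>

	/* Invented stand-in: an entry is empty, a leaf (maps memory directly
	 * at this level, terminating the walk), or points to the next table. */
	struct entry {
		bool none;
		bool leaf;
		struct entry *next;
	};

	static bool is_mapped(struct entry *pud)
	{
		if (pud->none)
			return false;
		if (pud->leaf)		/* e.g. a 1 GiB mapping: done */
			return true;

		struct entry *pmd = pud->next;
		if (pmd->none)
			return false;
		if (pmd->leaf)		/* e.g. a 2 MiB mapping: done */
			return true;

		/* A real walk would descend to the PTE level here. */
		return true;
	}

	int main(void)
	{
		struct entry pmd = { .leaf = true };
		struct entry pud = { .next = &pmd };

		printf("mapped: %d\n", is_mapped(&pud));	/* prints 1 */
		return 0;
	}
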