From 953b921688bb14675ebdf3c36148d5a6cd7e1a76 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Mon, 3 Jun 2024 07:09:00 +0200
Subject: Merging upstream version 6.8.12.

Signed-off-by: Daniel Baumann
---
 lib/fortify_kunit.c   | 16 ++++--
 lib/kunit/device.c    |  2 +-
 lib/kunit/test.c      |  3 ++
 lib/kunit/try-catch.c |  9 +++--
 lib/maple_tree.c      | 93 +++++++++++++++++++++++++++++++++++++++++++++++++++
 lib/overflow_kunit.c  | 19 +++++++++++
 lib/slub_kunit.c      |  2 +-
 lib/test_hmm.c        |  8 ++---
 8 files changed, 135 insertions(+), 17 deletions(-)

(limited to 'lib')

diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 2e4fedc81..7830a9e64 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -229,28 +229,28 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
 									\
 	checker((expected_pages) * PAGE_SIZE,				\
 		kvmalloc((alloc_pages) * PAGE_SIZE, gfp),		\
-		vfree(p));						\
+		kvfree(p));						\
 	checker((expected_pages) * PAGE_SIZE,				\
 		kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
-		vfree(p));						\
+		kvfree(p));						\
 	checker((expected_pages) * PAGE_SIZE,				\
 		kvzalloc((alloc_pages) * PAGE_SIZE, gfp),		\
-		vfree(p));						\
+		kvfree(p));						\
 	checker((expected_pages) * PAGE_SIZE,				\
 		kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
-		vfree(p));						\
+		kvfree(p));						\
 	checker((expected_pages) * PAGE_SIZE,				\
 		kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp),		\
-		vfree(p));						\
+		kvfree(p));						\
 	checker((expected_pages) * PAGE_SIZE,				\
 		kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp),		\
-		vfree(p));						\
+		kvfree(p));						\
 	checker((expected_pages) * PAGE_SIZE,				\
 		kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp),	\
-		vfree(p));						\
+		kvfree(p));						\
 	checker((expected_pages) * PAGE_SIZE,				\
 		kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp),	\
-		vfree(p));						\
+		kvfree(p));						\
 									\
 	prev_size = (expected_pages) * PAGE_SIZE;			\
 	orig = kvmalloc(prev_size, gfp);				\
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
index 9ea399049..3a31fe9ed 100644
--- a/lib/kunit/device.c
+++ b/lib/kunit/device.c
@@ -51,7 +51,7 @@ int kunit_bus_init(void)
 
 	error = bus_register(&kunit_bus_type);
 	if (error)
-		bus_unregister(&kunit_bus_type);
+		root_device_unregister(kunit_bus_device);
 
 	return error;
 }
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 1d1475578..b8514dbb3 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -712,6 +712,9 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
 {
 	unsigned int i;
 
+	if (num_suites == 0)
+		return 0;
+
 	if (!kunit_enabled() && num_suites > 0) {
 		pr_info("kunit: disabled\n");
 		return 0;
diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
index f7825991d..d9d1df28c 100644
--- a/lib/kunit/try-catch.c
+++ b/lib/kunit/try-catch.c
@@ -11,6 +11,7 @@
 #include <linux/completion.h>
 #include <linux/kernel.h>
 #include <linux/kthread.h>
+#include <linux/sched/task.h>
 
 #include "try-catch-impl.h"
 
@@ -65,13 +66,14 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
 	try_catch->context = context;
 	try_catch->try_completion = &try_completion;
 	try_catch->try_result = 0;
-	task_struct = kthread_run(kunit_generic_run_threadfn_adapter,
-				  try_catch,
-				  "kunit_try_catch_thread");
+	task_struct = kthread_create(kunit_generic_run_threadfn_adapter,
+				     try_catch, "kunit_try_catch_thread");
 	if (IS_ERR(task_struct)) {
 		try_catch->catch(try_catch->context);
 		return;
 	}
+	get_task_struct(task_struct);
+	wake_up_process(task_struct);
 
 	time_remaining = wait_for_completion_timeout(&try_completion,
 						     kunit_test_timeout());
@@ -81,6 +83,7 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
 		kthread_stop(task_struct);
 	}
 
+	put_task_struct(task_struct);
 	exit_code = try_catch->try_result;
 
 	if (!exit_code)
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index d70db0575..fe6092a1d 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4290,6 +4290,56 @@ exists:
 }
 
+/**
+ * mas_alloc_cyclic() - Internal call to find somewhere to store an entry
+ * @mas: The maple state.
+ * @startp: Pointer to ID.
+ * @range_lo: Lower bound of range to search.
+ * @range_hi: Upper bound of range to search.
+ * @entry: The entry to store.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Return: 0 if the allocation succeeded without wrapping, 1 if the
+ * allocation succeeded after wrapping, or -EBUSY if there are no
+ * free entries.
+ */
+int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
+		void *entry, unsigned long range_lo, unsigned long range_hi,
+		unsigned long *next, gfp_t gfp)
+{
+	unsigned long min = range_lo;
+	int ret = 0;
+
+	range_lo = max(min, *next);
+	ret = mas_empty_area(mas, range_lo, range_hi, 1);
+	if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) {
+		mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED;
+		ret = 1;
+	}
+	if (ret < 0 && range_lo > min) {
+		ret = mas_empty_area(mas, min, range_hi, 1);
+		if (ret == 0)
+			ret = 1;
+	}
+	if (ret < 0)
+		return ret;
+
+	do {
+		mas_insert(mas, entry);
+	} while (mas_nomem(mas, gfp));
+	if (mas_is_err(mas))
+		return xa_err(mas->node);
+
+	*startp = mas->index;
+	*next = *startp + 1;
+	if (*next == 0)
+		mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;
+
+	return ret;
+}
+EXPORT_SYMBOL(mas_alloc_cyclic);
+
 static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
 {
 retry:
@@ -6443,6 +6493,49 @@ unlock:
 }
 EXPORT_SYMBOL(mtree_alloc_range);
 
+/**
+ * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree.
+ * @mt: The maple tree.
+ * @startp: Pointer to ID.
+ * @range_lo: Lower bound of range to search.
+ * @range_hi: Upper bound of range to search.
+ * @entry: The entry to store.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * Finds an empty entry in @mt after @next, stores the new index into
+ * the @id pointer, stores the entry at that index, then updates @next.
+ *
+ * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag.
+ *
+ * Context: Any context. Takes and releases the mt.lock. May sleep if
+ * the @gfp flags permit.
+ *
+ * Return: 0 if the allocation succeeded without wrapping, 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no
+ * free entries.
+ */
+int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
+		void *entry, unsigned long range_lo, unsigned long range_hi,
+		unsigned long *next, gfp_t gfp)
+{
+	int ret;
+
+	MA_STATE(mas, mt, 0, 0);
+
+	if (!mt_is_alloc(mt))
+		return -EINVAL;
+	if (WARN_ON_ONCE(mt_is_reserved(entry)))
+		return -EINVAL;
+	mtree_lock(mt);
+	ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi,
+			       next, gfp);
+	mtree_unlock(mt);
+	return ret;
+}
+EXPORT_SYMBOL(mtree_alloc_cyclic);
+
 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
 		void *entry, unsigned long size, unsigned long min,
 		unsigned long max, gfp_t gfp)
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index c527f6b75..c85c8b121 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -1113,6 +1113,24 @@ static void castable_to_type_test(struct kunit *test)
 #undef TEST_CASTABLE_TO_TYPE
 }
 
+struct foo {
+	int a;
+	u32 counter;
+	s16 array[] __counted_by(counter);
+};
+
+static void DEFINE_FLEX_test(struct kunit *test)
+{
+	DEFINE_RAW_FLEX(struct foo, two, array, 2);
+	DEFINE_FLEX(struct foo, eight, array, counter, 8);
+	DEFINE_FLEX(struct foo, empty, array, counter, 0);
+
+	KUNIT_EXPECT_EQ(test, __struct_size(two),
+			sizeof(struct foo) + sizeof(s16) + sizeof(s16));
+	KUNIT_EXPECT_EQ(test, __struct_size(eight), 24);
+	KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo));
+}
+
 static struct kunit_case overflow_test_cases[] = {
 	KUNIT_CASE(u8_u8__u8_overflow_test),
 	KUNIT_CASE(s8_s8__s8_overflow_test),
@@ -1135,6 +1153,7 @@ static struct kunit_case overflow_test_cases[] = {
 	KUNIT_CASE(overflows_type_test),
 	KUNIT_CASE(same_type_test),
 	KUNIT_CASE(castable_to_type_test),
+	KUNIT_CASE(DEFINE_FLEX_test),
 	{}
 };
 
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index d4a3730b0..4ce960438 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -55,7 +55,7 @@ static void test_next_pointer(struct kunit *test)
 
 	ptr_addr = (unsigned long *)(p + s->offset);
 	tmp = *ptr_addr;
-	p[s->offset] = 0x12;
+	p[s->offset] = ~p[s->offset];
 
 	/*
 	 * Expecting three errors.
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 717dcb830..b823ba7cb 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -1226,8 +1226,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
 	unsigned long *src_pfns;
 	unsigned long *dst_pfns;
 
-	src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
-	dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
 
 	migrate_device_range(src_pfns, start_pfn, npages);
 	for (i = 0; i < npages; i++) {
@@ -1250,8 +1250,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
 	}
 	migrate_device_pages(src_pfns, dst_pfns, npages);
 	migrate_device_finalize(src_pfns, dst_pfns, npages);
-	kfree(src_pfns);
-	kfree(dst_pfns);
+	kvfree(src_pfns);
+	kvfree(dst_pfns);
 }
 
 /* Removes free pages from the free list so they can't be re-allocated */
--
cgit v1.2.3
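
Note on the lib/kunit/try-catch.c hunks (editorial gloss, not part of the patch): with kthread_run(), the test thread may start, finish, and drop the last reference to its own task_struct before the timeout path calls kthread_stop(), making that call a potential use-after-free. Splitting creation from wake-up lets get_task_struct() pin the thread before it can run, and the matching put_task_struct() releases the reference only after the result has been collected.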
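
To make the new maple tree API concrete, here is a minimal usage sketch (not part of the patch): id_tree, next_id, and assign_object_id() are hypothetical names for illustration, while mtree_alloc_cyclic(), MTREE_INIT(), and MT_FLAGS_ALLOC_RANGE are the kernel symbols the kernel-doc above describes.

#include <linux/maple_tree.h>

/* An alloc-range tree, as mtree_alloc_cyclic() requires. */
static struct maple_tree id_tree = MTREE_INIT(id_tree, MT_FLAGS_ALLOC_RANGE);
static unsigned long next_id = 1;	/* cursor: where the next search starts */

/* Store @object under a fresh ID in [1, U32_MAX], wrapping when exhausted. */
static int assign_object_id(void *object, unsigned long *id_out)
{
	int ret;

	ret = mtree_alloc_cyclic(&id_tree, id_out, object, 1, U32_MAX,
				 &next_id, GFP_KERNEL);
	if (ret < 0)
		return ret;	/* -ENOMEM, -EINVAL, or -EBUSY per the kernel-doc */
	if (ret == 1)
		pr_debug("id space wrapped around\n");	/* still a success */
	return 0;
}

A return of 1 is still a successful store; it only reports that the search wrapped from range_hi back to range_lo, which the MT_FLAGS_ALLOC_WRAPPED flag tracks internally.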
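
On the expected sizes in DEFINE_FLEX_test (editorial note, not part of the patch): with natural alignment, sizeof(struct foo) is 8 bytes (a 4-byte int plus a 4-byte u32; the flexible s16 array[] adds nothing to sizeof), so __struct_size() of the "two" case is 8 + 2 * sizeof(s16) = 12, the "eight" case is 8 + 8 * 2 = 24, matching the literal in the test, and the "empty" case collapses to plain sizeof(struct foo).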