author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:17:52 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:17:52 +0000
commit | 3afb00d3f86d3d924f88b56fa8285d4e9db85852 (patch) |
tree | 95a985d3019522cea546b7d8df621369bc44fc6c /lib |
parent | Adding debian version 6.9.12-1. (diff) |
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'lib')
56 files changed, 2230 insertions, 812 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index c98e11c733..b0a76dff5c 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -622,7 +622,7 @@ config SIGNATURE Implementation is done using GnuPG MPI library config DIMLIB - bool + tristate help Dynamic Interrupt Moderation library. Implements an algorithm for dynamically changing CQ moderation values diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 291185f54e..59b6765d86 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -969,6 +969,37 @@ config DEBUG_STACKOVERFLOW If in doubt, say "N". +config CODE_TAGGING + bool + select KALLSYMS + +config MEM_ALLOC_PROFILING + bool "Enable memory allocation profiling" + default n + depends on PROC_FS + depends on !DEBUG_FORCE_WEAK_PER_CPU + select CODE_TAGGING + select PAGE_EXTENSION + select SLAB_OBJ_EXT + help + Track allocation source code and record total allocation size + initiated at that code location. The mechanism can be used to track + memory leaks with a low performance and memory impact. + +config MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT + bool "Enable memory allocation profiling by default" + default y + depends on MEM_ALLOC_PROFILING + +config MEM_ALLOC_PROFILING_DEBUG + bool "Memory allocation profiler debugging" + default n + depends on MEM_ALLOC_PROFILING + select MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT + help + Adds warnings with helpful error messages for memory allocation + profiling. + source "lib/Kconfig.kasan" source "lib/Kconfig.kfence" source "lib/Kconfig.kmsan" @@ -1030,6 +1061,20 @@ config SOFTLOCKUP_DETECTOR chance to run. The current stack trace is displayed upon detection and the system will stay locked up. +config SOFTLOCKUP_DETECTOR_INTR_STORM + bool "Detect Interrupt Storm in Soft Lockups" + depends on SOFTLOCKUP_DETECTOR && IRQ_TIME_ACCOUNTING + select GENERIC_IRQ_STAT_SNAPSHOT + default y if NR_CPUS <= 128 + help + Say Y here to enable the kernel to detect interrupt storm + during "soft lockups". + + "soft lockups" can be caused by a variety of reasons. If one is + caused by an interrupt storm, then the storming interrupts will not + be on the callstack. To detect this case, it is necessary to report + the CPU stats and the interrupt counts during the "soft lockups". + config BOOTPARAM_SOFTLOCKUP_PANIC bool "Panic (Reboot) On Soft Lockups" depends on SOFTLOCKUP_DETECTOR @@ -1251,7 +1296,7 @@ config SCHED_INFO config SCHEDSTATS bool "Collect scheduler statistics" - depends on DEBUG_KERNEL && PROC_FS + depends on PROC_FS select SCHED_INFO help If you say Y here, additional code will be inserted into the @@ -2437,7 +2482,6 @@ config TEST_LKM config TEST_BITOPS tristate "Test module for compilation of bitops operations" - depends on m help This builds the "test_bitops" module that is much like the TEST_LKM module except that it does a basic exercise of the @@ -2759,16 +2803,6 @@ config HW_BREAKPOINT_KUNIT_TEST If unsure, say N. 
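Worth noting for the MEM_ALLOC_PROFILING entry above: the accounting added later in this patch (lib/alloc_tag.c) is surfaced through a new /proc/allocinfo file, one line per allocation call site. Below is a minimal userspace sketch of a consumer, assuming only the versioned header and the "<size> <calls> <tag info>" field order that print_allocinfo_header() declares in the alloc_tag.c hunk further down; everything else here is illustrative.

```c
/*
 * Hypothetical reader for /proc/allocinfo; the two header lines and the
 * "<size> <calls> <tag info>" layout are taken from the alloc_tag.c hunk
 * later in this diff.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/allocinfo", "r");
	char line[512];
	unsigned long long size, calls;

	if (!f) {
		perror("/proc/allocinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (line[0] == '#' || strstr(line, "version:"))
			continue;		/* skip the header lines */
		if (sscanf(line, "%llu %llu", &size, &calls) == 2 && size)
			fputs(line, stdout);	/* call sites with live allocations only */
	}
	fclose(f);
	return 0;
}
```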
-config STRCAT_KUNIT_TEST - tristate "Test strcat() family of functions at runtime" if !KUNIT_ALL_TESTS - depends on KUNIT - default KUNIT_ALL_TESTS - -config STRSCPY_KUNIT_TEST - tristate "Test strscpy*() family of functions at runtime" if !KUNIT_ALL_TESTS - depends on KUNIT - default KUNIT_ALL_TESTS - config SIPHASH_KUNIT_TEST tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS depends on KUNIT @@ -2891,7 +2925,7 @@ config TEST_FREE_PAGES config TEST_FPU tristate "Test floating point operations in kernel space" - depends on X86 && !KCOV_INSTRUMENT_ALL + depends on ARCH_HAS_KERNEL_FPU_SUPPORT && !KCOV_INSTRUMENT_ALL help Enable this option to add /sys/kernel/debug/selftest_helpers/test_fpu which will trigger a sequence of floating point operations. This is used diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index b5c0e65767..537e1b3f57 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -122,6 +122,7 @@ config KDB_DEFAULT_ENABLE config KDB_KEYBOARD bool "KGDB_KDB: keyboard as input device" depends on VT && KGDB_KDB && !PARISC + depends on HAS_IOPORT default n help KDB can use a PS/2 type keyboard for an input device diff --git a/lib/Makefile b/lib/Makefile index ffc6b2341b..30337431d1 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -110,30 +110,10 @@ CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE) obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o obj-$(CONFIG_TEST_OBJPOOL) += test_objpool.o -# -# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns -# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS -# get appended last to CFLAGS and thus override those previous compiler options. -# -FPU_CFLAGS := -msse -msse2 -ifdef CONFIG_CC_IS_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). -# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383 -# -# The "-msse" in the first argument is there so that the -# -mpreferred-stack-boundary=3 build error: -# -# -mpreferred-stack-boundary=3 is not between 4 and 12 -# -# can be triggered. Otherwise gcc doesn't complain. -FPU_CFLAGS += -mhard-float -FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4) -endif - obj-$(CONFIG_TEST_FPU) += test_fpu.o -CFLAGS_test_fpu.o += $(FPU_CFLAGS) +test_fpu-y := test_fpu_glue.o test_fpu_impl.o +CFLAGS_test_fpu_impl.o += $(CC_FLAGS_FPU) +CFLAGS_REMOVE_test_fpu_impl.o += $(CC_FLAGS_NO_FPU) # Some KUnit files (hooks.o) need to be built-in even when KUnit is a module, # so we can't just use obj-$(CONFIG_KUNIT). 
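The rewritten Makefile rules above lean on the generic kernel-mode-FPU support that the new ARCH_HAS_KERNEL_FPU_SUPPORT dependency of TEST_FPU refers to: floating-point code is confined to test_fpu_impl.o, the only object built with CC_FLAGS_FPU, while test_fpu_glue.o brackets the call from ordinary kernel context. A rough sketch of that split, assuming the <linux/fpu.h> helpers introduced by the same 6.10 series; the file and function names here are illustrative, not the module's actual code:

```c
/* impl.c -- compiled with CFLAGS_impl.o += $(CC_FLAGS_FPU) */
int fpu_sum_impl(void)
{
	volatile double a = 2.0, b = 3.0;	/* real FP instructions live here */

	return (int)(a + b);
}

/* glue.c -- compiled without FPU flags; safe to call from normal context */
#include <linux/errno.h>
#include <linux/fpu.h>

int fpu_sum_impl(void);

int fpu_sum(void)
{
	int ret;

	if (!kernel_fpu_available())	/* e.g. soft-float configurations */
		return -EOPNOTSUPP;
	kernel_fpu_begin();		/* FPU state is only valid inside */
	ret = fpu_sum_impl();
	kernel_fpu_end();
	return ret;
}
```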
@@ -233,9 +213,13 @@ obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \ of-reconfig-notifier-error-inject.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o +obj-$(CONFIG_CODE_TAGGING) += codetag.o +obj-$(CONFIG_MEM_ALLOC_PROFILING) += alloc_tag.o + lib-$(CONFIG_GENERIC_BUG) += bug.o obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o +obj-$(CONFIG_ARCH_NEED_CMPXCHG_1_EMU) += cmpxchg-emu.o obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o #ensure exported functions have prototypes @@ -352,7 +336,7 @@ $(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \ $(call cmd,build_OID_registry) quiet_cmd_build_OID_registry = GEN $@ - cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@ + cmd_build_OID_registry = perl $(src)/build_OID_registry $< $@ clean-files += oid_registry_data.c @@ -403,8 +387,6 @@ CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread) CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation) CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN) obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o -obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o -obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o @@ -412,8 +394,8 @@ obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o # FORTIFY_SOURCE compile-time behavior tests -TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c) -TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS)) +TEST_FORTIFY_SRCS = $(wildcard $(src)/test_fortify/*-*.c) +TEST_FORTIFY_LOGS = $(patsubst $(src)/%.c, %.log, $(TEST_FORTIFY_SRCS)) TEST_FORTIFY_LOG = test_fortify.log quiet_cmd_test_fortify = TEST $@ @@ -444,3 +426,7 @@ $(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE ifeq ($(CONFIG_FORTIFY_SOURCE),y) $(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG) endif + +# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined. +# Pass CFLAGS_KASAN to avoid warnings. +$(foreach x, $(patsubst %.log,%.o,$(TEST_FORTIFY_LOGS)), $(eval KASAN_SANITIZE_$(x) := y)) diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c new file mode 100644 index 0000000000..c347b8b72d --- /dev/null +++ b/lib/alloc_tag.c @@ -0,0 +1,273 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/alloc_tag.h> +#include <linux/fs.h> +#include <linux/gfp.h> +#include <linux/module.h> +#include <linux/page_ext.h> +#include <linux/proc_fs.h> +#include <linux/seq_buf.h> +#include <linux/seq_file.h> + +static struct codetag_type *alloc_tag_cttype; + +DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); +EXPORT_SYMBOL(_shared_alloc_tag); + +DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, + mem_alloc_profiling_key); + +struct allocinfo_private { + struct codetag_iterator iter; + bool print_header; +}; + +static void *allocinfo_start(struct seq_file *m, loff_t *pos) +{ + struct allocinfo_private *priv; + struct codetag *ct; + loff_t node = *pos; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + m->private = priv; + if (!priv) + return NULL; + + priv->print_header = (node == 0); + codetag_lock_module_list(alloc_tag_cttype, true); + priv->iter = codetag_get_ct_iter(alloc_tag_cttype); + while ((ct = codetag_next_ct(&priv->iter)) != NULL && node) + node--; + + return ct ? 
priv : NULL; +} + +static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos) +{ + struct allocinfo_private *priv = (struct allocinfo_private *)arg; + struct codetag *ct = codetag_next_ct(&priv->iter); + + (*pos)++; + if (!ct) + return NULL; + + return priv; +} + +static void allocinfo_stop(struct seq_file *m, void *arg) +{ + struct allocinfo_private *priv = (struct allocinfo_private *)m->private; + + if (priv) { + codetag_lock_module_list(alloc_tag_cttype, false); + kfree(priv); + } +} + +static void print_allocinfo_header(struct seq_buf *buf) +{ + /* Output format version, so we can change it. */ + seq_buf_printf(buf, "allocinfo - version: 1.0\n"); + seq_buf_printf(buf, "# <size> <calls> <tag info>\n"); +} + +static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct) +{ + struct alloc_tag *tag = ct_to_alloc_tag(ct); + struct alloc_tag_counters counter = alloc_tag_read(tag); + s64 bytes = counter.bytes; + + seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls); + codetag_to_text(out, ct); + seq_buf_putc(out, ' '); + seq_buf_putc(out, '\n'); +} + +static int allocinfo_show(struct seq_file *m, void *arg) +{ + struct allocinfo_private *priv = (struct allocinfo_private *)arg; + char *bufp; + size_t n = seq_get_buf(m, &bufp); + struct seq_buf buf; + + seq_buf_init(&buf, bufp, n); + if (priv->print_header) { + print_allocinfo_header(&buf); + priv->print_header = false; + } + alloc_tag_to_text(&buf, priv->iter.ct); + seq_commit(m, seq_buf_used(&buf)); + return 0; +} + +static const struct seq_operations allocinfo_seq_op = { + .start = allocinfo_start, + .next = allocinfo_next, + .stop = allocinfo_stop, + .show = allocinfo_show, +}; + +size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep) +{ + struct codetag_iterator iter; + struct codetag *ct; + struct codetag_bytes n; + unsigned int i, nr = 0; + + if (can_sleep) + codetag_lock_module_list(alloc_tag_cttype, true); + else if (!codetag_trylock_module_list(alloc_tag_cttype)) + return 0; + + iter = codetag_get_ct_iter(alloc_tag_cttype); + while ((ct = codetag_next_ct(&iter))) { + struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct)); + + n.ct = ct; + n.bytes = counter.bytes; + + for (i = 0; i < nr; i++) + if (n.bytes > tags[i].bytes) + break; + + if (i < count) { + nr -= nr == count; + memmove(&tags[i + 1], + &tags[i], + sizeof(tags[0]) * (nr - i)); + nr++; + tags[i] = n; + } + } + + codetag_lock_module_list(alloc_tag_cttype, false); + + return nr; +} + +static void __init procfs_init(void) +{ + proc_create_seq("allocinfo", 0400, NULL, &allocinfo_seq_op); +} + +static bool alloc_tag_module_unload(struct codetag_type *cttype, + struct codetag_module *cmod) +{ + struct codetag_iterator iter = codetag_get_ct_iter(cttype); + struct alloc_tag_counters counter; + bool module_unused = true; + struct alloc_tag *tag; + struct codetag *ct; + + for (ct = codetag_next_ct(&iter); ct; ct = codetag_next_ct(&iter)) { + if (iter.cmod != cmod) + continue; + + tag = ct_to_alloc_tag(ct); + counter = alloc_tag_read(tag); + + if (WARN(counter.bytes, + "%s:%u module %s func:%s has %llu allocated at module unload", + ct->filename, ct->lineno, ct->modname, ct->function, counter.bytes)) + module_unused = false; + } + + return module_unused; +} + +#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT +static bool mem_profiling_support __meminitdata = true; +#else +static bool mem_profiling_support __meminitdata; +#endif + +static int __init setup_early_mem_profiling(char *str) +{ + bool enable; + + 
if (!str || !str[0]) + return -EINVAL; + + if (!strncmp(str, "never", 5)) { + enable = false; + mem_profiling_support = false; + } else { + int res; + + res = kstrtobool(str, &enable); + if (res) + return res; + + mem_profiling_support = true; + } + + if (enable != static_key_enabled(&mem_alloc_profiling_key)) { + if (enable) + static_branch_enable(&mem_alloc_profiling_key); + else + static_branch_disable(&mem_alloc_profiling_key); + } + + return 0; +} +early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling); + +static __init bool need_page_alloc_tagging(void) +{ + return mem_profiling_support; +} + +static __init void init_page_alloc_tagging(void) +{ +} + +struct page_ext_operations page_alloc_tagging_ops = { + .size = sizeof(union codetag_ref), + .need = need_page_alloc_tagging, + .init = init_page_alloc_tagging, +}; +EXPORT_SYMBOL(page_alloc_tagging_ops); + +#ifdef CONFIG_SYSCTL +static struct ctl_table memory_allocation_profiling_sysctls[] = { + { + .procname = "mem_profiling", + .data = &mem_alloc_profiling_key, +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG + .mode = 0444, +#else + .mode = 0644, +#endif + .proc_handler = proc_do_static_key, + }, + { } +}; + +static void __init sysctl_init(void) +{ + if (!mem_profiling_support) + memory_allocation_profiling_sysctls[0].mode = 0444; + + register_sysctl_init("vm", memory_allocation_profiling_sysctls); +} +#else /* CONFIG_SYSCTL */ +static inline void sysctl_init(void) {} +#endif /* CONFIG_SYSCTL */ + +static int __init alloc_tag_init(void) +{ + const struct codetag_type_desc desc = { + .section = "alloc_tags", + .tag_size = sizeof(struct alloc_tag), + .module_unload = alloc_tag_module_unload, + }; + + alloc_tag_cttype = codetag_register_type(&desc); + if (IS_ERR(alloc_tag_cttype)) + return PTR_ERR(alloc_tag_cttype); + + sysctl_init(); + procfs_init(); + + return 0; +} +module_init(alloc_tag_init); diff --git a/lib/build_OID_registry b/lib/build_OID_registry index d7fc32ea8a..8267e8d713 100755 --- a/lib/build_OID_registry +++ b/lib/build_OID_registry @@ -8,6 +8,7 @@ # use strict; +use Cwd qw(abs_path); my @names = (); my @oids = (); @@ -17,6 +18,8 @@ if ($#ARGV != 1) { exit(2); } +my $abs_srctree = abs_path($ENV{'srctree'}); + # # Open the file to read from # @@ -35,7 +38,9 @@ close IN_FILE || die; # open C_FILE, ">$ARGV[1]" or die; print C_FILE "/*\n"; -print C_FILE " * Automatically generated by ", $0, ". Do not edit\n"; +my $scriptname = $0; +$scriptname =~ s#^\Q$abs_srctree/\E##; +print C_FILE " * Automatically generated by ", $scriptname, ". 
Do not edit\n"; print C_FILE " */\n"; # diff --git a/lib/buildid.c b/lib/buildid.c index 898301b49e..7954dd92e3 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -182,8 +182,8 @@ unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init; */ void __init init_vmlinux_build_id(void) { - extern const void __start_notes __weak; - extern const void __stop_notes __weak; + extern const void __start_notes; + extern const void __stop_notes; unsigned int size = &__stop_notes - &__start_notes; build_id_parse_buf(&__start_notes, vmlinux_build_id, size); diff --git a/lib/closure.c b/lib/closure.c index 99380d9b4a..116afae2ee 100644 --- a/lib/closure.c +++ b/lib/closure.c @@ -13,7 +13,7 @@ #include <linux/seq_file.h> #include <linux/sched/debug.h> -static inline void closure_put_after_sub(struct closure *cl, int flags) +static inline void closure_put_after_sub_checks(int flags) { int r = flags & CLOSURE_REMAINING_MASK; @@ -22,12 +22,17 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r))) r &= ~CLOSURE_GUARD_MASK; - if (!r) { - smp_acquire__after_ctrl_dep(); + WARN(!r && (flags & ~CLOSURE_DESTRUCTOR), + "closure ref hit 0 with incorrect flags set: %x (%u)", + flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags)); +} + +static inline void closure_put_after_sub(struct closure *cl, int flags) +{ + closure_put_after_sub_checks(flags); - WARN(flags & ~CLOSURE_DESTRUCTOR, - "closure ref hit 0 with incorrect flags set: %x (%u)", - flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags)); + if (!(flags & CLOSURE_REMAINING_MASK)) { + smp_acquire__after_ctrl_dep(); cl->closure_get_happened = false; @@ -145,6 +150,78 @@ void __sched __closure_sync(struct closure *cl) } EXPORT_SYMBOL(__closure_sync); +/* + * closure_return_sync - finish running a closure, synchronously (i.e. waiting + * for outstanding get()s to finish) and returning once closure refcount is 0. + * + * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent + * closure_get_not_zero() calls waill fail. + */ +void __sched closure_return_sync(struct closure *cl) +{ + struct closure_syncer s = { .task = current }; + + cl->s = &s; + set_closure_fn(cl, closure_sync_fn, NULL); + + unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR, + &cl->remaining); + + closure_put_after_sub_checks(flags); + + if (unlikely(flags & CLOSURE_REMAINING_MASK)) { + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (s.done) + break; + schedule(); + } + + __set_current_state(TASK_RUNNING); + } + + if (cl->parent) + closure_put(cl->parent); +} +EXPORT_SYMBOL(closure_return_sync); + +int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout) +{ + struct closure_syncer s = { .task = current }; + int ret = 0; + + cl->s = &s; + continue_at(cl, closure_sync_fn, NULL); + + while (1) { + set_current_state(TASK_UNINTERRUPTIBLE); + if (s.done) + break; + if (!timeout) { + /* + * Carefully undo the continue_at() - but only if it + * hasn't completed, i.e. 
the final closure_put() hasn't + * happened yet: + */ + unsigned old, new, v = atomic_read(&cl->remaining); + do { + old = v; + if (!old || (old & CLOSURE_RUNNING)) + goto success; + + new = old + CLOSURE_REMAINING_INITIALIZER; + } while ((v = atomic_cmpxchg(&cl->remaining, old, new)) != old); + ret = -ETIME; + } + + timeout = schedule_timeout(timeout); + } +success: + __set_current_state(TASK_RUNNING); + return ret; +} +EXPORT_SYMBOL(__closure_sync_timeout); + #ifdef CONFIG_DEBUG_CLOSURES static LIST_HEAD(closure_list); @@ -167,6 +244,9 @@ void closure_debug_destroy(struct closure *cl) { unsigned long flags; + if (cl->magic == CLOSURE_MAGIC_STACK) + return; + BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE); cl->magic = CLOSURE_MAGIC_DEAD; diff --git a/lib/cmpxchg-emu.c b/lib/cmpxchg-emu.c new file mode 100644 index 0000000000..27f6f97cb6 --- /dev/null +++ b/lib/cmpxchg-emu.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Emulated 1-byte cmpxchg operation for architectures lacking direct + * support for this size. This is implemented in terms of 4-byte cmpxchg + * operations. + * + * Copyright (C) 2024 Paul E. McKenney. + */ + +#include <linux/types.h> +#include <linux/export.h> +#include <linux/instrumented.h> +#include <linux/atomic.h> +#include <linux/panic.h> +#include <linux/bug.h> +#include <asm-generic/rwonce.h> +#include <linux/cmpxchg-emu.h> + +union u8_32 { + u8 b[4]; + u32 w; +}; + +/* Emulate one-byte cmpxchg() in terms of 4-byte cmpxchg. */ +uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new) +{ + u32 *p32 = (u32 *)(((uintptr_t)p) & ~0x3); + int i = ((uintptr_t)p) & 0x3; + union u8_32 old32; + union u8_32 new32; + u32 ret; + + ret = READ_ONCE(*p32); + do { + old32.w = ret; + if (old32.b[i] != old) + return old32.b[i]; + new32.w = old32.w; + new32.b[i] = new; + instrument_atomic_read_write(p, 1); + ret = data_race(cmpxchg(p32, old32.w, new32.w)); // Overridden above. + } while (ret != old32.w); + return old; +} +EXPORT_SYMBOL_GPL(cmpxchg_emu_u8); diff --git a/lib/codetag.c b/lib/codetag.c new file mode 100644 index 0000000000..5ace625f23 --- /dev/null +++ b/lib/codetag.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <linux/codetag.h> +#include <linux/idr.h> +#include <linux/kallsyms.h> +#include <linux/module.h> +#include <linux/seq_buf.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +struct codetag_type { + struct list_head link; + unsigned int count; + struct idr mod_idr; + struct rw_semaphore mod_lock; /* protects mod_idr */ + struct codetag_type_desc desc; +}; + +struct codetag_range { + struct codetag *start; + struct codetag *stop; +}; + +struct codetag_module { + struct module *mod; + struct codetag_range range; +}; + +static DEFINE_MUTEX(codetag_lock); +static LIST_HEAD(codetag_types); + +void codetag_lock_module_list(struct codetag_type *cttype, bool lock) +{ + if (lock) + down_read(&cttype->mod_lock); + else + up_read(&cttype->mod_lock); +} + +bool codetag_trylock_module_list(struct codetag_type *cttype) +{ + return down_read_trylock(&cttype->mod_lock) != 0; +} + +struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype) +{ + struct codetag_iterator iter = { + .cttype = cttype, + .cmod = NULL, + .mod_id = 0, + .ct = NULL, + }; + + return iter; +} + +static inline struct codetag *get_first_module_ct(struct codetag_module *cmod) +{ + return cmod->range.start < cmod->range.stop ? 
cmod->range.start : NULL; +} + +static inline +struct codetag *get_next_module_ct(struct codetag_iterator *iter) +{ + struct codetag *res = (struct codetag *) + ((char *)iter->ct + iter->cttype->desc.tag_size); + + return res < iter->cmod->range.stop ? res : NULL; +} + +struct codetag *codetag_next_ct(struct codetag_iterator *iter) +{ + struct codetag_type *cttype = iter->cttype; + struct codetag_module *cmod; + struct codetag *ct; + + lockdep_assert_held(&cttype->mod_lock); + + if (unlikely(idr_is_empty(&cttype->mod_idr))) + return NULL; + + ct = NULL; + while (true) { + cmod = idr_find(&cttype->mod_idr, iter->mod_id); + + /* If module was removed move to the next one */ + if (!cmod) + cmod = idr_get_next_ul(&cttype->mod_idr, + &iter->mod_id); + + /* Exit if no more modules */ + if (!cmod) + break; + + if (cmod != iter->cmod) { + iter->cmod = cmod; + ct = get_first_module_ct(cmod); + } else + ct = get_next_module_ct(iter); + + if (ct) + break; + + iter->mod_id++; + } + + iter->ct = ct; + return ct; +} + +void codetag_to_text(struct seq_buf *out, struct codetag *ct) +{ + if (ct->modname) + seq_buf_printf(out, "%s:%u [%s] func:%s", + ct->filename, ct->lineno, + ct->modname, ct->function); + else + seq_buf_printf(out, "%s:%u func:%s", + ct->filename, ct->lineno, ct->function); +} + +static inline size_t range_size(const struct codetag_type *cttype, + const struct codetag_range *range) +{ + return ((char *)range->stop - (char *)range->start) / + cttype->desc.tag_size; +} + +#ifdef CONFIG_MODULES +static void *get_symbol(struct module *mod, const char *prefix, const char *name) +{ + DECLARE_SEQ_BUF(sb, KSYM_NAME_LEN); + const char *buf; + void *ret; + + seq_buf_printf(&sb, "%s%s", prefix, name); + if (seq_buf_has_overflowed(&sb)) + return NULL; + + buf = seq_buf_str(&sb); + preempt_disable(); + ret = mod ? + (void *)find_kallsyms_symbol_value(mod, buf) : + (void *)kallsyms_lookup_name(buf); + preempt_enable(); + + return ret; +} + +static struct codetag_range get_section_range(struct module *mod, + const char *section) +{ + return (struct codetag_range) { + get_symbol(mod, "__start_", section), + get_symbol(mod, "__stop_", section), + }; +} + +static int codetag_module_init(struct codetag_type *cttype, struct module *mod) +{ + struct codetag_range range; + struct codetag_module *cmod; + int err; + + range = get_section_range(mod, cttype->desc.section); + if (!range.start || !range.stop) { + pr_warn("Failed to load code tags of type %s from the module %s\n", + cttype->desc.section, + mod ? 
mod->name : "(built-in)"); + return -EINVAL; + } + + /* Ignore empty ranges */ + if (range.start == range.stop) + return 0; + + BUG_ON(range.start > range.stop); + + cmod = kmalloc(sizeof(*cmod), GFP_KERNEL); + if (unlikely(!cmod)) + return -ENOMEM; + + cmod->mod = mod; + cmod->range = range; + + down_write(&cttype->mod_lock); + err = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL); + if (err >= 0) { + cttype->count += range_size(cttype, &range); + if (cttype->desc.module_load) + cttype->desc.module_load(cttype, cmod); + } + up_write(&cttype->mod_lock); + + if (err < 0) { + kfree(cmod); + return err; + } + + return 0; +} + +void codetag_load_module(struct module *mod) +{ + struct codetag_type *cttype; + + if (!mod) + return; + + mutex_lock(&codetag_lock); + list_for_each_entry(cttype, &codetag_types, link) + codetag_module_init(cttype, mod); + mutex_unlock(&codetag_lock); +} + +bool codetag_unload_module(struct module *mod) +{ + struct codetag_type *cttype; + bool unload_ok = true; + + if (!mod) + return true; + + mutex_lock(&codetag_lock); + list_for_each_entry(cttype, &codetag_types, link) { + struct codetag_module *found = NULL; + struct codetag_module *cmod; + unsigned long mod_id, tmp; + + down_write(&cttype->mod_lock); + idr_for_each_entry_ul(&cttype->mod_idr, cmod, tmp, mod_id) { + if (cmod->mod && cmod->mod == mod) { + found = cmod; + break; + } + } + if (found) { + if (cttype->desc.module_unload) + if (!cttype->desc.module_unload(cttype, cmod)) + unload_ok = false; + + cttype->count -= range_size(cttype, &cmod->range); + idr_remove(&cttype->mod_idr, mod_id); + kfree(cmod); + } + up_write(&cttype->mod_lock); + } + mutex_unlock(&codetag_lock); + + return unload_ok; +} + +#else /* CONFIG_MODULES */ +static int codetag_module_init(struct codetag_type *cttype, struct module *mod) { return 0; } +#endif /* CONFIG_MODULES */ + +struct codetag_type * +codetag_register_type(const struct codetag_type_desc *desc) +{ + struct codetag_type *cttype; + int err; + + BUG_ON(desc->tag_size <= 0); + + cttype = kzalloc(sizeof(*cttype), GFP_KERNEL); + if (unlikely(!cttype)) + return ERR_PTR(-ENOMEM); + + cttype->desc = *desc; + idr_init(&cttype->mod_idr); + init_rwsem(&cttype->mod_lock); + + err = codetag_module_init(cttype, NULL); + if (unlikely(err)) { + kfree(cttype); + return ERR_PTR(err); + } + + mutex_lock(&codetag_lock); + list_add_tail(&cttype->link, &codetag_types); + mutex_unlock(&codetag_lock); + + return cttype; +} diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 45436bfc6d..b01253cac7 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -8,6 +8,11 @@ config CRYPTO_LIB_UTILS config CRYPTO_LIB_AES tristate +config CRYPTO_LIB_AESCFB + tristate + select CRYPTO_LIB_AES + select CRYPTO_LIB_UTILS + config CRYPTO_LIB_AESGCM tristate select CRYPTO_LIB_AES diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 8d1446c2be..969baab8c8 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -10,6 +10,9 @@ obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o libaes-y := aes.o +obj-$(CONFIG_CRYPTO_LIB_AESCFB) += libaescfb.o +libaescfb-y := aescfb.o + obj-$(CONFIG_CRYPTO_LIB_AESGCM) += libaesgcm.o libaesgcm-y := aesgcm.o diff --git a/lib/crypto/aescfb.c b/lib/crypto/aescfb.c new file mode 100644 index 0000000000..749dc1258a --- /dev/null +++ b/lib/crypto/aescfb.c @@ -0,0 +1,257 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Minimal library implementation of AES in CFB mode + * + * Copyright 2023 Google LLC + */ + +#include 
<linux/module.h> + +#include <crypto/algapi.h> +#include <crypto/aes.h> + +#include <asm/irqflags.h> + +static void aescfb_encrypt_block(const struct crypto_aes_ctx *ctx, void *dst, + const void *src) +{ + unsigned long flags; + + /* + * In AES-CFB, the AES encryption operates on known 'plaintext' (the IV + * and ciphertext), making it susceptible to timing attacks on the + * encryption key. The AES library already mitigates this risk to some + * extent by pulling the entire S-box into the caches before doing any + * substitutions, but this strategy is more effective when running with + * interrupts disabled. + */ + local_irq_save(flags); + aes_encrypt(ctx, dst, src); + local_irq_restore(flags); +} + +/** + * aescfb_encrypt - Perform AES-CFB encryption on a block of data + * + * @ctx: The AES-CFB key schedule + * @dst: Pointer to the ciphertext output buffer + * @src: Pointer the plaintext (may equal @dst for encryption in place) + * @len: The size in bytes of the plaintext and ciphertext. + * @iv: The initialization vector (IV) to use for this block of data + */ +void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src, + int len, const u8 iv[AES_BLOCK_SIZE]) +{ + u8 ks[AES_BLOCK_SIZE]; + const u8 *v = iv; + + while (len > 0) { + aescfb_encrypt_block(ctx, ks, v); + crypto_xor_cpy(dst, src, ks, min(len, AES_BLOCK_SIZE)); + v = dst; + + dst += AES_BLOCK_SIZE; + src += AES_BLOCK_SIZE; + len -= AES_BLOCK_SIZE; + } + + memzero_explicit(ks, sizeof(ks)); +} +EXPORT_SYMBOL(aescfb_encrypt); + +/** + * aescfb_decrypt - Perform AES-CFB decryption on a block of data + * + * @ctx: The AES-CFB key schedule + * @dst: Pointer to the plaintext output buffer + * @src: Pointer the ciphertext (may equal @dst for decryption in place) + * @len: The size in bytes of the plaintext and ciphertext. + * @iv: The initialization vector (IV) to use for this block of data + */ +void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src, + int len, const u8 iv[AES_BLOCK_SIZE]) +{ + u8 ks[2][AES_BLOCK_SIZE]; + + aescfb_encrypt_block(ctx, ks[0], iv); + + for (int i = 0; len > 0; i ^= 1) { + if (len > AES_BLOCK_SIZE) + /* + * Generate the keystream for the next block before + * performing the XOR, as that may update in place and + * overwrite the ciphertext. + */ + aescfb_encrypt_block(ctx, ks[!i], src); + + crypto_xor_cpy(dst, src, ks[i], min(len, AES_BLOCK_SIZE)); + + dst += AES_BLOCK_SIZE; + src += AES_BLOCK_SIZE; + len -= AES_BLOCK_SIZE; + } + + memzero_explicit(ks, sizeof(ks)); +} +EXPORT_SYMBOL(aescfb_decrypt); + +MODULE_DESCRIPTION("Generic AES-CFB library"); +MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>"); +MODULE_LICENSE("GPL"); + +#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS + +/* + * Test code below. 
Vectors taken from crypto/testmgr.h + */ + +static struct { + u8 ptext[64]; + u8 ctext[64]; + + u8 key[AES_MAX_KEY_SIZE]; + u8 iv[AES_BLOCK_SIZE]; + + int klen; + int len; +} const aescfb_tv[] __initconst = { + { /* From NIST SP800-38A */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f" + "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b" + "\x26\x75\x1f\x67\xa3\xcb\xb1\x40" + "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf" + "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e" + "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6", + .len = 64, + }, { + .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52" + "\xc8\x10\xf3\x2b\x80\x90\x79\xe5" + "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b", + .klen = 24, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab" + "\x34\xc2\x59\x09\xc9\x9a\x41\x74" + "\x67\xce\x7f\x7f\x81\x17\x36\x21" + "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a" + "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1" + "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9" + "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0" + "\x42\xae\x8f\xba\x58\x4b\x09\xff", + .len = 64, + }, { + .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe" + "\x2b\x73\xae\xf0\x85\x7d\x77\x81" + "\x1f\x35\x2c\x07\x3b\x61\x08\xd7" + "\x2d\x98\x10\xa3\x09\x14\xdf\xf4", + .klen = 32, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae\x2d\x8a\x57\x1e\x03\xac\x9c" + "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51" + "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11" + "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef" + "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17" + "\xad\x2b\x41\x7b\xe6\x6c\x37\x10", + .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b" + "\x7e\xcd\x84\x86\x98\x5d\x38\x60" + "\x39\xff\xed\x14\x3b\x28\xb1\xc8" + "\x32\x11\x3c\x63\x31\xe5\x40\x7b" + "\xdf\x10\x13\x24\x15\xe5\x4b\x92" + "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9" + "\x75\xa3\x85\x74\x1a\xb9\xce\xf8" + "\x20\x31\x62\x3d\x55\xb1\xe4\x71", + .len = 64, + }, { /* > 16 bytes, not a multiple of 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96" + "\xe9\x3d\x7e\x11\x73\x93\x17\x2a" + "\xae", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20" + "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a" + "\xc8", + .len = 17, + }, { /* < 16 bytes */ + .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6" + "\xab\xf7\x15\x88\x09\xcf\x4f\x3c", + .klen = 16, + .iv = "\x00\x01\x02\x03\x04\x05\x06\x07" + "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f", + .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f", + .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad", + .len = 7, + }, +}; + +static int __init libaescfb_init(void) +{ + 
for (int i = 0; i < ARRAY_SIZE(aescfb_tv); i++) { + struct crypto_aes_ctx ctx; + u8 buf[64]; + + if (aes_expandkey(&ctx, aescfb_tv[i].key, aescfb_tv[i].klen)) { + pr_err("aes_expandkey() failed on vector %d\n", i); + return -ENODEV; + } + + aescfb_encrypt(&ctx, buf, aescfb_tv[i].ptext, aescfb_tv[i].len, + aescfb_tv[i].iv); + if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) { + pr_err("aescfb_encrypt() #1 failed on vector %d\n", i); + return -ENODEV; + } + + /* decrypt in place */ + aescfb_decrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv); + if (memcmp(buf, aescfb_tv[i].ptext, aescfb_tv[i].len)) { + pr_err("aescfb_decrypt() failed on vector %d\n", i); + return -ENODEV; + } + + /* encrypt in place */ + aescfb_encrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv); + if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) { + pr_err("aescfb_encrypt() #2 failed on vector %d\n", i); + + return -ENODEV; + } + + } + return 0; +} +module_init(libaescfb_init); + +static void __exit libaescfb_exit(void) +{ +} +module_exit(libaescfb_exit); +#endif diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c index 3518e7394e..ca736166f1 100644 --- a/lib/decompress_bunzip2.c +++ b/lib/decompress_bunzip2.c @@ -232,7 +232,8 @@ static int INIT get_next_block(struct bunzip_data *bd) RUNB) */ symCount = symTotal+2; for (j = 0; j < groupCount; j++) { - unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1]; + unsigned char length[MAX_SYMBOLS]; + unsigned short temp[MAX_HUFCODE_BITS+1]; int minLen, maxLen, pp; /* Read Huffman code lengths for each symbol. They're stored in a way similar to mtf; record a starting diff --git a/lib/devres.c b/lib/devres.c index fe0c63caeb..4fc152de6d 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -1,10 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/bug.h> #include <linux/device.h> -#include <linux/err.h> -#include <linux/io.h> -#include <linux/gfp.h> +#include <linux/errno.h> #include <linux/export.h> +#include <linux/gfp_types.h> +#include <linux/io.h> +#include <linux/ioport.h> #include <linux/of_address.h> +#include <linux/types.h> enum devm_ioremap_type { DEVM_IOREMAP = 0, @@ -125,12 +128,13 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res, resource_size_t size; void __iomem *dest_ptr; char *pretty_name; + int ret; BUG_ON(!dev); if (!res || resource_type(res) != IORESOURCE_MEM) { - dev_err(dev, "invalid resource %pR\n", res); - return IOMEM_ERR_PTR(-EINVAL); + ret = dev_err_probe(dev, -EINVAL, "invalid resource %pR\n", res); + return IOMEM_ERR_PTR(ret); } if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED) @@ -144,20 +148,20 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res, else pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); if (!pretty_name) { - dev_err(dev, "can't generate pretty name for resource %pR\n", res); - return IOMEM_ERR_PTR(-ENOMEM); + ret = dev_err_probe(dev, -ENOMEM, "can't generate pretty name for resource %pR\n", res); + return IOMEM_ERR_PTR(ret); } if (!devm_request_mem_region(dev, res->start, size, pretty_name)) { - dev_err(dev, "can't request region for resource %pR\n", res); - return IOMEM_ERR_PTR(-EBUSY); + ret = dev_err_probe(dev, -EBUSY, "can't request region for resource %pR\n", res); + return IOMEM_ERR_PTR(ret); } dest_ptr = __devm_ioremap(dev, res->start, size, type); if (!dest_ptr) { - dev_err(dev, "ioremap failed for resource %pR\n", res); devm_release_mem_region(dev, res->start, size); - dest_ptr = IOMEM_ERR_PTR(-ENOMEM); + ret 
= dev_err_probe(dev, -ENOMEM, "ioremap failed for resource %pR\n", res); + return IOMEM_ERR_PTR(ret); } return dest_ptr; diff --git a/lib/dim/Makefile b/lib/dim/Makefile index 1d6858a108..c4cc4026c4 100644 --- a/lib/dim/Makefile +++ b/lib/dim/Makefile @@ -2,6 +2,6 @@ # DIM Dynamic Interrupt Moderation library # -obj-$(CONFIG_DIMLIB) += dim.o +obj-$(CONFIG_DIMLIB) += dimlib.o -dim-y := dim.o net_dim.o rdma_dim.o +dimlib-objs := dim.o net_dim.o rdma_dim.o diff --git a/lib/dim/dim.c b/lib/dim/dim.c index e89aaf07bd..83b65ac74d 100644 --- a/lib/dim/dim.c +++ b/lib/dim/dim.c @@ -82,3 +82,6 @@ bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end, return true; } EXPORT_SYMBOL(dim_calc_stats); + +MODULE_DESCRIPTION("Dynamic Interrupt Moderation (DIM) library"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index a1389db1c3..e49deddd3d 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c @@ -15,12 +15,10 @@ #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0) #define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0) -static void dql_check_stall(struct dql *dql) +static void dql_check_stall(struct dql *dql, unsigned short stall_thrs) { - unsigned short stall_thrs; unsigned long now; - stall_thrs = READ_ONCE(dql->stall_thrs); if (!stall_thrs) return; @@ -86,9 +84,16 @@ void dql_completed(struct dql *dql, unsigned int count) { unsigned int inprogress, prev_inprogress, limit; unsigned int ovlimit, completed, num_queued; + unsigned short stall_thrs; bool all_prev_completed; num_queued = READ_ONCE(dql->num_queued); + /* Read stall_thrs in advance since it belongs to the same (first) + * cache line as ->num_queued. This way, dql_check_stall() does not + * need to touch the first cache line again later, reducing the window + * of possible false sharing. + */ + stall_thrs = READ_ONCE(dql->stall_thrs); /* Can't complete more than what's in queue */ BUG_ON(count > num_queued - dql->num_completed); @@ -178,7 +183,7 @@ void dql_completed(struct dql *dql, unsigned int count) dql->num_completed = completed; dql->prev_num_queued = num_queued; - dql_check_stall(dql); + dql_check_stall(dql, stall_thrs); } EXPORT_SYMBOL(dql_completed); diff --git a/lib/find_bit.c b/lib/find_bit.c index 32f99e9a67..0836bb3d76 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -87,7 +87,7 @@ out: \ if (sz % BITS_PER_LONG) \ tmp = (FETCH) & BITMAP_LAST_WORD_MASK(sz); \ found: \ - sz = min(idx * BITS_PER_LONG + fns(tmp, nr), sz); \ + sz = idx * BITS_PER_LONG + fns(tmp, nr); \ out: \ sz; \ }) @@ -116,6 +116,18 @@ unsigned long _find_first_and_bit(const unsigned long *addr1, EXPORT_SYMBOL(_find_first_and_bit); #endif +/* + * Find the first set bit in three memory regions. + */ +unsigned long _find_first_and_and_bit(const unsigned long *addr1, + const unsigned long *addr2, + const unsigned long *addr3, + unsigned long size) +{ + return FIND_FIRST_BIT(addr1[idx] & addr2[idx] & addr3[idx], /* nop */, size); +} +EXPORT_SYMBOL(_find_first_and_and_bit); + #ifndef find_first_zero_bit /* * Find the first cleared bit in a memory region. diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c index ad29721b95..e17d520f53 100644 --- a/lib/fortify_kunit.c +++ b/lib/fortify_kunit.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Runtime test cases for CONFIG_FORTIFY_SOURCE. For testing memcpy(), - * see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c). + * Runtime test cases for CONFIG_FORTIFY_SOURCE. 
For additional memcpy() + * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c). * * For corner cases with UBSAN, try testing with: * @@ -18,8 +18,10 @@ /* We don't need to fill dmesg with the fortify WARNs during testing. */ #ifdef DEBUG # define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x) +# define FORTIFY_WARN_KUNIT(x...) WARN_ONCE(x) #else # define FORTIFY_REPORT_KUNIT(x...) do { } while (0) +# define FORTIFY_WARN_KUNIT(x...) do { } while (0) #endif /* Redefine fortify_panic() to track failures. */ @@ -30,6 +32,14 @@ void fortify_add_kunit_error(int write); return (retfail); \ } while (0) +/* Redefine fortify_warn_once() to track memcpy() failures. */ +#define fortify_warn_once(chk_func, x...) do { \ + bool __result = chk_func; \ + FORTIFY_WARN_KUNIT(__result, x); \ + if (__result) \ + fortify_add_kunit_error(1); \ +} while (0) + #include <kunit/device.h> #include <kunit/test.h> #include <kunit/test-bug.h> @@ -71,7 +81,7 @@ void fortify_add_kunit_error(int write) kunit_put_resource(resource); } -static void known_sizes_test(struct kunit *test) +static void fortify_test_known_sizes(struct kunit *test) { KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8); KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10); @@ -104,7 +114,7 @@ static noinline size_t want_minus_one(int pick) return __compiletime_strlen(str); } -static void control_flow_split_test(struct kunit *test) +static void fortify_test_control_flow_split(struct kunit *test) { KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX); } @@ -180,11 +190,11 @@ static volatile size_t unknown_size = 50; #endif #define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \ -static void alloc_size_##allocator##_const_test(struct kunit *test) \ +static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \ { \ CONST_TEST_BODY(TEST_##allocator); \ } \ -static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \ +static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \ { \ DYNAMIC_TEST_BODY(TEST_##allocator); \ } @@ -226,9 +236,6 @@ static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \ kfree(p)); \ checker(expected_size, __kmalloc(alloc_size, gfp), \ kfree(p)); \ - checker(expected_size, \ - __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \ - kfree(p)); \ \ orig = kmalloc(alloc_size, gfp); \ KUNIT_EXPECT_TRUE(test, orig != NULL); \ @@ -353,6 +360,31 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc) } while (0) DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc) +static const char * const test_strs[] = { + "", + "Hello there", + "A longer string, just for variety", +}; + +#define TEST_realloc(checker) do { \ + gfp_t gfp = GFP_KERNEL; \ + size_t len; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \ + len = strlen(test_strs[i]); \ + KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \ + checker(len, kmemdup_array(test_strs[i], 1, len, gfp), \ + kfree(p)); \ + checker(len, kmemdup(test_strs[i], len, gfp), \ + kfree(p)); \ + } \ +} while (0) +static void fortify_test_realloc_size(struct kunit *test) +{ + TEST_realloc(check_dynamic); +} + /* * We can't have an array at the end of a structure or else * builds without -fstrict-flex-arrays=3 will report them as @@ -368,7 +400,7 @@ struct fortify_padding { /* Force compiler into not being able to resolve size at compile-time. 
*/ static volatile int unconst; -static void strlen_test(struct kunit *test) +static void fortify_test_strlen(struct kunit *test) { struct fortify_padding pad = { }; int i, end = sizeof(pad.buf) - 1; @@ -391,7 +423,7 @@ static void strlen_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); } -static void strnlen_test(struct kunit *test) +static void fortify_test_strnlen(struct kunit *test) { struct fortify_padding pad = { }; int i, end = sizeof(pad.buf) - 1; @@ -429,7 +461,7 @@ static void strnlen_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); } -static void strcpy_test(struct kunit *test) +static void fortify_test_strcpy(struct kunit *test) { struct fortify_padding pad = { }; char src[sizeof(pad.buf) + 1] = { }; @@ -487,7 +519,7 @@ static void strcpy_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void strncpy_test(struct kunit *test) +static void fortify_test_strncpy(struct kunit *test) { struct fortify_padding pad = { }; char src[] = "Copy me fully into a small buffer and I will overflow!"; @@ -546,7 +578,7 @@ static void strncpy_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void strscpy_test(struct kunit *test) +static void fortify_test_strscpy(struct kunit *test) { struct fortify_padding pad = { }; char src[] = "Copy me fully into a small buffer and I will overflow!"; @@ -603,7 +635,7 @@ static void strscpy_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void strcat_test(struct kunit *test) +static void fortify_test_strcat(struct kunit *test) { struct fortify_padding pad = { }; char src[sizeof(pad.buf) / 2] = { }; @@ -660,7 +692,7 @@ static void strcat_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void strncat_test(struct kunit *test) +static void fortify_test_strncat(struct kunit *test) { struct fortify_padding pad = { }; char src[sizeof(pad.buf)] = { }; @@ -733,7 +765,7 @@ static void strncat_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void strlcat_test(struct kunit *test) +static void fortify_test_strlcat(struct kunit *test) { struct fortify_padding pad = { }; char src[sizeof(pad.buf)] = { }; @@ -818,7 +850,75 @@ static void strlcat_test(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } -static void memscan_test(struct kunit *test) +/* Check for 0-sized arrays... 
*/ +struct fortify_zero_sized { + unsigned long bytes_before; + char buf[0]; + unsigned long bytes_after; +}; + +#define __fortify_test(memfunc) \ +static void fortify_test_##memfunc(struct kunit *test) \ +{ \ + struct fortify_zero_sized zero = { }; \ + struct fortify_padding pad = { }; \ + char srcA[sizeof(pad.buf) + 2]; \ + char srcB[sizeof(pad.buf) + 2]; \ + size_t len = sizeof(pad.buf) + unconst; \ + \ + memset(srcA, 'A', sizeof(srcA)); \ + KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \ + memset(srcB, 'B', sizeof(srcB)); \ + KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \ + \ + memfunc(pad.buf, srcA, 0 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf + 1, srcB, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \ + KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len - 1); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len + 1); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \ + memfunc(pad.buf + 1, srcB, len); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); \ + \ + /* Reset error counter. */ \ + fortify_write_overflows = 0; \ + /* Copy nothing into nothing: no errors. */ \ + memfunc(zero.buf, srcB, 0 + unconst); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + /* We currently explicitly ignore zero-sized dests. 
*/ \ + memfunc(zero.buf, srcB, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ +} +__fortify_test(memcpy) +__fortify_test(memmove) + +static void fortify_test_memscan(struct kunit *test) { char haystack[] = "Where oh where is my memory range?"; char *mem = haystack + strlen("Where oh where is "); @@ -837,7 +937,7 @@ static void memscan_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); } -static void memchr_test(struct kunit *test) +static void fortify_test_memchr(struct kunit *test) { char haystack[] = "Where oh where is my memory range?"; char *mem = haystack + strlen("Where oh where is "); @@ -856,7 +956,7 @@ static void memchr_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); } -static void memchr_inv_test(struct kunit *test) +static void fortify_test_memchr_inv(struct kunit *test) { char haystack[] = "Where oh where is my memory range?"; char *mem = haystack + 1; @@ -876,7 +976,7 @@ static void memchr_inv_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); } -static void memcmp_test(struct kunit *test) +static void fortify_test_memcmp(struct kunit *test) { char one[] = "My mind is going ..."; char two[] = "My mind is going ... I can feel it."; @@ -887,7 +987,7 @@ static void memcmp_test(struct kunit *test) KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0); KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); /* Still in bounds, but no longer matching. */ - KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 1), -32); + KUNIT_ASSERT_LT(test, memcmp(one, two, one_len + 1), 0); KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); /* Catch too-large ranges. */ @@ -898,7 +998,7 @@ static void memcmp_test(struct kunit *test) KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); } -static void kmemdup_test(struct kunit *test) +static void fortify_test_kmemdup(struct kunit *test) { char src[] = "I got Doom running on it!"; char *copy; @@ -958,31 +1058,33 @@ static int fortify_test_init(struct kunit *test) } static struct kunit_case fortify_test_cases[] = { - KUNIT_CASE(known_sizes_test), - KUNIT_CASE(control_flow_split_test), - KUNIT_CASE(alloc_size_kmalloc_const_test), - KUNIT_CASE(alloc_size_kmalloc_dynamic_test), - KUNIT_CASE(alloc_size_vmalloc_const_test), - KUNIT_CASE(alloc_size_vmalloc_dynamic_test), - KUNIT_CASE(alloc_size_kvmalloc_const_test), - KUNIT_CASE(alloc_size_kvmalloc_dynamic_test), - KUNIT_CASE(alloc_size_devm_kmalloc_const_test), - KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test), - KUNIT_CASE(strlen_test), - KUNIT_CASE(strnlen_test), - KUNIT_CASE(strcpy_test), - KUNIT_CASE(strncpy_test), - KUNIT_CASE(strscpy_test), - KUNIT_CASE(strcat_test), - KUNIT_CASE(strncat_test), - KUNIT_CASE(strlcat_test), + KUNIT_CASE(fortify_test_known_sizes), + KUNIT_CASE(fortify_test_control_flow_split), + KUNIT_CASE(fortify_test_alloc_size_kmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic), + KUNIT_CASE(fortify_test_alloc_size_vmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic), + KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic), + KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const), + KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic), + KUNIT_CASE(fortify_test_realloc_size), + KUNIT_CASE(fortify_test_strlen), + KUNIT_CASE(fortify_test_strnlen), + KUNIT_CASE(fortify_test_strcpy), + KUNIT_CASE(fortify_test_strncpy), + KUNIT_CASE(fortify_test_strscpy), + 
KUNIT_CASE(fortify_test_strcat), + KUNIT_CASE(fortify_test_strncat), + KUNIT_CASE(fortify_test_strlcat), /* skip memset: performs bounds checking on whole structs */ - /* skip memcpy: still using warn-and-overwrite instead of hard-fail */ - KUNIT_CASE(memscan_test), - KUNIT_CASE(memchr_test), - KUNIT_CASE(memchr_inv_test), - KUNIT_CASE(memcmp_test), - KUNIT_CASE(kmemdup_test), + KUNIT_CASE(fortify_test_memcpy), + KUNIT_CASE(fortify_test_memmove), + KUNIT_CASE(fortify_test_memscan), + KUNIT_CASE(fortify_test_memchr), + KUNIT_CASE(fortify_test_memchr_inv), + KUNIT_CASE(fortify_test_memcmp), + KUNIT_CASE(fortify_test_kmemdup), {} }; diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c index 5de7c04e05..2fd5712fb7 100644 --- a/lib/iomap_copy.c +++ b/lib/iomap_copy.c @@ -16,9 +16,8 @@ * time. Order of access is not guaranteed, nor is a memory barrier * performed afterwards. */ -void __attribute__((weak)) __iowrite32_copy(void __iomem *to, - const void *from, - size_t count) +#ifndef __iowrite32_copy +void __iowrite32_copy(void __iomem *to, const void *from, size_t count) { u32 __iomem *dst = to; const u32 *src = from; @@ -28,6 +27,7 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to, __raw_writel(*src++, dst++); } EXPORT_SYMBOL_GPL(__iowrite32_copy); +#endif /** * __ioread32_copy - copy data from MMIO space, in 32-bit units @@ -60,9 +60,8 @@ EXPORT_SYMBOL_GPL(__ioread32_copy); * time. Order of access is not guaranteed, nor is a memory barrier * performed afterwards. */ -void __attribute__((weak)) __iowrite64_copy(void __iomem *to, - const void *from, - size_t count) +#ifndef __iowrite64_copy +void __iowrite64_copy(void __iomem *to, const void *from, size_t count) { #ifdef CONFIG_64BIT u64 __iomem *dst = to; @@ -75,5 +74,5 @@ void __attribute__((weak)) __iowrite64_copy(void __iomem *to, __iowrite32_copy(to, from, count * 2); #endif } - EXPORT_SYMBOL_GPL(__iowrite64_copy); +#endif diff --git a/lib/kfifo.c b/lib/kfifo.c index 12f5a347aa..a8b2eed905 100644 --- a/lib/kfifo.c +++ b/lib/kfifo.c @@ -5,13 +5,14 @@ * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net> */ -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/slab.h> +#include <linux/dma-mapping.h> #include <linux/err.h> +#include <linux/export.h> +#include <linux/kfifo.h> #include <linux/log2.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> #include <linux/uaccess.h> -#include <linux/kfifo.h> /* * internal helper to calculate the unused elements in a fifo @@ -163,6 +164,19 @@ unsigned int __kfifo_out_peek(struct __kfifo *fifo, } EXPORT_SYMBOL(__kfifo_out_peek); +unsigned int __kfifo_out_linear(struct __kfifo *fifo, + unsigned int *tail, unsigned int n) +{ + unsigned int size = fifo->mask + 1; + unsigned int off = fifo->out & fifo->mask; + + if (tail) + *tail = off; + + return min3(n, fifo->in - fifo->out, size - off); +} +EXPORT_SYMBOL(__kfifo_out_linear); + unsigned int __kfifo_out(struct __kfifo *fifo, void *buf, unsigned int len) { @@ -292,51 +306,31 @@ int __kfifo_to_user(struct __kfifo *fifo, void __user *to, } EXPORT_SYMBOL(__kfifo_to_user); -static int setup_sgl_buf(struct scatterlist *sgl, void *buf, - int nents, unsigned int len) +static unsigned int setup_sgl_buf(struct __kfifo *fifo, struct scatterlist *sgl, + unsigned int data_offset, int nents, + unsigned int len, dma_addr_t dma) { - int n; - unsigned int l; - unsigned int off; - struct page *page; + const void *buf = fifo->data + data_offset; - if (!nents) + if (!nents || !len) return 0; - if (!len) - return 0; + sg_set_buf(sgl, 
buf, len); - n = 0; - page = virt_to_page(buf); - off = offset_in_page(buf); - l = 0; - - while (len >= l + PAGE_SIZE - off) { - struct page *npage; - - l += PAGE_SIZE; - buf += PAGE_SIZE; - npage = virt_to_page(buf); - if (page_to_phys(page) != page_to_phys(npage) - l) { - sg_set_page(sgl, page, l - off, off); - sgl = sg_next(sgl); - if (++n == nents || sgl == NULL) - return n; - page = npage; - len -= l - off; - l = off = 0; - } + if (dma != DMA_MAPPING_ERROR) { + sg_dma_address(sgl) = dma + data_offset; + sg_dma_len(sgl) = len; } - sg_set_page(sgl, page, len, off); - return n + 1; + + return 1; } static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, - int nents, unsigned int len, unsigned int off) + int nents, unsigned int len, unsigned int off, dma_addr_t dma) { unsigned int size = fifo->mask + 1; unsigned int esize = fifo->esize; - unsigned int l; + unsigned int len_to_end; unsigned int n; off &= fifo->mask; @@ -345,16 +339,17 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, size *= esize; len *= esize; } - l = min(len, size - off); + len_to_end = min(len, size - off); - n = setup_sgl_buf(sgl, fifo->data + off, nents, l); - n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l); + n = setup_sgl_buf(fifo, sgl, off, nents, len_to_end, dma); + n += setup_sgl_buf(fifo, sgl + n, 0, nents - n, len - len_to_end, dma); return n; } unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len) + struct scatterlist *sgl, int nents, unsigned int len, + dma_addr_t dma) { unsigned int l; @@ -362,12 +357,13 @@ unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, if (len > l) len = l; - return setup_sgl(fifo, sgl, nents, len, fifo->in); + return setup_sgl(fifo, sgl, nents, len, fifo->in, dma); } EXPORT_SYMBOL(__kfifo_dma_in_prepare); unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len) + struct scatterlist *sgl, int nents, unsigned int len, + dma_addr_t dma) { unsigned int l; @@ -375,7 +371,7 @@ unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, if (len > l) len = l; - return setup_sgl(fifo, sgl, nents, len, fifo->out); + return setup_sgl(fifo, sgl, nents, len, fifo->out, dma); } EXPORT_SYMBOL(__kfifo_dma_out_prepare); @@ -473,6 +469,19 @@ unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf, } EXPORT_SYMBOL(__kfifo_out_peek_r); +unsigned int __kfifo_out_linear_r(struct __kfifo *fifo, + unsigned int *tail, unsigned int n, size_t recsize) +{ + if (fifo->in == fifo->out) + return 0; + + if (tail) + *tail = fifo->out + recsize; + + return min(n, __kfifo_peek_n(fifo, recsize)); +} +EXPORT_SYMBOL(__kfifo_out_linear_r); + unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf, unsigned int len, size_t recsize) { @@ -546,7 +555,8 @@ int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, EXPORT_SYMBOL(__kfifo_to_user_r); unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize, + dma_addr_t dma) { BUG_ON(!nents); @@ -555,7 +565,7 @@ unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, if (len + recsize > kfifo_unused(fifo)) return 0; - return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize); + return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize, dma); } EXPORT_SYMBOL(__kfifo_dma_in_prepare_r); @@ -569,7 +579,8 @@ void 
__kfifo_dma_in_finish_r(struct __kfifo *fifo, EXPORT_SYMBOL(__kfifo_dma_in_finish_r); unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize, + dma_addr_t dma) { BUG_ON(!nents); @@ -578,15 +589,7 @@ unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, if (len + recsize > fifo->in - fifo->out) return 0; - return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize); + return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize, dma); } EXPORT_SYMBOL(__kfifo_dma_out_prepare_r); -void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize) -{ - unsigned int len; - - len = __kfifo_peek_n(fifo, recsize); - fifo->out += len + recsize; -} -EXPORT_SYMBOL(__kfifo_dma_out_finish_r); diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 03b427e270..b7f2fa08d9 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -433,8 +433,23 @@ static void zap_modalias_env(struct kobj_uevent_env *env) len = strlen(env->envp[i]) + 1; if (i != env->envp_idx - 1) { + /* @env->envp[] contains pointers to @env->buf[] + * with @env->buflen chars, and we are removing + * the MODALIAS variable here, pointed to by @env->envp[i] + * with length @len as shown below: + * + * 0 @env->buf[] @env->buflen + * --------------------------------------------- + * ^ ^ ^ ^ + * | |-> @len <-| target block | + * @env->envp[0] @env->envp[i] @env->envp[i + 1] + * + * so the "target block" indicated above is moved + * backward by @len, and the correct size to move is + * @env->buflen - (@env->envp[i + 1] - @env->envp[0]). + */ memmove(env->envp[i], env->envp[i + 1], - env->buflen - len); + env->buflen - (env->envp[i + 1] - env->envp[0])); for (j = i; j < env->envp_idx - 1; j++) env->envp[j] = env->envp[j + 1] - len; diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig index 68a6daec0a..34d7242d52 100644 --- a/lib/kunit/Kconfig +++ b/lib/kunit/Kconfig @@ -24,6 +24,17 @@ config KUNIT_DEBUGFS test suite, which allow users to see results of the last test suite run that occurred. +config KUNIT_FAULT_TEST + bool "Enable KUnit tests which print BUG stacktraces" + depends on KUNIT_TEST + depends on !UML + default y + help + Enables fault handling tests for the KUnit framework. These tests may + trigger a kernel BUG(), and the associated stack trace, even when they + pass. If this conflicts with your test infrastructure (or is confusing + or annoying), they can be disabled by setting this to N.
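The move-size arithmetic in the zap_modalias_env() hunk above is subtle enough to deserve a worked example. A minimal userspace sketch follows (the three-variable buffer, offsets, and lengths are invented for illustration, and the kernel's envp[] pointer fixup is elided):

```c
#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Packed buffer of NUL-separated variables, as in kobj_uevent_env. */
	char buf[32] = "A=1\0MODALIAS=x:y\0B=2";
	char *envp[3] = { buf, buf + 4, buf + 17 };
	size_t buflen = 21;                  /* bytes in use, incl. trailing NUL */
	int i = 1;                           /* entry being removed */
	size_t len = strlen(envp[i]) + 1;    /* 13 */

	/*
	 * Only the tail starting at envp[i + 1] must slide back, i.e.
	 * buflen - (envp[i + 1] - envp[0]) = 4 bytes here.  The old
	 * "buflen - len" would have moved 8 bytes, reading past the
	 * in-use region by exactly envp[i] - envp[0] bytes.
	 */
	memmove(envp[i], envp[i + 1], buflen - (envp[i + 1] - envp[0]));
	buflen -= len;

	assert(strcmp(buf, "A=1") == 0);
	assert(strcmp(buf + 4, "B=2") == 0);
	printf("ok, %zu bytes remain\n", buflen);
	return 0;
}
```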
+ config KUNIT_TEST tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS default KUNIT_ALL_TESTS diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index f7980ef236..e3412e0ca3 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -109,6 +109,48 @@ static struct kunit_suite kunit_try_catch_test_suite = { .test_cases = kunit_try_catch_test_cases, }; +#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST) + +static void kunit_test_null_dereference(void *data) +{ + struct kunit *test = data; + int *null = NULL; + + *null = 0; + + KUNIT_FAIL(test, "This line should never be reached\n"); +} + +static void kunit_test_fault_null_dereference(struct kunit *test) +{ + struct kunit_try_catch_test_context *ctx = test->priv; + struct kunit_try_catch *try_catch = ctx->try_catch; + + kunit_try_catch_init(try_catch, + test, + kunit_test_null_dereference, + kunit_test_catch); + kunit_try_catch_run(try_catch, test); + + KUNIT_EXPECT_EQ(test, try_catch->try_result, -EINTR); + KUNIT_EXPECT_TRUE(test, ctx->function_called); +} + +#endif /* CONFIG_KUNIT_FAULT_TEST */ + +static struct kunit_case kunit_fault_test_cases[] = { +#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST) + KUNIT_CASE(kunit_test_fault_null_dereference), +#endif /* CONFIG_KUNIT_FAULT_TEST */ + {} +}; + +static struct kunit_suite kunit_fault_test_suite = { + .name = "kunit_fault", + .init = kunit_try_catch_test_init, + .test_cases = kunit_fault_test_cases, +}; + /* * Context for testing test managed resources * is_resource_initialized is used to test arbitrary resources @@ -826,6 +868,7 @@ static struct kunit_suite kunit_current_test_suite = { kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite, &kunit_log_test_suite, &kunit_status_test_suite, - &kunit_current_test_suite, &kunit_device_test_suite); + &kunit_current_test_suite, &kunit_device_test_suite, + &kunit_fault_test_suite); MODULE_LICENSE("GPL v2"); diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c index 03fb511826..7511442ea9 100644 --- a/lib/kunit/string-stream-test.c +++ b/lib/kunit/string-stream-test.c @@ -22,18 +22,10 @@ struct string_stream_test_priv { }; /* Avoids a cast warning if kfree() is passed direct to kunit_add_action(). */ -static void kfree_wrapper(void *p) -{ - kfree(p); -} +KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *); /* Avoids a cast warning if string_stream_destroy() is passed direct to kunit_add_action(). 
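For context, this is the thunk shape that KUNIT_DEFINE_ACTION_WRAPPER generates. An approximate expansion is shown below (the real macro lives in include/kunit/resource.h; treat this as a sketch):

```c
/*
 * Approximately what KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree,
 * const void *) expands to: a void * thunk that casts back to the real
 * argument type, so kfree() can be registered with kunit_add_action()
 * without a function-pointer cast (and the warning such a cast triggers).
 */
static void kfree_wrapper(void *in)
{
	const void *arg = (const void *)in;

	kfree(arg);
}
```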
*/ -static void cleanup_raw_stream(void *p) -{ - struct string_stream *stream = p; - - string_stream_destroy(stream); -} +KUNIT_DEFINE_ACTION_WRAPPER(cleanup_raw_stream, string_stream_destroy, struct string_stream *); static char *get_concatenated_string(struct kunit *test, struct string_stream *stream) { diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c index 9c9e4dcf06..6bbe0025b0 100644 --- a/lib/kunit/try-catch.c +++ b/lib/kunit/try-catch.c @@ -18,7 +18,7 @@ void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch) { try_catch->try_result = -EFAULT; - kthread_complete_and_exit(try_catch->try_completion, -EFAULT); + kthread_exit(0); } EXPORT_SYMBOL_GPL(kunit_try_catch_throw); @@ -26,9 +26,12 @@ static int kunit_generic_run_threadfn_adapter(void *data) { struct kunit_try_catch *try_catch = data; + try_catch->try_result = -EINTR; try_catch->try(try_catch->context); + if (try_catch->try_result == -EINTR) + try_catch->try_result = 0; - kthread_complete_and_exit(try_catch->try_completion, 0); + return 0; } static unsigned long kunit_test_timeout(void) @@ -58,24 +61,31 @@ static unsigned long kunit_test_timeout(void) void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context) { - DECLARE_COMPLETION_ONSTACK(try_completion); struct kunit *test = try_catch->test; struct task_struct *task_struct; + struct completion *task_done; int exit_code, time_remaining; try_catch->context = context; - try_catch->try_completion = &try_completion; try_catch->try_result = 0; task_struct = kthread_create(kunit_generic_run_threadfn_adapter, try_catch, "kunit_try_catch_thread"); if (IS_ERR(task_struct)) { + try_catch->try_result = PTR_ERR(task_struct); try_catch->catch(try_catch->context); return; } get_task_struct(task_struct); + /* + * As for a vfork(2), task_struct->vfork_done (pointing to the + * underlying kthread->exited) can be used to wait for the end of a + * kernel thread. It is set to NULL when the thread exits, so we + * keep a copy here. 
+ */ + task_done = task_struct->vfork_done; wake_up_process(task_struct); - time_remaining = wait_for_completion_timeout(&try_completion, + time_remaining = wait_for_completion_timeout(task_done, kunit_test_timeout()); if (time_remaining == 0) { try_catch->try_result = -ETIMEDOUT; @@ -90,9 +100,13 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context) if (exit_code == -EFAULT) try_catch->try_result = 0; - else if (exit_code == -EINTR) - kunit_err(test, "wake_up_process() was never called\n"); - else if (exit_code == -ETIMEDOUT) + else if (exit_code == -EINTR) { + if (test->last_seen.file) + kunit_err(test, "try faulted: last line seen %s:%d\n", + test->last_seen.file, test->last_seen.line); + else + kunit_err(test, "try faulted\n"); + } else if (exit_code == -ETIMEDOUT) kunit_err(test, "try timed out\n"); else if (exit_code) kunit_err(test, "Unknown error: %d\n", exit_code); diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c index 859b67c4d6..27e0c8ee71 100644 --- a/lib/kunit_iov_iter.c +++ b/lib/kunit_iov_iter.c @@ -139,7 +139,7 @@ static void __init iov_kunit_copy_to_kvec(struct kunit *test) return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -194,7 +194,7 @@ stop: return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } struct bvec_test_range { @@ -302,7 +302,7 @@ static void __init iov_kunit_copy_to_bvec(struct kunit *test) return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -359,7 +359,7 @@ stop: return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } static void iov_kunit_destroy_xarray(void *data) @@ -453,7 +453,7 @@ static void __init iov_kunit_copy_to_xarray(struct kunit *test) return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -516,7 +516,7 @@ stop: return; } - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -596,7 +596,7 @@ static void __init iov_kunit_extract_pages_kvec(struct kunit *test) stop: KUNIT_EXPECT_EQ(test, size, 0); KUNIT_EXPECT_EQ(test, iter.count, 0); - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -674,7 +674,7 @@ static void __init iov_kunit_extract_pages_bvec(struct kunit *test) stop: KUNIT_EXPECT_EQ(test, size, 0); KUNIT_EXPECT_EQ(test, iter.count, 0); - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } /* @@ -753,7 +753,7 @@ static void __init iov_kunit_extract_pages_xarray(struct kunit *test) } stop: - KUNIT_SUCCEED(); + KUNIT_SUCCEED(test); } static struct kunit_case __refdata iov_kunit_cases[] = { diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c index d42cebf740..d3b64b10da 100644 --- a/lib/math/prime_numbers.c +++ b/lib/math/prime_numbers.c @@ -6,8 +6,6 @@ #include <linux/prime_numbers.h> #include <linux/slab.h> -#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long)) - struct primes { struct rcu_head rcu; unsigned long last, sz; diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c index fd16e6ce53..20ea9038c3 100644 --- a/lib/memcpy_kunit.c +++ b/lib/memcpy_kunit.c @@ -493,58 +493,6 @@ static void memmove_overlap_test(struct kunit *test) } } -static void strtomem_test(struct kunit *test) -{ - static const char input[sizeof(unsigned long)] = "hi"; - static const char truncate[] = "this is too long"; - struct { - unsigned long canary1; - unsigned char output[sizeof(unsigned long)] __nonstring; - unsigned long canary2; - } wrap; - - memset(&wrap, 0xFF, sizeof(wrap)); - KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX, - "bad initial canary value"); - KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX, - "bad initial canary value"); - - /* Check unpadded copy leaves surroundings 
untouched. */ - strtomem(wrap.output, input); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); - KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); - for (size_t i = 2; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); - - /* Check truncated copy leaves surroundings untouched. */ - memset(&wrap, 0xFF, sizeof(wrap)); - strtomem(wrap.output, truncate); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - for (size_t i = 0; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); - - /* Check padded copy leaves only string padded. */ - memset(&wrap, 0xFF, sizeof(wrap)); - strtomem_pad(wrap.output, input, 0xAA); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); - KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); - for (size_t i = 2; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); - - /* Check truncated padded copy has no padding. */ - memset(&wrap, 0xFF, sizeof(wrap)); - strtomem(wrap.output, truncate); - KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); - for (size_t i = 0; i < sizeof(wrap.output); i++) - KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); - KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); -} - static struct kunit_case memcpy_test_cases[] = { KUNIT_CASE(memset_test), KUNIT_CASE(memcpy_test), @@ -552,7 +500,6 @@ static struct kunit_case memcpy_test_cases[] = { KUNIT_CASE_SLOW(memmove_test), KUNIT_CASE_SLOW(memmove_large_test), KUNIT_CASE_SLOW(memmove_overlap_test), - KUNIT_CASE(strtomem_test), {} }; diff --git a/lib/objagg.c b/lib/objagg.c index 1e248629ed..1608895b00 100644 --- a/lib/objagg.c +++ b/lib/objagg.c @@ -167,6 +167,9 @@ static int objagg_obj_parent_assign(struct objagg *objagg, { void *delta_priv; + if (WARN_ON(!objagg_obj_is_root(parent))) + return -EINVAL; + delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj, objagg_obj->obj); if (IS_ERR(delta_priv)) @@ -903,20 +906,6 @@ static const struct objagg_opt_algo *objagg_opt_algos[] = { [OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy, }; -static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg, - const void *obj) -{ - struct rhashtable *ht = arg->ht; - struct objagg_hints *objagg_hints = - container_of(ht, struct objagg_hints, node_ht); - const struct objagg_ops *ops = objagg_hints->ops; - const char *ptr = obj; - - ptr += ht->p.key_offset; - return ops->hints_obj_cmp ? 
ops->hints_obj_cmp(ptr, arg->key) : - memcmp(ptr, arg->key, ht->p.key_len); -} - /** * objagg_hints_get - obtains hints instance * @objagg: objagg instance @@ -955,7 +944,6 @@ struct objagg_hints *objagg_hints_get(struct objagg *objagg, offsetof(struct objagg_hints_node, obj); objagg_hints->ht_params.head_offset = offsetof(struct objagg_hints_node, ht_node); - objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp; err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params); if (err) diff --git a/lib/objpool.c b/lib/objpool.c index cfdc024208..234f9d0bd0 100644 --- a/lib/objpool.c +++ b/lib/objpool.c @@ -50,7 +50,7 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs, { int i, cpu_count = 0; - for (i = 0; i < pool->nr_cpus; i++) { + for (i = 0; i < nr_cpu_ids; i++) { struct objpool_slot *slot; int nodes, size, rc; @@ -60,8 +60,8 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs, continue; /* compute how many objects to be allocated with this slot */ - nodes = nr_objs / num_possible_cpus(); - if (cpu_count < (nr_objs % num_possible_cpus())) + nodes = nr_objs / pool->nr_possible_cpus; + if (cpu_count < (nr_objs % pool->nr_possible_cpus)) nodes++; cpu_count++; @@ -103,7 +103,7 @@ static void objpool_fini_percpu_slots(struct objpool_head *pool) if (!pool->cpu_slots) return; - for (i = 0; i < pool->nr_cpus; i++) + for (i = 0; i < nr_cpu_ids; i++) kvfree(pool->cpu_slots[i]); kfree(pool->cpu_slots); } @@ -130,13 +130,13 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size, /* initialize objpool pool */ memset(pool, 0, sizeof(struct objpool_head)); - pool->nr_cpus = nr_cpu_ids; + pool->nr_possible_cpus = num_possible_cpus(); pool->obj_size = object_size; pool->capacity = capacity; pool->gfp = gfp & ~__GFP_ZERO; pool->context = context; pool->release = release; - slot_size = pool->nr_cpus * sizeof(struct objpool_slot); + slot_size = nr_cpu_ids * sizeof(struct objpool_slot); pool->cpu_slots = kzalloc(slot_size, pool->gfp); if (!pool->cpu_slots) return -ENOMEM; @@ -152,106 +152,6 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size, } EXPORT_SYMBOL_GPL(objpool_init); -/* adding object to slot, abort if the slot was already full */ -static inline int -objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu) -{ - struct objpool_slot *slot = pool->cpu_slots[cpu]; - uint32_t head, tail; - - /* loading tail and head as a local snapshot, tail first */ - tail = READ_ONCE(slot->tail); - - do { - head = READ_ONCE(slot->head); - /* fault caught: something must be wrong */ - WARN_ON_ONCE(tail - head > pool->nr_objs); - } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1)); - - /* now the tail position is reserved for the given obj */ - WRITE_ONCE(slot->entries[tail & slot->mask], obj); - /* update sequence to make this obj available for pop() */ - smp_store_release(&slot->last, tail + 1); - - return 0; -} - -/* reclaim an object to object pool */ -int objpool_push(void *obj, struct objpool_head *pool) -{ - unsigned long flags; - int rc; - - /* disable local irq to avoid preemption & interruption */ - raw_local_irq_save(flags); - rc = objpool_try_add_slot(obj, pool, raw_smp_processor_id()); - raw_local_irq_restore(flags); - - return rc; -} -EXPORT_SYMBOL_GPL(objpool_push); - -/* try to retrieve object from slot */ -static inline void *objpool_try_get_slot(struct objpool_head *pool, int cpu) -{ - struct objpool_slot *slot = pool->cpu_slots[cpu]; - /* load head snapshot, other cpus may change it */ - 
uint32_t head = smp_load_acquire(&slot->head); - - while (head != READ_ONCE(slot->last)) { - void *obj; - - /* - * data visibility of 'last' and 'head' could be out of - * order since memory updating of 'last' and 'head' are - * performed in push() and pop() independently - * - * before any retrieving attempts, pop() must guarantee - * 'last' is behind 'head', that is to say, there must - * be available objects in slot, which could be ensured - * by condition 'last != head && last - head <= nr_objs' - * that is equivalent to 'last - head - 1 < nr_objs' as - * 'last' and 'head' are both unsigned int32 - */ - if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) { - head = READ_ONCE(slot->head); - continue; - } - - /* obj must be retrieved before moving forward head */ - obj = READ_ONCE(slot->entries[head & slot->mask]); - - /* move head forward to mark it's consumption */ - if (try_cmpxchg_release(&slot->head, &head, head + 1)) - return obj; - } - - return NULL; -} - -/* allocate an object from object pool */ -void *objpool_pop(struct objpool_head *pool) -{ - void *obj = NULL; - unsigned long flags; - int i, cpu; - - /* disable local irq to avoid preemption & interruption */ - raw_local_irq_save(flags); - - cpu = raw_smp_processor_id(); - for (i = 0; i < num_possible_cpus(); i++) { - obj = objpool_try_get_slot(pool, cpu); - if (obj) - break; - cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1); - } - raw_local_irq_restore(flags); - - return obj; -} -EXPORT_SYMBOL_GPL(objpool_pop); - /* release whole objpool forcibly */ void objpool_free(struct objpool_head *pool) { diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index 4ef31b0bb7..d305b0c054 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -1178,14 +1178,28 @@ struct foo { s16 array[] __counted_by(counter); }; +struct bar { + int a; + u32 counter; + s16 array[]; +}; + static void DEFINE_FLEX_test(struct kunit *test) { - DEFINE_RAW_FLEX(struct foo, two, array, 2); + /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */ + DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2); +#if __has_attribute(__counted_by__) + int expected_raw_size = sizeof(struct foo); +#else + int expected_raw_size = sizeof(struct foo) + 2 * sizeof(s16); +#endif + /* Without annotation, it will always be on-stack size. 
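A brief gloss on the numbers these size expectations encode, as a sketch of the arithmetic (assuming the usual 4-byte int/u32 layout and a __counted_by-aware compiler, where __struct_size() resolves through __builtin_dynamic_object_size() from the counter field):

```c
/*
 *   struct foo head = 4 (int a) + 4 (u32 counter) = 8 bytes, s16 elements
 *
 *   two_but_zero: counter == 0  ->  8            == sizeof(struct foo)
 *   eight:        counter == 8  ->  8 + 8*2 = 24
 *   empty:        counter == 0  ->  8            == sizeof(struct foo)
 *
 * struct bar has no annotation, so only the on-stack storage is visible:
 *   two: sizeof(struct bar) + 2 * sizeof(s16) = 8 + 4 = 12
 */
```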
*/ + DEFINE_RAW_FLEX(struct bar, two, array, 2); DEFINE_FLEX(struct foo, eight, array, counter, 8); DEFINE_FLEX(struct foo, empty, array, counter, 0); - KUNIT_EXPECT_EQ(test, __struct_size(two), - sizeof(struct foo) + sizeof(s16) + sizeof(s16)); + KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), expected_raw_size); + KUNIT_EXPECT_EQ(test, __struct_size(two), sizeof(struct bar) + 2 * sizeof(s16)); KUNIT_EXPECT_EQ(test, __struct_size(eight), 24); KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo)); } diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index 385a94aa0b..29127dd05d 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -33,27 +33,8 @@ CFLAGS_REMOVE_vpermxor8.o += -msoft-float endif endif -# The GCC option -ffreestanding is required in order to compile code containing -# ARM/NEON intrinsics in a non C99-compliant environment (such as the kernel) -ifeq ($(CONFIG_KERNEL_MODE_NEON),y) -NEON_FLAGS := -ffreestanding -# Enable <arm_neon.h> -NEON_FLAGS += -isystem $(shell $(CC) -print-file-name=include) -ifeq ($(ARCH),arm) -NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon -endif -CFLAGS_recov_neon_inner.o += $(NEON_FLAGS) -ifeq ($(ARCH),arm64) -CFLAGS_REMOVE_recov_neon_inner.o += -mgeneral-regs-only -CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only -CFLAGS_REMOVE_neon2.o += -mgeneral-regs-only -CFLAGS_REMOVE_neon4.o += -mgeneral-regs-only -CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only -endif -endif - quiet_cmd_unroll = UNROLL $@ - cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@ + cmd_unroll = $(AWK) -v N=$* -f $(src)/unroll.awk < $< > $@ targets += int1.c int2.c int4.c int8.c $(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE @@ -75,10 +56,16 @@ targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c $(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) -CFLAGS_neon1.o += $(NEON_FLAGS) -CFLAGS_neon2.o += $(NEON_FLAGS) -CFLAGS_neon4.o += $(NEON_FLAGS) -CFLAGS_neon8.o += $(NEON_FLAGS) +CFLAGS_neon1.o += $(CC_FLAGS_FPU) +CFLAGS_neon2.o += $(CC_FLAGS_FPU) +CFLAGS_neon4.o += $(CC_FLAGS_FPU) +CFLAGS_neon8.o += $(CC_FLAGS_FPU) +CFLAGS_recov_neon_inner.o += $(CC_FLAGS_FPU) +CFLAGS_REMOVE_neon1.o += $(CC_FLAGS_NO_FPU) +CFLAGS_REMOVE_neon2.o += $(CC_FLAGS_NO_FPU) +CFLAGS_REMOVE_neon4.o += $(CC_FLAGS_NO_FPU) +CFLAGS_REMOVE_neon8.o += $(CC_FLAGS_NO_FPU) +CFLAGS_REMOVE_recov_neon_inner.o += $(CC_FLAGS_NO_FPU) targets += neon1.c neon2.c neon4.c neon8.c $(obj)/neon%.c: $(src)/neon.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 6ae2ba8e06..dbbed19f8f 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -130,7 +130,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht, if (ntbl) return ntbl; - ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); + ntbl = alloc_hooks_tag(ht->alloc_tag, + kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO)); if (ntbl && leaf) { for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++) @@ -157,7 +158,8 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, size = sizeof(*tbl) + sizeof(tbl->buckets[0]); - tbl = kzalloc(size, gfp); + tbl = alloc_hooks_tag(ht->alloc_tag, + kmalloc_noprof(size, gfp|__GFP_ZERO)); if (!tbl) return NULL; @@ -181,7 +183,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, int i; static struct lock_class_key __key; - tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp); + tbl = alloc_hooks_tag(ht->alloc_tag, + kvmalloc_node_noprof(struct_size(tbl, 
buckets, nbuckets), + gfp|__GFP_ZERO, NUMA_NO_NODE)); size = nbuckets; @@ -1016,7 +1020,7 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) * .obj_hashfn = my_hash_fn, * }; */ -int rhashtable_init(struct rhashtable *ht, +int rhashtable_init_noprof(struct rhashtable *ht, const struct rhashtable_params *params) { struct bucket_table *tbl; @@ -1031,6 +1035,8 @@ int rhashtable_init(struct rhashtable *ht, spin_lock_init(&ht->lock); memcpy(&ht->p, params, sizeof(*params)); + alloc_tag_record(ht->alloc_tag); + if (params->min_size) ht->p.min_size = roundup_pow_of_two(params->min_size); @@ -1076,7 +1082,7 @@ int rhashtable_init(struct rhashtable *ht, return 0; } -EXPORT_SYMBOL_GPL(rhashtable_init); +EXPORT_SYMBOL_GPL(rhashtable_init_noprof); /** * rhltable_init - initialize a new hash list table @@ -1087,15 +1093,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init); * * See documentation for rhashtable_init. */ -int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params) +int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params) { int err; - err = rhashtable_init(&hlt->ht, params); + err = rhashtable_init_noprof(&hlt->ht, params); hlt->ht.rhlist = true; return err; } -EXPORT_SYMBOL_GPL(rhltable_init); +EXPORT_SYMBOL_GPL(rhltable_init_noprof); static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj, void (*free_fn)(void *ptr, void *arg), diff --git a/lib/sbitmap.c b/lib/sbitmap.c index 92c6b1fd89..5e2e93307f 100644 --- a/lib/sbitmap.c +++ b/lib/sbitmap.c @@ -60,12 +60,30 @@ static inline void update_alloc_hint_after_get(struct sbitmap *sb, /* * See if we have deferred clears that we can batch move */ -static inline bool sbitmap_deferred_clear(struct sbitmap_word *map) +static inline bool sbitmap_deferred_clear(struct sbitmap_word *map, + unsigned int depth, unsigned int alloc_hint, bool wrap) { - unsigned long mask; + unsigned long mask, word_mask; - if (!READ_ONCE(map->cleared)) - return false; + guard(spinlock_irqsave)(&map->swap_lock); + + if (!map->cleared) { + if (depth == 0) + return false; + + word_mask = (~0UL) >> (BITS_PER_LONG - depth); + /* + * The current behavior is to always retry after moving + * ->cleared to word, and we change it to retry in case + * of any free bits. To avoid an infinite loop, we need + * to take wrap & alloc_hint into account, otherwise a + * soft lockup may occur. + */ + if (!wrap && alloc_hint) + word_mask &= ~((1UL << alloc_hint) - 1); + + return (READ_ONCE(map->word) & word_mask) != word_mask; + } /* * First get a stable cleared mask, setting the old mask to 0. 
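The new early-return test in sbitmap_deferred_clear() above is compact enough to miss, so here is a standalone C sketch of the same arithmetic (kernel names dropped; depth >= 1 is assumed, mirroring the depth == 0 early return in the real function):

```c
#include <assert.h>
#include <limits.h>   /* CHAR_BIT */
#include <stdbool.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/*
 * Mirror of the retry test: are there free (zero) bits in 'word' at or
 * above 'alloc_hint' within the first 'depth' bits of the word?
 */
static bool has_free_bits(unsigned long word, unsigned int depth,
			  unsigned int alloc_hint, bool wrap)
{
	unsigned long word_mask = (~0UL) >> (BITS_PER_LONG - depth);

	/* Without wrap, bits below the allocation hint cannot be used. */
	if (!wrap && alloc_hint)
		word_mask &= ~((1UL << alloc_hint) - 1);

	return (word & word_mask) != word_mask;
}

int main(void)
{
	/* depth 8 -> word_mask 0xff; hint 3, no wrap -> 0xf8 */
	assert(has_free_bits(0x0f, 8, 3, false));   /* bits 4..7 free */
	assert(!has_free_bits(0xfb, 8, 3, false));  /* only bit 2 free, below hint */
	assert(has_free_bits(0xfb, 8, 3, true));    /* with wrap, bit 2 counts */
	return 0;
}
```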
@@ -85,6 +103,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, bool alloc_hint) { unsigned int bits_per_word; + int i; if (shift < 0) shift = sbitmap_calculate_shift(depth); @@ -116,6 +135,9 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, return -ENOMEM; } + for (i = 0; i < sb->map_nr; i++) + spin_lock_init(&sb->map[i].swap_lock); + return 0; } EXPORT_SYMBOL_GPL(sbitmap_init_node); @@ -126,7 +148,7 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth) unsigned int i; for (i = 0; i < sb->map_nr; i++) - sbitmap_deferred_clear(&sb->map[i]); + sbitmap_deferred_clear(&sb->map[i], 0, 0, 0); sb->depth = depth; sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); @@ -179,7 +201,7 @@ static int sbitmap_find_bit_in_word(struct sbitmap_word *map, alloc_hint, wrap); if (nr != -1) break; - if (!sbitmap_deferred_clear(map)) + if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap)) break; } while (1); @@ -494,18 +516,18 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, struct sbitmap_word *map = &sb->map[index]; unsigned long get_mask; unsigned int map_depth = __map_depth(sb, index); + unsigned long val; - sbitmap_deferred_clear(map); - if (map->word == (1UL << (map_depth - 1)) - 1) + sbitmap_deferred_clear(map, 0, 0, 0); + val = READ_ONCE(map->word); + if (val == (1UL << (map_depth - 1)) - 1) goto next; - nr = find_first_zero_bit(&map->word, map_depth); + nr = find_first_zero_bit(&val, map_depth); if (nr + nr_tags <= map_depth) { atomic_long_t *ptr = (atomic_long_t *) &map->word; - unsigned long val; get_mask = ((1UL << nr_tags) - 1) << nr; - val = READ_ONCE(map->word); while (!atomic_long_try_cmpxchg(ptr, &val, get_mask | val)) ; diff --git a/lib/stackdepot.c b/lib/stackdepot.c index cd8f234552..5ed34cc963 100644 --- a/lib/stackdepot.c +++ b/lib/stackdepot.c @@ -624,15 +624,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries, * we won't be able to do that under the lock. */ if (unlikely(can_alloc && !READ_ONCE(new_pool))) { - /* - * Zero out zone modifiers, as we don't have specific zone - * requirements. Keep the flags related to allocation in atomic - * contexts, I/O, nolockdep. - */ - alloc_flags &= ~GFP_ZONEMASK; - alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP); - alloc_flags |= __GFP_NOWARN; - page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER); + page = alloc_pages(gfp_nested_mask(alloc_flags), + DEPOT_POOL_ORDER); if (page) prealloc = page_address(page); } diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c deleted file mode 100644 index ca09f7f0e6..0000000000 --- a/lib/strcat_kunit.c +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Kernel module for testing 'strcat' family of functions. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <kunit/test.h> -#include <linux/string.h> - -static volatile int unconst; - -static void test_strcat(struct kunit *test) -{ - char dest[8]; - - /* Destination is terminated. */ - memset(dest, 0, sizeof(dest)); - KUNIT_EXPECT_EQ(test, strlen(dest), 0); - /* Empty copy does nothing. */ - KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* 4 characters copied in, stops at %NUL. */ - KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest); - KUNIT_EXPECT_STREQ(test, dest, "four"); - KUNIT_EXPECT_EQ(test, dest[5], '\0'); - /* 2 more characters copied in okay. 
*/ - KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); -} - -static void test_strncat(struct kunit *test) -{ - char dest[8]; - - /* Destination is terminated. */ - memset(dest, 0, sizeof(dest)); - KUNIT_EXPECT_EQ(test, strlen(dest), 0); - /* Empty copy of size 0 does nothing. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* Empty copy of size 1 does nothing too. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* Copy of max 0 characters should do nothing. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, ""); - - /* 4 characters copied in, even if max is 8. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, "four"); - KUNIT_EXPECT_EQ(test, dest[5], '\0'); - KUNIT_EXPECT_EQ(test, dest[6], '\0'); - /* 2 characters copied in okay, 2 ignored. */ - KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); -} - -static void test_strlcat(struct kunit *test) -{ - char dest[8] = ""; - int len = sizeof(dest) + unconst; - - /* Destination is terminated. */ - KUNIT_EXPECT_EQ(test, strlen(dest), 0); - /* Empty copy is size 0. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0); - KUNIT_EXPECT_STREQ(test, dest, ""); - /* Size 1 should keep buffer terminated, report size of source only. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4); - KUNIT_EXPECT_STREQ(test, dest, ""); - - /* 4 characters copied in. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4); - KUNIT_EXPECT_STREQ(test, dest, "four"); - /* 2 characters copied in okay, gets to 6 total. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); - /* 2 characters ignored if max size (7) reached. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8); - KUNIT_EXPECT_STREQ(test, dest, "fourAB"); - /* 1 of 2 characters skipped, now at true max size. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9); - KUNIT_EXPECT_STREQ(test, dest, "fourABE"); - /* Everything else ignored, now at full size. */ - KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11); - KUNIT_EXPECT_STREQ(test, dest, "fourABE"); -} - -static struct kunit_case strcat_test_cases[] = { - KUNIT_CASE(test_strcat), - KUNIT_CASE(test_strncat), - KUNIT_CASE(test_strlcat), - {} -}; - -static struct kunit_suite strcat_test_suite = { - .name = "strcat", - .test_cases = strcat_test_cases, -}; - -kunit_test_suite(strcat_test_suite); - -MODULE_LICENSE("GPL"); diff --git a/lib/string_helpers_kunit.c b/lib/string_helpers_kunit.c index f88e39fd68..c853046183 100644 --- a/lib/string_helpers_kunit.c +++ b/lib/string_helpers_kunit.c @@ -625,4 +625,5 @@ static struct kunit_suite string_helpers_test_suite = { kunit_test_suites(&string_helpers_test_suite); +MODULE_DESCRIPTION("Test cases for string helpers module"); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/lib/string_kunit.c b/lib/string_kunit.c index dd19bd7748..c919e3293d 100644 --- a/lib/string_kunit.c +++ b/lib/string_kunit.c @@ -17,7 +17,7 @@ #define STRCMP_TEST_EXPECT_LOWER(test, fn, ...) KUNIT_EXPECT_LT(test, fn(__VA_ARGS__), 0) #define STRCMP_TEST_EXPECT_GREATER(test, fn, ...) 
KUNIT_EXPECT_GT(test, fn(__VA_ARGS__), 0) -static void test_memset16(struct kunit *test) +static void string_test_memset16(struct kunit *test) { unsigned i, j, k; u16 v, *p; @@ -46,7 +46,7 @@ static void test_memset16(struct kunit *test) } } -static void test_memset32(struct kunit *test) +static void string_test_memset32(struct kunit *test) { unsigned i, j, k; u32 v, *p; @@ -75,7 +75,7 @@ static void test_memset32(struct kunit *test) } } -static void test_memset64(struct kunit *test) +static void string_test_memset64(struct kunit *test) { unsigned i, j, k; u64 v, *p; @@ -104,7 +104,7 @@ static void test_memset64(struct kunit *test) } } -static void test_strchr(struct kunit *test) +static void string_test_strchr(struct kunit *test) { const char *test_string = "abcdefghijkl"; const char *empty_string = ""; @@ -127,7 +127,7 @@ static void test_strchr(struct kunit *test) KUNIT_ASSERT_NULL(test, result); } -static void test_strnchr(struct kunit *test) +static void string_test_strnchr(struct kunit *test) { const char *test_string = "abcdefghijkl"; const char *empty_string = ""; @@ -160,7 +160,7 @@ static void test_strnchr(struct kunit *test) KUNIT_ASSERT_NULL(test, result); } -static void test_strspn(struct kunit *test) +static void string_test_strspn(struct kunit *test) { static const struct strspn_test { const char str[16]; @@ -196,7 +196,7 @@ static void strcmp_fill_buffers(char fill1, char fill2) strcmp_buffer2[STRCMP_LARGE_BUF_LEN - 1] = 0; } -static void test_strcmp(struct kunit *test) +static void string_test_strcmp(struct kunit *test) { /* Equal strings */ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "Hello, Kernel!", "Hello, Kernel!"); @@ -214,7 +214,7 @@ static void test_strcmp(struct kunit *test) STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Just a string", "Just a string and something else"); } -static void test_strcmp_long_strings(struct kunit *test) +static void string_test_strcmp_long_strings(struct kunit *test) { strcmp_fill_buffers('B', 'B'); STRCMP_TEST_EXPECT_EQUAL(test, strcmp, strcmp_buffer1, strcmp_buffer2); @@ -226,7 +226,7 @@ static void test_strcmp_long_strings(struct kunit *test) STRCMP_TEST_EXPECT_GREATER(test, strcmp, strcmp_buffer1, strcmp_buffer2); } -static void test_strncmp(struct kunit *test) +static void string_test_strncmp(struct kunit *test) { /* Equal strings */ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, KUnit!", "Hello, KUnit!", 13); @@ -249,7 +249,7 @@ static void test_strncmp(struct kunit *test) strlen("Just a string")); } -static void test_strncmp_long_strings(struct kunit *test) +static void string_test_strncmp_long_strings(struct kunit *test) { strcmp_fill_buffers('B', 'B'); STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1, @@ -269,7 +269,7 @@ static void test_strncmp_long_strings(struct kunit *test) strcmp_buffer2, STRCMP_CHANGE_POINT + 1); } -static void test_strcasecmp(struct kunit *test) +static void string_test_strcasecmp(struct kunit *test) { /* Same strings in different case should be equal */ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "Hello, Kernel!", "HeLLO, KErNeL!"); @@ -282,7 +282,7 @@ static void test_strcasecmp(struct kunit *test) STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "-+**.1230ghTTT~^", "-+**.1230Ghttt~^"); } -static void test_strcasecmp_long_strings(struct kunit *test) +static void string_test_strcasecmp_long_strings(struct kunit *test) { strcmp_fill_buffers('b', 'B'); STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, strcmp_buffer1, strcmp_buffer2); @@ -294,7 +294,7 @@ static void test_strcasecmp_long_strings(struct kunit *test) 
STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2); } -static void test_strncasecmp(struct kunit *test) +static void string_test_strncasecmp(struct kunit *test) { /* Same strings in different case should be equal */ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbAcAbA", "Abacaba", strlen("Abacaba")); @@ -306,7 +306,7 @@ static void test_strncasecmp(struct kunit *test) STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "Abacaba", "Not abacaba", 0); } -static void test_strncasecmp_long_strings(struct kunit *test) +static void string_test_strncasecmp_long_strings(struct kunit *test) { strcmp_fill_buffers('b', 'B'); STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1, @@ -326,21 +326,303 @@ static void test_strncasecmp_long_strings(struct kunit *test) strcmp_buffer2, STRCMP_CHANGE_POINT + 1); } +/** + * strscpy_check() - Run a specific test case. + * @test: KUnit test context pointer + * @src: Source string, argument to strscpy_pad() + * @count: Size of destination buffer, argument to strscpy_pad() + * @expected: Expected return value from call to strscpy_pad() + * @chars: Number of characters from the src string expected to be + * written to the dst buffer. + * @terminator: 1 if there should be a terminating null byte 0 otherwise. + * @pad: Number of pad characters expected (in the tail of dst buffer). + * (@pad does not include the null terminator byte.) + * + * Calls strscpy_pad() and verifies the return value and state of the + * destination buffer after the call returns. + */ +static void strscpy_check(struct kunit *test, char *src, int count, + int expected, int chars, int terminator, int pad) +{ + int nr_bytes_poison; + int max_expected; + int max_count; + int written; + char buf[6]; + int index, i; + const char POISON = 'z'; + + KUNIT_ASSERT_TRUE_MSG(test, src != NULL, + "null source string not supported"); + + memset(buf, POISON, sizeof(buf)); + /* Future proofing test suite, validate args */ + max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */ + max_expected = count - 1; /* Space for the null */ + + KUNIT_ASSERT_LE_MSG(test, count, max_count, + "count (%d) is too big (%d) ... 
aborting", count, max_count); + KUNIT_EXPECT_LE_MSG(test, expected, max_expected, + "expected (%d) is bigger than can possibly be returned (%d)", + expected, max_expected); + + written = strscpy_pad(buf, src, count); + KUNIT_ASSERT_EQ(test, written, expected); + + if (count && written == -E2BIG) { + KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1), + "buffer state invalid for -E2BIG"); + KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0', + "too big string is not null terminated correctly"); + } + + for (i = 0; i < chars; i++) + KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i], + "buf[i]==%c != src[i]==%c", buf[i], src[i]); + + if (terminator) + KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0', + "string is not null terminated correctly"); + + for (i = 0; i < pad; i++) { + index = chars + terminator + i; + KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0', + "padding missing at index: %d", i); + } + + nr_bytes_poison = sizeof(buf) - chars - terminator - pad; + for (i = 0; i < nr_bytes_poison; i++) { + index = sizeof(buf) - 1 - i; /* Check from the end back */ + KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON, + "poison value missing at index: %d", i); + } +} + +static void string_test_strscpy(struct kunit *test) +{ + char dest[8]; + + /* + * strscpy_check() uses a destination buffer of size 6 and needs at + * least 2 characters spare (one for null and one to check for + * overflow). This means we should only call tc() with + * strings up to a maximum of 4 characters long and 'count' + * should not exceed 4. To test with longer strings increase + * the buffer size in tc(). + */ + + /* strscpy_check(test, src, count, expected, chars, terminator, pad) */ + strscpy_check(test, "a", 0, -E2BIG, 0, 0, 0); + strscpy_check(test, "", 0, -E2BIG, 0, 0, 0); + + strscpy_check(test, "a", 1, -E2BIG, 0, 1, 0); + strscpy_check(test, "", 1, 0, 0, 1, 0); + + strscpy_check(test, "ab", 2, -E2BIG, 1, 1, 0); + strscpy_check(test, "a", 2, 1, 1, 1, 0); + strscpy_check(test, "", 2, 0, 0, 1, 1); + + strscpy_check(test, "abc", 3, -E2BIG, 2, 1, 0); + strscpy_check(test, "ab", 3, 2, 2, 1, 0); + strscpy_check(test, "a", 3, 1, 1, 1, 1); + strscpy_check(test, "", 3, 0, 0, 1, 2); + + strscpy_check(test, "abcd", 4, -E2BIG, 3, 1, 0); + strscpy_check(test, "abc", 4, 3, 3, 1, 0); + strscpy_check(test, "ab", 4, 2, 2, 1, 1); + strscpy_check(test, "a", 4, 1, 1, 1, 2); + strscpy_check(test, "", 4, 0, 0, 1, 3); + + /* Compile-time-known source strings. */ + KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0); + KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0); + KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0); + KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG); + KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG); +} + +static volatile int unconst; + +static void string_test_strcat(struct kunit *test) +{ + char dest[8]; + + /* Destination is terminated. */ + memset(dest, 0, sizeof(dest)); + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy does nothing. */ + KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* 4 characters copied in, stops at %NUL. 
*/ + KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest); + KUNIT_EXPECT_STREQ(test, dest, "four"); + KUNIT_EXPECT_EQ(test, dest[5], '\0'); + /* 2 more characters copied in okay. */ + KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); +} + +static void string_test_strncat(struct kunit *test) +{ + char dest[8]; + + /* Destination is terminated. */ + memset(dest, 0, sizeof(dest)); + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy of size 0 does nothing. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Empty copy of size 1 does nothing too. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Copy of max 0 characters should do nothing. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + + /* 4 characters copied in, even if max is 8. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, "four"); + KUNIT_EXPECT_EQ(test, dest[5], '\0'); + KUNIT_EXPECT_EQ(test, dest[6], '\0'); + /* 2 characters copied in okay, 2 ignored. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); +} + +static void string_test_strlcat(struct kunit *test) +{ + char dest[8] = ""; + int len = sizeof(dest) + unconst; + + /* Destination is terminated. */ + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy is size 0. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Size 1 should keep buffer terminated, report size of source only. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4); + KUNIT_EXPECT_STREQ(test, dest, ""); + + /* 4 characters copied in. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4); + KUNIT_EXPECT_STREQ(test, dest, "four"); + /* 2 characters copied in okay, gets to 6 total. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); + /* 2 characters ignored if max size (7) reached. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); + /* 1 of 2 characters skipped, now at true max size. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9); + KUNIT_EXPECT_STREQ(test, dest, "fourABE"); + /* Everything else ignored, now at full size. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11); + KUNIT_EXPECT_STREQ(test, dest, "fourABE"); +} + +static void string_test_strtomem(struct kunit *test) +{ + static const char input[sizeof(unsigned long)] = "hi"; + static const char truncate[] = "this is too long"; + struct { + unsigned long canary1; + unsigned char output[sizeof(unsigned long)] __nonstring; + unsigned long canary2; + } wrap; + + memset(&wrap, 0xFF, sizeof(wrap)); + KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX, + "bad initial canary value"); + KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX, + "bad initial canary value"); + + /* Check unpadded copy leaves surroundings untouched. 
*/ + strtomem(wrap.output, input); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); + KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); + for (size_t i = 2; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check truncated copy leaves surroundings untouched. */ + memset(&wrap, 0xFF, sizeof(wrap)); + strtomem(wrap.output, truncate); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + for (size_t i = 0; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check padded copy leaves only string padded. */ + memset(&wrap, 0xFF, sizeof(wrap)); + strtomem_pad(wrap.output, input, 0xAA); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); + KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); + for (size_t i = 2; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); + + /* Check truncated padded copy has no padding. */ + memset(&wrap, 0xFF, sizeof(wrap)); + strtomem(wrap.output, truncate); + KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); + for (size_t i = 0; i < sizeof(wrap.output); i++) + KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); + KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); +} + + +static void string_test_memtostr(struct kunit *test) +{ + char nonstring[7] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' }; + char nonstring_small[3] = { 'a', 'b', 'c' }; + char dest[sizeof(nonstring) + 1]; + + /* Copy in a non-NUL-terminated string into exactly right-sized dest. */ + KUNIT_EXPECT_EQ(test, sizeof(dest), sizeof(nonstring) + 1); + memset(dest, 'X', sizeof(dest)); + memtostr(dest, nonstring); + KUNIT_EXPECT_STREQ(test, dest, "abcdefg"); + memset(dest, 'X', sizeof(dest)); + memtostr(dest, nonstring_small); + KUNIT_EXPECT_STREQ(test, dest, "abc"); + KUNIT_EXPECT_EQ(test, dest[7], 'X'); + + memset(dest, 'X', sizeof(dest)); + memtostr_pad(dest, nonstring); + KUNIT_EXPECT_STREQ(test, dest, "abcdefg"); + memset(dest, 'X', sizeof(dest)); + memtostr_pad(dest, nonstring_small); + KUNIT_EXPECT_STREQ(test, dest, "abc"); + KUNIT_EXPECT_EQ(test, dest[7], '\0'); +} + static struct kunit_case string_test_cases[] = { - KUNIT_CASE(test_memset16), - KUNIT_CASE(test_memset32), - KUNIT_CASE(test_memset64), - KUNIT_CASE(test_strchr), - KUNIT_CASE(test_strnchr), - KUNIT_CASE(test_strspn), - KUNIT_CASE(test_strcmp), - KUNIT_CASE(test_strcmp_long_strings), - KUNIT_CASE(test_strncmp), - KUNIT_CASE(test_strncmp_long_strings), - KUNIT_CASE(test_strcasecmp), - KUNIT_CASE(test_strcasecmp_long_strings), - KUNIT_CASE(test_strncasecmp), - KUNIT_CASE(test_strncasecmp_long_strings), + KUNIT_CASE(string_test_memset16), + KUNIT_CASE(string_test_memset32), + KUNIT_CASE(string_test_memset64), + KUNIT_CASE(string_test_strchr), + KUNIT_CASE(string_test_strnchr), + KUNIT_CASE(string_test_strspn), + KUNIT_CASE(string_test_strcmp), + KUNIT_CASE(string_test_strcmp_long_strings), + KUNIT_CASE(string_test_strncmp), + KUNIT_CASE(string_test_strncmp_long_strings), + KUNIT_CASE(string_test_strcasecmp), + KUNIT_CASE(string_test_strcasecmp_long_strings), + KUNIT_CASE(string_test_strncasecmp), + KUNIT_CASE(string_test_strncasecmp_long_strings), + KUNIT_CASE(string_test_strscpy), + KUNIT_CASE(string_test_strcat), + KUNIT_CASE(string_test_strncat), + KUNIT_CASE(string_test_strlcat), + 
KUNIT_CASE(string_test_strtomem), + KUNIT_CASE(string_test_memtostr), {} }; @@ -351,4 +633,5 @@ static struct kunit_suite string_test_suite = { kunit_test_suites(&string_test_suite); +MODULE_DESCRIPTION("Test cases for string functions"); MODULE_LICENSE("GPL v2"); diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c deleted file mode 100644 index b6d1d93a88..0000000000 --- a/lib/strscpy_kunit.c +++ /dev/null @@ -1,143 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Kernel module for testing 'strscpy' family of functions. - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <kunit/test.h> -#include <linux/string.h> - -/** - * strscpy_check() - Run a specific test case. - * @test: KUnit test context pointer - * @src: Source string, argument to strscpy_pad() - * @count: Size of destination buffer, argument to strscpy_pad() - * @expected: Expected return value from call to strscpy_pad() - * @chars: Number of characters from the src string expected to be - * written to the dst buffer. - * @terminator: 1 if there should be a terminating null byte 0 otherwise. - * @pad: Number of pad characters expected (in the tail of dst buffer). - * (@pad does not include the null terminator byte.) - * - * Calls strscpy_pad() and verifies the return value and state of the - * destination buffer after the call returns. - */ -static void strscpy_check(struct kunit *test, char *src, int count, - int expected, int chars, int terminator, int pad) -{ - int nr_bytes_poison; - int max_expected; - int max_count; - int written; - char buf[6]; - int index, i; - const char POISON = 'z'; - - KUNIT_ASSERT_TRUE_MSG(test, src != NULL, - "null source string not supported"); - - memset(buf, POISON, sizeof(buf)); - /* Future proofing test suite, validate args */ - max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */ - max_expected = count - 1; /* Space for the null */ - - KUNIT_ASSERT_LE_MSG(test, count, max_count, - "count (%d) is too big (%d) ... aborting", count, max_count); - KUNIT_EXPECT_LE_MSG(test, expected, max_expected, - "expected (%d) is bigger than can possibly be returned (%d)", - expected, max_expected); - - written = strscpy_pad(buf, src, count); - KUNIT_ASSERT_EQ(test, written, expected); - - if (count && written == -E2BIG) { - KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1), - "buffer state invalid for -E2BIG"); - KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0', - "too big string is not null terminated correctly"); - } - - for (i = 0; i < chars; i++) - KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i], - "buf[i]==%c != src[i]==%c", buf[i], src[i]); - - if (terminator) - KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0', - "string is not null terminated correctly"); - - for (i = 0; i < pad; i++) { - index = chars + terminator + i; - KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0', - "padding missing at index: %d", i); - } - - nr_bytes_poison = sizeof(buf) - chars - terminator - pad; - for (i = 0; i < nr_bytes_poison; i++) { - index = sizeof(buf) - 1 - i; /* Check from the end back */ - KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON, - "poison value missing at index: %d", i); - } -} - -static void test_strscpy(struct kunit *test) -{ - char dest[8]; - - /* - * strscpy_check() uses a destination buffer of size 6 and needs at - * least 2 characters spare (one for null and one to check for - * overflow). This means we should only call tc() with - * strings up to a maximum of 4 characters long and 'count' - * should not exceed 4. 
To test with longer strings increase - * the buffer size in tc(). - */ - - /* strscpy_check(test, src, count, expected, chars, terminator, pad) */ - strscpy_check(test, "a", 0, -E2BIG, 0, 0, 0); - strscpy_check(test, "", 0, -E2BIG, 0, 0, 0); - - strscpy_check(test, "a", 1, -E2BIG, 0, 1, 0); - strscpy_check(test, "", 1, 0, 0, 1, 0); - - strscpy_check(test, "ab", 2, -E2BIG, 1, 1, 0); - strscpy_check(test, "a", 2, 1, 1, 1, 0); - strscpy_check(test, "", 2, 0, 0, 1, 1); - - strscpy_check(test, "abc", 3, -E2BIG, 2, 1, 0); - strscpy_check(test, "ab", 3, 2, 2, 1, 0); - strscpy_check(test, "a", 3, 1, 1, 1, 1); - strscpy_check(test, "", 3, 0, 0, 1, 2); - - strscpy_check(test, "abcd", 4, -E2BIG, 3, 1, 0); - strscpy_check(test, "abc", 4, 3, 3, 1, 0); - strscpy_check(test, "ab", 4, 2, 2, 1, 1); - strscpy_check(test, "a", 4, 1, 1, 1, 2); - strscpy_check(test, "", 4, 0, 0, 1, 3); - - /* Compile-time-known source strings. */ - KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0); - KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0); - KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0); - KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG); - KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG); -} - -static struct kunit_case strscpy_test_cases[] = { - KUNIT_CASE(test_strscpy), - {} -}; - -static struct kunit_suite strscpy_test_suite = { - .name = "strscpy", - .test_cases = strscpy_test_cases, -}; - -kunit_test_suite(strscpy_test_suite); - -MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>"); -MODULE_LICENSE("GPL"); diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c index 6b2b33579f..6dfb8d46a4 100644 --- a/lib/test_bitmap.c +++ b/lib/test_bitmap.c @@ -60,18 +60,17 @@ static const unsigned long exp3_1_0[] __initconst = { }; static bool __init -__check_eq_uint(const char *srcfile, unsigned int line, - const unsigned int exp_uint, unsigned int x) +__check_eq_ulong(const char *srcfile, unsigned int line, + const unsigned long exp_ulong, unsigned long x) { - if (exp_uint != x) { - pr_err("[%s:%u] expected %u, got %u\n", - srcfile, line, exp_uint, x); + if (exp_ulong != x) { + pr_err("[%s:%u] expected %lu, got %lu\n", + srcfile, line, exp_ulong, x); return false; } return true; } - static bool __init __check_eq_bitmap(const char *srcfile, unsigned int line, const unsigned long *exp_bmap, const unsigned long *bmap, @@ -185,7 +184,8 @@ __check_eq_str(const char *srcfile, unsigned int line, result; \ }) -#define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__) +#define expect_eq_ulong(...) __expect_eq(ulong, ##__VA_ARGS__) +#define expect_eq_uint(x, y) expect_eq_ulong((unsigned int)(x), (unsigned int)(y)) #define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__) #define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__) #define expect_eq_u32_array(...) 
__expect_eq(u32_array, ##__VA_ARGS__) @@ -244,7 +244,7 @@ static void __init test_find_nth_bit(void) expect_eq_uint(60, find_nth_bit(bmap, 64 * 3, 5)); expect_eq_uint(80, find_nth_bit(bmap, 64 * 3, 6)); expect_eq_uint(123, find_nth_bit(bmap, 64 * 3, 7)); - expect_eq_uint(64 * 3, find_nth_bit(bmap, 64 * 3, 8)); + expect_eq_uint(0, !!(find_nth_bit(bmap, 64 * 3, 8) < 64 * 3)); expect_eq_uint(10, find_nth_bit(bmap, 64 * 3 - 1, 0)); expect_eq_uint(20, find_nth_bit(bmap, 64 * 3 - 1, 1)); @@ -254,7 +254,7 @@ static void __init test_find_nth_bit(void) expect_eq_uint(60, find_nth_bit(bmap, 64 * 3 - 1, 5)); expect_eq_uint(80, find_nth_bit(bmap, 64 * 3 - 1, 6)); expect_eq_uint(123, find_nth_bit(bmap, 64 * 3 - 1, 7)); - expect_eq_uint(64 * 3 - 1, find_nth_bit(bmap, 64 * 3 - 1, 8)); + expect_eq_uint(0, !!(find_nth_bit(bmap, 64 * 3 - 1, 8) < 64 * 3 - 1)); for_each_set_bit(bit, exp1, EXP1_IN_BITS) { b = find_nth_bit(exp1, EXP1_IN_BITS, cnt++); @@ -548,7 +548,7 @@ static void __init test_bitmap_parselist(void) } if (ptest.flags & PARSE_TIME) - pr_err("parselist: %d: input is '%s' OK, Time: %llu\n", + pr_info("parselist: %d: input is '%s' OK, Time: %llu\n", i, ptest.in, time); #undef ptest @@ -587,7 +587,7 @@ static void __init test_bitmap_printlist(void) goto out; } - pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time); + pr_info("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time); out: kfree(buf); kfree(bmap); @@ -665,7 +665,7 @@ static void __init test_bitmap_parse(void) } if (test.flags & PARSE_TIME) - pr_err("parse: %d: input is '%s' OK, Time: %llu\n", + pr_info("parse: %d: input is '%s' OK, Time: %llu\n", i, test.in, time); } } @@ -1245,14 +1245,7 @@ static void __init test_bitmap_const_eval(void) * in runtime. */ - /* - * Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`. - * Clang on s390 optimizes bitops at compile-time as intended, but at - * the same time stops treating @bitmap and @bitopvar as compile-time - * constants after regular test_bit() is executed, thus triggering the - * build bugs below. So, call const_test_bit() there directly until - * the compiler is fixed. - */ + /* Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }` */ bitmap_clear(bitmap, 0, BITS_PER_LONG); if (!test_bit(7, bitmap)) bitmap_set(bitmap, 5, 2); @@ -1284,8 +1277,179 @@ static void __init test_bitmap_const_eval(void) /* ~BIT(25) */ BUILD_BUG_ON(!__builtin_constant_p(~var)); BUILD_BUG_ON(~var != ~BIT(25)); + + /* ~BIT(25) | BIT(25) == ~0UL */ + bitmap_complement(&var, &var, BITS_PER_LONG); + __assign_bit(25, &var, true); + + /* !(~(~0UL)) == 1 */ + res = bitmap_full(&var, BITS_PER_LONG); + BUILD_BUG_ON(!__builtin_constant_p(res)); + BUILD_BUG_ON(!res); +} + +/* + * Test bitmap should be big enough to include the cases when start is not in + * the first word, and start+nbits lands in the following word. + */ +#define TEST_BIT_LEN (1000) + +/* + * Helper function to test bitmap_write() overwriting the chosen byte pattern. + */ +static void __init test_bitmap_write_helper(const char *pattern) +{ + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); + DECLARE_BITMAP(exp_bitmap, TEST_BIT_LEN); + DECLARE_BITMAP(pat_bitmap, TEST_BIT_LEN); + unsigned long w, r, bit; + int i, n, nbits; + + /* + * Only parse the pattern once and store the result in the intermediate + * bitmap. + */ + bitmap_parselist(pattern, pat_bitmap, TEST_BIT_LEN); + + /* + * Check that writing a single bit does not accidentally touch the + * adjacent bits. 
+ */ + for (i = 0; i < TEST_BIT_LEN; i++) { + bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN); + bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN); + for (bit = 0; bit <= 1; bit++) { + bitmap_write(bitmap, bit, i, 1); + __assign_bit(i, exp_bitmap, bit); + expect_eq_bitmap(exp_bitmap, bitmap, + TEST_BIT_LEN); + } + } + + /* Ensure writing 0 bits does not change anything. */ + bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN); + bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN); + for (i = 0; i < TEST_BIT_LEN; i++) { + bitmap_write(bitmap, ~0UL, i, 0); + expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN); + } + + for (nbits = BITS_PER_LONG; nbits >= 1; nbits--) { + w = IS_ENABLED(CONFIG_64BIT) ? 0xdeadbeefdeadbeefUL + : 0xdeadbeefUL; + w >>= (BITS_PER_LONG - nbits); + for (i = 0; i <= TEST_BIT_LEN - nbits; i++) { + bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN); + bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN); + for (n = 0; n < nbits; n++) + __assign_bit(i + n, exp_bitmap, w & BIT(n)); + bitmap_write(bitmap, w, i, nbits); + expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN); + r = bitmap_read(bitmap, i, nbits); + expect_eq_ulong(r, w); + } + } +} + +static void __init test_bitmap_read_write(void) +{ + unsigned char *pattern[3] = {"", "all:1/2", "all"}; + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); + unsigned long zero_bits = 0, bits_per_long = BITS_PER_LONG; + unsigned long val; + int i, pi; + + /* + * Reading/writing zero bits should not crash the kernel. + * READ_ONCE() prevents constant folding. + */ + bitmap_write(NULL, 0, 0, READ_ONCE(zero_bits)); + /* Return value of bitmap_read() is undefined here. */ + bitmap_read(NULL, 0, READ_ONCE(zero_bits)); + + /* + * Reading/writing more than BITS_PER_LONG bits should not crash the + * kernel. READ_ONCE() prevents constant folding. + */ + bitmap_write(NULL, 0, 0, READ_ONCE(bits_per_long) + 1); + /* Return value of bitmap_read() is undefined here. */ + bitmap_read(NULL, 0, READ_ONCE(bits_per_long) + 1); + + /* + * Ensure that bitmap_read() reads the same value that was previously + * written, and two consequent values are correctly merged. + * The resulting bit pattern is asymmetric to rule out possible issues + * with bit numeration order. + */ + for (i = 0; i < TEST_BIT_LEN - 7; i++) { + bitmap_zero(bitmap, TEST_BIT_LEN); + + bitmap_write(bitmap, 0b10101UL, i, 5); + val = bitmap_read(bitmap, i, 5); + expect_eq_ulong(0b10101UL, val); + + bitmap_write(bitmap, 0b101UL, i + 5, 3); + val = bitmap_read(bitmap, i + 5, 3); + expect_eq_ulong(0b101UL, val); + + val = bitmap_read(bitmap, i, 8); + expect_eq_ulong(0b10110101UL, val); + } + + for (pi = 0; pi < ARRAY_SIZE(pattern); pi++) + test_bitmap_write_helper(pattern[pi]); } +static void __init test_bitmap_read_perf(void) +{ + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); + unsigned int cnt, nbits, i; + unsigned long val; + ktime_t time; + + bitmap_fill(bitmap, TEST_BIT_LEN); + time = ktime_get(); + for (cnt = 0; cnt < 5; cnt++) { + for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) { + for (i = 0; i < TEST_BIT_LEN; i++) { + if (i + nbits > TEST_BIT_LEN) + break; + /* + * Prevent the compiler from optimizing away the + * bitmap_read() by using its value. 
+ */ + WRITE_ONCE(val, bitmap_read(bitmap, i, nbits)); + } + } + } + time = ktime_get() - time; + pr_info("Time spent in %s:\t%llu\n", __func__, time); +} + +static void __init test_bitmap_write_perf(void) +{ + DECLARE_BITMAP(bitmap, TEST_BIT_LEN); + unsigned int cnt, nbits, i; + unsigned long val = 0xfeedface; + ktime_t time; + + bitmap_zero(bitmap, TEST_BIT_LEN); + time = ktime_get(); + for (cnt = 0; cnt < 5; cnt++) { + for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) { + for (i = 0; i < TEST_BIT_LEN; i++) { + if (i + nbits > TEST_BIT_LEN) + break; + bitmap_write(bitmap, val, i, nbits); + } + } + } + time = ktime_get() - time; + pr_info("Time spent in %s:\t%llu\n", __func__, time); +} + +#undef TEST_BIT_LEN + static void __init selftest(void) { test_zero_clear(); @@ -1303,6 +1467,9 @@ static void __init selftest(void) test_bitmap_cut(); test_bitmap_print_buf(); test_bitmap_const_eval(); + test_bitmap_read_write(); + test_bitmap_read_perf(); + test_bitmap_write_perf(); test_find_nth_bit(); test_for_each_set_bit(); diff --git a/lib/test_bitops.c b/lib/test_bitops.c index 3b7bcbee84..55669624bb 100644 --- a/lib/test_bitops.c +++ b/lib/test_bitops.c @@ -5,9 +5,11 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/cleanup.h> #include <linux/init.h> #include <linux/module.h> #include <linux/printk.h> +#include <linux/slab.h> /* a tiny module only meant to test * @@ -50,6 +52,30 @@ static unsigned long order_comb_long[][2] = { }; #endif +static int __init test_fns(void) +{ + static volatile __always_used unsigned long tmp __initdata; + unsigned long *buf __free(kfree) = NULL; + unsigned int i, n; + ktime_t time; + + buf = kmalloc_array(10000, sizeof(unsigned long), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + get_random_bytes(buf, 10000 * sizeof(unsigned long)); + time = ktime_get(); + + for (n = 0; n < BITS_PER_LONG; n++) + for (i = 0; i < 10000; i++) + tmp = fns(buf[i], n); + + time = ktime_get() - time; + pr_err("fns: %18llu ns\n", time); + + return 0; +} + static int __init test_bitops_startup(void) { int i, bit_set; @@ -94,6 +120,8 @@ static int __init test_bitops_startup(void) if (bit_set != BITOPS_LAST) pr_err("ERROR: FOUND SET BIT %d\n", bit_set); + test_fns(); + pr_info("Completed bitops test\n"); return 0; diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 569e6d2dc5..207ff87194 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -13431,7 +13431,7 @@ static struct bpf_test tests[] = { .stack_depth = 8, .nr_testruns = NR_PATTERN_RUNS, }, - /* 64-bit atomic magnitudes */ + /* 32-bit atomic magnitudes */ { "ATOMIC_W_ADD: all operand magnitudes", { }, diff --git a/lib/test_fpu.h b/lib/test_fpu.h new file mode 100644 index 0000000000..4459807084 --- /dev/null +++ b/lib/test_fpu.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _LIB_TEST_FPU_H +#define _LIB_TEST_FPU_H + +int test_fpu(void); + +#endif diff --git a/lib/test_fpu.c b/lib/test_fpu_glue.c index e82db19fed..eef282a271 100644 --- a/lib/test_fpu.c +++ b/lib/test_fpu_glue.c @@ -17,39 +17,9 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/debugfs.h> -#include <asm/fpu/api.h> +#include <linux/fpu.h> -static int test_fpu(void) -{ - /* - * This sequence of operations tests that rounding mode is - * to nearest and that denormal numbers are supported. - * Volatile variables are used to avoid compiler optimizing - * the calculations away. 
- */ - volatile double a, b, c, d, e, f, g; - - a = 4.0; - b = 1e-15; - c = 1e-310; - - /* Sets precision flag */ - d = a + b; - - /* Result depends on rounding mode */ - e = a + b / 2; - - /* Denormal and very large values */ - f = b / c; - - /* Depends on denormal support */ - g = a + c * f; - - if (d > a && e > a && g > a) - return 0; - else - return -EINVAL; -} +#include "test_fpu.h" static int test_fpu_get(void *data, u64 *val) { @@ -68,6 +38,9 @@ static struct dentry *selftest_dir; static int __init test_fpu_init(void) { + if (!kernel_fpu_available()) + return -EINVAL; + selftest_dir = debugfs_create_dir("selftest_helpers", NULL); if (!selftest_dir) return -ENOMEM; diff --git a/lib/test_fpu_impl.c b/lib/test_fpu_impl.c new file mode 100644 index 0000000000..777894dbbe --- /dev/null +++ b/lib/test_fpu_impl.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include <linux/errno.h> + +#include "test_fpu.h" + +int test_fpu(void) +{ + /* + * This sequence of operations tests that rounding mode is + * to nearest and that denormal numbers are supported. + * Volatile variables are used to avoid compiler optimizing + * the calculations away. + */ + volatile double a, b, c, d, e, f, g; + + a = 4.0; + b = 1e-15; + c = 1e-310; + + /* Sets precision flag */ + d = a + b; + + /* Result depends on rounding mode */ + e = a + b / 2; + + /* Denormal and very large values */ + f = b / c; + + /* Depends on denormal support */ + g = a + c * f; + + if (d > a && e > a && g > a) + return 0; + else + return -EINVAL; +} diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c index b916801f23..fe2682bb21 100644 --- a/lib/test_hexdump.c +++ b/lib/test_hexdump.c @@ -113,7 +113,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize, *p++ = ' '; } while (p < test + rs * 2 + rs / gs + 1); - strncpy(p, data_a, l); + memcpy(p, data_a, l); p += l; } diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 42b5852082..c63db03ebb 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -811,4 +811,5 @@ static void __exit test_rht_exit(void) module_init(test_rht_init); module_exit(test_rht_exit); +MODULE_DESCRIPTION("Resizable, Scalable, Concurrent Hash Table test module"); MODULE_LICENSE("GPL v2"); diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 928fc20337..ab9cc42a0d 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -2001,6 +2001,97 @@ static noinline void check_get_order(struct xarray *xa) } } +static noinline void check_xas_get_order(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; + unsigned int order; + unsigned long i, j; + + for (order = 0; order < max_order; order++) { + for (i = 0; i < 10; i++) { + xas_set_order(&xas, i << order, order); + do { + xas_lock(&xas); + xas_store(&xas, xa_mk_value(i)); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + + for (j = i << order; j < (i + 1) << order; j++) { + xas_set_order(&xas, j, 0); + rcu_read_lock(); + xas_load(&xas); + XA_BUG_ON(xa, xas_get_order(&xas) != order); + rcu_read_unlock(); + } + + xas_lock(&xas); + xas_set_order(&xas, i << order, order); + xas_store(&xas, NULL); + xas_unlock(&xas); + } + } +} + +static noinline void check_xas_conflict_get_order(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + + void *entry; + int only_once; + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; + unsigned int order; + unsigned long i, j, k; + + for (order = 0; order < max_order; order++) { + for (i = 0; i < 10; i++) { + xas_set_order(&xas, i << order, order); + do { + xas_lock(&xas); + xas_store(&xas, xa_mk_value(i)); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + + /* + * Ensure xas_get_order works with xas_for_each_conflict. + */ + j = i << order; + for (k = 0; k < order; k++) { + only_once = 0; + xas_set_order(&xas, j + (1 << k), k); + xas_lock(&xas); + xas_for_each_conflict(&xas, entry) { + XA_BUG_ON(xa, entry != xa_mk_value(i)); + XA_BUG_ON(xa, xas_get_order(&xas) != order); + only_once++; + } + XA_BUG_ON(xa, only_once != 1); + xas_unlock(&xas); + } + + if (order < max_order - 1) { + only_once = 0; + xas_set_order(&xas, (i & ~1UL) << order, order + 1); + xas_lock(&xas); + xas_for_each_conflict(&xas, entry) { + XA_BUG_ON(xa, entry != xa_mk_value(i)); + XA_BUG_ON(xa, xas_get_order(&xas) != order); + only_once++; + } + XA_BUG_ON(xa, only_once != 1); + xas_unlock(&xas); + } + + xas_set_order(&xas, i << order, order); + xas_lock(&xas); + xas_store(&xas, NULL); + xas_unlock(&xas); + } + } +} + + static noinline void check_destroy(struct xarray *xa) { unsigned long index; @@ -2052,6 +2143,8 @@ static int xarray_checks(void) check_multi_store(&array); check_multi_store_advanced(&array); check_get_order(&array); + check_xas_get_order(&array); + check_xas_conflict_get_order(&array); check_xa_alloc(); check_find(&array); check_find_entry(&array); diff --git a/lib/ubsan.h b/lib/ubsan.h index 0982578fbd..07e37d4429 100644 --- a/lib/ubsan.h +++ b/lib/ubsan.h @@ -43,7 +43,7 @@ enum { struct type_descriptor { u16 type_kind; u16 type_info; - char type_name[1]; + char type_name[]; }; struct source_location { diff --git a/lib/usercopy.c b/lib/usercopy.c index d29fe29c68..499a7a7d54 100644 --- a/lib/usercopy.c +++ b/lib/usercopy.c @@ -1,9 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 -#include <linux/bitops.h> +#include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/export.h> #include <linux/fault-inject-usercopy.h> #include <linux/instrumented.h> -#include <linux/uaccess.h> +#include <linux/kernel.h> #include <linux/nospec.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <linux/wordpart.h> /* out-of-line parts */ diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig index d883ac2995..c46c230051 100644 --- a/lib/vdso/Kconfig +++ b/lib/vdso/Kconfig @@ -30,4 +30,11 @@ config GENERIC_VDSO_TIME_NS Selected by architectures which support time namespaces in the VDSO +config GENERIC_VDSO_OVERFLOW_PROTECT + bool + help + Select to add multiplication overflow protection to the VDSO + time getter functions for the price of an extra conditional + in the hotpath. + endif diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c index ce2f695520..899850bd6f 100644 --- a/lib/vdso/gettimeofday.c +++ b/lib/vdso/gettimeofday.c @@ -5,15 +5,23 @@ #include <vdso/datapage.h> #include <vdso/helpers.h> -#ifndef vdso_calc_delta -/* - * Default implementation which works for all sane clocksources. That - * obviously excludes x86/TSC. 
- */ -static __always_inline -u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult) +#ifndef vdso_calc_ns + +#ifdef VDSO_DELTA_NOMASK +# define VDSO_DELTA_MASK(vd) ULLONG_MAX +#else +# define VDSO_DELTA_MASK(vd) (vd->mask) +#endif + +#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT +static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta) +{ + return delta < vd->max_cycles; +} +#else +static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta) { - return ((cycles - last) & mask) * mult; + return true; } #endif @@ -24,6 +32,21 @@ static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift) } #endif +/* + * Default implementation which works for all sane clocksources. That + * obviously excludes x86/TSC. + */ +static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base) +{ + u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd); + + if (likely(vdso_delta_ok(vd, delta))) + return vdso_shift_ns((delta * vd->mult) + base, vd->shift); + + return mul_u64_u32_add_u64_shr(delta, vd->mult, base, vd->shift); +} +#endif /* vdso_calc_ns */ + #ifndef __arch_vdso_hres_capable static inline bool __arch_vdso_hres_capable(void) { @@ -49,10 +72,10 @@ static inline bool vdso_cycles_ok(u64 cycles) static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk, struct __kernel_timespec *ts) { - const struct vdso_data *vd; const struct timens_offset *offs = &vdns->offset[clk]; const struct vdso_timestamp *vdso_ts; - u64 cycles, last, ns; + const struct vdso_data *vd; + u64 cycles, ns; u32 seq; s64 sec; @@ -73,10 +96,7 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_ cycles = __arch_get_hw_counter(vd->clock_mode, vd); if (unlikely(!vdso_cycles_ok(cycles))) return -1; - ns = vdso_ts->nsec; - last = vd->cycle_last; - ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); - ns = vdso_shift_ns(ns, vd->shift); + ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec); sec = vdso_ts->sec; } while (unlikely(vdso_read_retry(vd, seq))); @@ -111,7 +131,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk, struct __kernel_timespec *ts) { const struct vdso_timestamp *vdso_ts = &vd->basetime[clk]; - u64 cycles, last, sec, ns; + u64 cycles, sec, ns; u32 seq; /* Allows to compile the high resolution parts out */ @@ -144,10 +164,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk, cycles = __arch_get_hw_counter(vd->clock_mode, vd); if (unlikely(!vdso_cycles_ok(cycles))) return -1; - ns = vdso_ts->nsec; - last = vd->cycle_last; - ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult); - ns = vdso_shift_ns(ns, vd->shift); + ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec); sec = vdso_ts->sec; } while (unlikely(vdso_read_retry(vd, seq))); diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 552738f142..cdd4e2314b 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -966,13 +966,13 @@ char *bdev_name(char *buf, char *end, struct block_device *bdev, hd = bdev->bd_disk; buf = string(buf, end, hd->disk_name, spec); - if (bdev->bd_partno) { + if (bdev_is_partition(bdev)) { if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) { if (buf < end) *buf = 'p'; buf++; } - buf = number(buf, end, bdev->bd_partno, spec); + buf = number(buf, end, bdev_partno(bdev), spec); } return buf; } diff --git a/lib/xarray.c b/lib/xarray.c index 5e7d6334d7..32d4bac8c9 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -200,7 +200,8 @@ static void *xas_start(struct xa_state *xas) 
return entry; } -static void *xas_descend(struct xa_state *xas, struct xa_node *node) +static __always_inline void *xas_descend(struct xa_state *xas, + struct xa_node *node) { unsigned int offset = get_offset(xas->xa_index, node); void *entry = xa_entry(xas->xa, node, offset); @@ -1765,39 +1766,52 @@ unlock: EXPORT_SYMBOL(xa_store_range); /** - * xa_get_order() - Get the order of an entry. - * @xa: XArray. - * @index: Index of the entry. + * xas_get_order() - Get the order of an entry. + * @xas: XArray operation state. + * + * Called after xas_load, the xas should not be in an error state. * * Return: A number between 0 and 63 indicating the order of the entry. */ -int xa_get_order(struct xarray *xa, unsigned long index) +int xas_get_order(struct xa_state *xas) { - XA_STATE(xas, xa, index); - void *entry; int order = 0; - rcu_read_lock(); - entry = xas_load(&xas); - - if (!entry) - goto unlock; - - if (!xas.xa_node) - goto unlock; + if (!xas->xa_node) + return 0; for (;;) { - unsigned int slot = xas.xa_offset + (1 << order); + unsigned int slot = xas->xa_offset + (1 << order); if (slot >= XA_CHUNK_SIZE) break; - if (!xa_is_sibling(xas.xa_node->slots[slot])) + if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot))) break; order++; } - order += xas.xa_node->shift; -unlock: + order += xas->xa_node->shift; + return order; +} +EXPORT_SYMBOL_GPL(xas_get_order); + +/** + * xa_get_order() - Get the order of an entry. + * @xa: XArray. + * @index: Index of the entry. + * + * Return: A number between 0 and 63 indicating the order of the entry. + */ +int xa_get_order(struct xarray *xa, unsigned long index) +{ + XA_STATE(xas, xa, index); + int order = 0; + void *entry; + + rcu_read_lock(); + entry = xas_load(&xas); + if (entry) + order = xas_get_order(&xas); rcu_read_unlock(); return order;
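Note: the xas_get_order()/xa_get_order() split in the hunk above lets a caller that already holds an xa_state (and the RCU read lock) query an entry's order without a second tree walk. A minimal sketch of such a caller follows; it simply mirrors the new xa_get_order() wrapper from the patch. The function lookup_with_order() is hypothetical — only xas_load() and xas_get_order() come from this series.

	#include <linux/xarray.h>

	/*
	 * Hypothetical helper: fetch an entry and its order in a single
	 * walk, instead of calling xa_load() and xa_get_order() back to
	 * back. xas_get_order() reuses the node position left behind by
	 * xas_load(), so both calls must sit under the same RCU
	 * read-side critical section.
	 */
	static void *lookup_with_order(struct xarray *xa, unsigned long index,
				       int *order)
	{
		XA_STATE(xas, xa, index);
		void *entry;

		rcu_read_lock();
		entry = xas_load(&xas);
		/* Order is only meaningful for a present entry; default to 0. */
		*order = entry ? xas_get_order(&xas) : 0;
		rcu_read_unlock();

		return entry;
	}

The same pattern is what the new check_xas_get_order() and check_xas_conflict_get_order() tests in lib/test_xarray.c exercise: load (or iterate conflicts) under RCU, then ask the xa_state for the order of the entry it landed on.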