Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig | 13
-rw-r--r--  lib/Kconfig.debug | 118
-rw-r--r--  lib/Kconfig.kasan | 2
-rw-r--r--  lib/Kconfig.kgdb | 3
-rw-r--r--  lib/Kconfig.ubsan | 31
-rw-r--r--  lib/Makefile | 52
-rw-r--r--  lib/alloc_tag.c | 273
-rw-r--r--  lib/assoc_array.c | 2
-rw-r--r--  lib/bitmap.c | 7
-rw-r--r--  lib/bootconfig.c | 3
-rwxr-xr-x  lib/build_OID_registry | 7
-rw-r--r--  lib/buildid.c | 10
-rw-r--r--  lib/closure.c | 94
-rw-r--r--  lib/cmpxchg-emu.c | 45
-rw-r--r--  lib/codetag.c | 283
-rw-r--r--  lib/crypto/Kconfig | 5
-rw-r--r--  lib/crypto/Makefile | 3
-rw-r--r--  lib/crypto/aescfb.c | 257
-rw-r--r--  lib/debugobjects.c | 21
-rw-r--r--  lib/decompress_bunzip2.c | 3
-rw-r--r--  lib/devres.c | 234
-rw-r--r--  lib/dhry_1.c | 2
-rw-r--r--  lib/dhry_run.c | 1
-rw-r--r--  lib/dim/Makefile | 4
-rw-r--r--  lib/dim/dim.c | 3
-rw-r--r--  lib/dynamic_debug.c | 7
-rw-r--r--  lib/dynamic_queue_limits.c | 79
-rw-r--r--  lib/find_bit.c | 14
-rw-r--r--  lib/flex_proportions.c | 77
-rw-r--r--  lib/fonts/Kconfig | 3
-rw-r--r--  lib/fonts/fonts.c | 15
-rw-r--r--  lib/fortify_kunit.c | 805
-rw-r--r--  lib/fw_table.c | 15
-rw-r--r--  lib/generic-radix-tree.c | 35
-rw-r--r--  lib/iomap_copy.c | 13
-rw-r--r--  lib/iov_iter.c | 60
-rw-r--r--  lib/kfifo.c | 115
-rw-r--r--  lib/kobject_uevent.c | 41
-rw-r--r--  lib/kunit/Kconfig | 11
-rw-r--r--  lib/kunit/device.c | 2
-rw-r--r--  lib/kunit/executor.c | 6
-rw-r--r--  lib/kunit/kunit-test.c | 45
-rw-r--r--  lib/kunit/string-stream-test.c | 12
-rw-r--r--  lib/kunit/try-catch.c | 31
-rw-r--r--  lib/kunit_iov_iter.c | 18
-rw-r--r--  lib/livepatch/Makefile | 14
-rw-r--r--  lib/livepatch/test_klp_atomic_replace.c | 57
-rw-r--r--  lib/livepatch/test_klp_callbacks_busy.c | 70
-rw-r--r--  lib/livepatch/test_klp_callbacks_demo.c | 121
-rw-r--r--  lib/livepatch/test_klp_callbacks_demo2.c | 93
-rw-r--r--  lib/livepatch/test_klp_callbacks_mod.c | 24
-rw-r--r--  lib/livepatch/test_klp_livepatch.c | 51
-rw-r--r--  lib/livepatch/test_klp_shadow_vars.c | 301
-rw-r--r--  lib/livepatch/test_klp_state.c | 162
-rw-r--r--  lib/livepatch/test_klp_state2.c | 191
-rw-r--r--  lib/livepatch/test_klp_state3.c | 5
-rw-r--r--  lib/maple_tree.c | 6
-rw-r--r--  lib/math/div64.c | 15
-rw-r--r--  lib/math/prime_numbers.c | 2
-rw-r--r--  lib/memcpy_kunit.c | 56
-rw-r--r--  lib/objagg.c | 18
-rw-r--r--  lib/objpool.c | 112
-rw-r--r--  lib/overflow_kunit.c | 87
-rw-r--r--  lib/pci_iomap.c | 180
-rw-r--r--  lib/raid6/Makefile | 37
-rw-r--r--  lib/raid6/s390vx.uc | 62
-rw-r--r--  lib/rhashtable.c | 22
-rw-r--r--  lib/sbitmap.c | 44
-rw-r--r--  lib/sort.c | 20
-rw-r--r--  lib/stackdepot.c | 19
-rw-r--r--  lib/stackinit_kunit.c | 21
-rw-r--r--  lib/strcat_kunit.c | 104
-rw-r--r--  lib/string.c | 23
-rw-r--r--  lib/string_helpers.c | 89
-rw-r--r--  lib/string_helpers_kunit.c (renamed from lib/test-string_helpers.c) | 256
-rw-r--r--  lib/string_kunit.c | 637
-rw-r--r--  lib/strscpy_kunit.c | 142
-rw-r--r--  lib/test_bitmap.c | 249
-rw-r--r--  lib/test_bitops.c | 28
-rw-r--r--  lib/test_bpf.c | 2
-rw-r--r--  lib/test_fpu.h | 8
-rw-r--r--  lib/test_fpu_glue.c (renamed from lib/test_fpu.c) | 37
-rw-r--r--  lib/test_fpu_impl.c | 37
-rw-r--r--  lib/test_hexdump.c | 2
-rw-r--r--  lib/test_kmod.c | 6
-rw-r--r--  lib/test_maple_tree.c | 44
-rw-r--r--  lib/test_rhashtable.c | 1
-rw-r--r--  lib/test_string.c | 257
-rw-r--r--  lib/test_ubsan.c | 43
-rw-r--r--  lib/test_vmalloc.c | 11
-rw-r--r--  lib/test_xarray.c | 342
-rw-r--r--  lib/ubsan.c | 86
-rw-r--r--  lib/ubsan.h | 39
-rw-r--r--  lib/usercopy.c | 9
-rw-r--r--  lib/vdso/Kconfig | 7
-rw-r--r--  lib/vdso/gettimeofday.c | 55
-rw-r--r--  lib/vsprintf.c | 4
-rw-r--r--  lib/xarray.c | 75
98 files changed, 4249 insertions, 3014 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 5ddda7c2ed..b0a76dff5c 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -70,9 +70,6 @@ source "lib/math/Kconfig"
config NO_GENERIC_PCI_IOPORT_MAP
bool
-config GENERIC_PCI_IOMAP
- bool
-
config GENERIC_IOMAP
bool
select GENERIC_PCI_IOMAP
@@ -542,13 +539,7 @@ config CPUMASK_OFFSTACK
stack overflow.
config FORCE_NR_CPUS
- bool "Set number of CPUs at compile time"
- depends on SMP && EXPERT && !COMPILE_TEST
- help
- Say Yes if you have NR_CPUS set to an actual number of possible
- CPUs in your system, not to a default value. This forces the core
- code to rely on compile-time value and optimize kernel routines
- better.
+ def_bool !SMP
config CPU_RMAP
bool
@@ -631,7 +622,7 @@ config SIGNATURE
Implementation is done using GnuPG MPI library
config DIMLIB
- bool
+ tristate
help
Dynamic Interrupt Moderation library.
Implements an algorithm for dynamically changing CQ moderation values
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6352d59c57..59b6765d86 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -969,6 +969,37 @@ config DEBUG_STACKOVERFLOW
If in doubt, say "N".
+config CODE_TAGGING
+ bool
+ select KALLSYMS
+
+config MEM_ALLOC_PROFILING
+ bool "Enable memory allocation profiling"
+ default n
+ depends on PROC_FS
+ depends on !DEBUG_FORCE_WEAK_PER_CPU
+ select CODE_TAGGING
+ select PAGE_EXTENSION
+ select SLAB_OBJ_EXT
+ help
+ Track allocation source code and record total allocation size
+ initiated at that code location. The mechanism can be used to track
+ memory leaks with a low performance and memory impact.
+
+config MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
+ bool "Enable memory allocation profiling by default"
+ default y
+ depends on MEM_ALLOC_PROFILING
+
+config MEM_ALLOC_PROFILING_DEBUG
+ bool "Memory allocation profiler debugging"
+ default n
+ depends on MEM_ALLOC_PROFILING
+ select MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
+ help
+ Adds warnings with helpful error messages for memory allocation
+ profiling.
+
source "lib/Kconfig.kasan"
source "lib/Kconfig.kfence"
source "lib/Kconfig.kmsan"
@@ -1030,6 +1061,20 @@ config SOFTLOCKUP_DETECTOR
chance to run. The current stack trace is displayed upon
detection and the system will stay locked up.
+config SOFTLOCKUP_DETECTOR_INTR_STORM
+ bool "Detect Interrupt Storm in Soft Lockups"
+ depends on SOFTLOCKUP_DETECTOR && IRQ_TIME_ACCOUNTING
+ select GENERIC_IRQ_STAT_SNAPSHOT
+ default y if NR_CPUS <= 128
+ help
+ Say Y here to enable the kernel to detect an interrupt storm
+ during "soft lockups".
+
+ "soft lockups" can be caused by a variety of reasons. If one is
+ caused by an interrupt storm, then the storming interrupts will not
+ be on the callstack. To detect this case, it is necessary to report
+ the CPU stats and the interrupt counts during the "soft lockups".
+
config BOOTPARAM_SOFTLOCKUP_PANIC
bool "Panic (Reboot) On Soft Lockups"
depends on SOFTLOCKUP_DETECTOR
@@ -1251,7 +1296,7 @@ config SCHED_INFO
config SCHEDSTATS
bool "Collect scheduler statistics"
- depends on DEBUG_KERNEL && PROC_FS
+ depends on PROC_FS
select SCHED_INFO
help
If you say Y here, additional code will be inserted into the
@@ -1304,7 +1349,7 @@ config PROVE_LOCKING
select DEBUG_SPINLOCK
select DEBUG_MUTEXES if !PREEMPT_RT
select DEBUG_RT_MUTEXES if RT_MUTEXES
- select DEBUG_RWSEMS
+ select DEBUG_RWSEMS if !PREEMPT_RT
select DEBUG_WW_MUTEX_SLOWPATH
select DEBUG_LOCK_ALLOC
select PREEMPT_COUNT if !ARCH_NO_PREEMPT
@@ -1427,7 +1472,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
config DEBUG_RWSEMS
bool "RW Semaphore debugging: basic checks"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !PREEMPT_RT
help
This debugging feature allows mismatched rw semaphore locks
and unlocks to be detected and reported.
@@ -2086,7 +2131,7 @@ config KCOV
depends on ARCH_HAS_KCOV
depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
- GCC_VERSION >= 120000 || CLANG_VERSION >= 130000
+ GCC_VERSION >= 120000 || CC_IS_CLANG
select DEBUG_FS
select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
select OBJTOOL if HAVE_NOINSTR_HACK
@@ -2128,7 +2173,7 @@ config KCOV_IRQ_AREA_SIZE
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
- def_bool y
+ default y
if RUNTIME_TESTING_MENU
@@ -2143,7 +2188,7 @@ config TEST_DHRY
To run the benchmark, it needs to be enabled explicitly, either from
the kernel command line (when built-in), or from userspace (when
- built-in or modular.
+ built-in or modular).
Run once during kernel boot:
@@ -2354,11 +2399,15 @@ config ASYNC_RAID6_TEST
config TEST_HEXDUMP
tristate "Test functions located in the hexdump module at runtime"
-config STRING_SELFTEST
- tristate "Test string functions at runtime"
+config STRING_KUNIT_TEST
+ tristate "KUnit test string functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
-config TEST_STRING_HELPERS
- tristate "Test functions located in the string_helpers module at runtime"
+config STRING_HELPERS_KUNIT_TEST
+ tristate "KUnit test string helpers at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
@@ -2433,7 +2482,6 @@ config TEST_LKM
config TEST_BITOPS
tristate "Test module for compilation of bitops operations"
- depends on m
help
This builds the "test_bitops" module that is much like the
TEST_LKM module except that it does a basic exercise of the
@@ -2700,18 +2748,6 @@ config MEMCPY_KUNIT_TEST
If unsure, say N.
-config MEMCPY_SLOW_KUNIT_TEST
- bool "Include exhaustive memcpy tests"
- depends on MEMCPY_KUNIT_TEST
- default y
- help
- Some memcpy tests are quite exhaustive in checking for overlaps
- and bit ranges. These can be very slow, so they are split out
- as a separate config, in case they need to be disabled.
-
- Note this config option will be replaced by the use of KUnit test
- attributes.
-
config IS_SIGNED_TYPE_KUNIT_TEST
tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2750,7 +2786,7 @@ config STACKINIT_KUNIT_TEST
config FORTIFY_KUNIT_TEST
tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS
- depends on KUNIT && FORTIFY_SOURCE
+ depends on KUNIT
default KUNIT_ALL_TESTS
help
Builds unit tests for checking internals of FORTIFY_SOURCE as used
@@ -2767,16 +2803,6 @@ config HW_BREAKPOINT_KUNIT_TEST
If unsure, say N.
-config STRCAT_KUNIT_TEST
- tristate "Test strcat() family of functions at runtime" if !KUNIT_ALL_TESTS
- depends on KUNIT
- default KUNIT_ALL_TESTS
-
-config STRSCPY_KUNIT_TEST
- tristate "Test strscpy*() family of functions at runtime" if !KUNIT_ALL_TESTS
- depends on KUNIT
- default KUNIT_ALL_TESTS
-
config SIPHASH_KUNIT_TEST
tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2859,28 +2885,6 @@ config TEST_MEMCAT_P
If unsure, say N.
-config TEST_LIVEPATCH
- tristate "Test livepatching"
- default n
- depends on DYNAMIC_DEBUG
- depends on LIVEPATCH
- depends on m
- help
- Test kernel livepatching features for correctness. The tests will
- load test modules that will be livepatched in various scenarios.
-
- To run all the livepatching tests:
-
- make -C tools/testing/selftests TARGETS=livepatch run_tests
-
- Alternatively, individual tests may be invoked:
-
- tools/testing/selftests/livepatch/test-callbacks.sh
- tools/testing/selftests/livepatch/test-livepatch.sh
- tools/testing/selftests/livepatch/test-shadow-vars.sh
-
- If unsure, say N.
-
config TEST_OBJAGG
tristate "Perform selftest on object aggreration manager"
default n
@@ -2921,7 +2925,7 @@ config TEST_FREE_PAGES
config TEST_FPU
tristate "Test floating point operations in kernel space"
- depends on X86 && !KCOV_INSTRUMENT_ALL
+ depends on ARCH_HAS_KERNEL_FPU_SUPPORT && !KCOV_INSTRUMENT_ALL
help
Enable this option to add /sys/kernel/debug/selftest_helpers/test_fpu
which will trigger a sequence of floating point operations. This is used
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index e6eda054ab..98016e137b 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -158,7 +158,7 @@ config KASAN_STACK
out-of-bounds bugs in stack variables.
With Clang, stack instrumentation has a problem that causes excessive
- stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus,
+ stack usage, see https://llvm.org/pr38809. Thus,
with Clang, this option is deemed unsafe.
This option is always disabled when compile-testing with Clang to
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 3b9a440084..537e1b3f57 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -43,7 +43,7 @@ config KGDB_SERIAL_CONSOLE
tristate "KGDB: use kgdb over the serial console"
select CONSOLE_POLL
select MAGIC_SYSRQ
- depends on TTY && HW_CONSOLE
+ depends on TTY && VT
default y
help
Share a serial console with kgdb. Sysrq-g must be used
@@ -122,6 +122,7 @@ config KDB_DEFAULT_ENABLE
config KDB_KEYBOARD
bool "KGDB_KDB: keyboard as input device"
depends on VT && KGDB_KDB && !PARISC
+ depends on HAS_IOPORT
default n
help
KDB can use a PS/2 type keyboard for an input device
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 59e21bfec1..bdda600f8d 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
-config ARCH_HAS_UBSAN_SANITIZE_ALL
+config ARCH_HAS_UBSAN
bool
menuconfig UBSAN
bool "Undefined behaviour sanity checker"
+ depends on ARCH_HAS_UBSAN
help
This option enables the Undefined Behaviour sanity checker.
Compile-time instrumentation is used to detect various undefined
@@ -87,7 +88,6 @@ config UBSAN_LOCAL_BOUNDS
config UBSAN_SHIFT
bool "Perform checking for bit-shift overflows"
- default UBSAN
depends on $(cc-option,-fsanitize=shift)
help
This option enables -fsanitize=shift which checks for bit-shift
@@ -116,6 +116,22 @@ config UBSAN_UNREACHABLE
This option enables -fsanitize=unreachable which checks for control
flow reaching an expected-to-be-unreachable position.
+config UBSAN_SIGNED_WRAP
+ bool "Perform checking for signed arithmetic wrap-around"
+ default UBSAN
+ depends on !COMPILE_TEST
+ # The no_sanitize attribute was introduced in GCC with version 8.
+ depends on !CC_IS_GCC || GCC_VERSION >= 80000
+ depends on $(cc-option,-fsanitize=signed-integer-overflow)
+ help
+ This option enables -fsanitize=signed-integer-overflow which checks
+ for wrap-around of any arithmetic operations with signed integers.
+ This currently performs nearly no instrumentation due to the
+ kernel's use of -fno-strict-overflow which converts all would-be
+ arithmetic undefined behavior into wrap-around arithmetic. Future
+ sanitizer versions will allow for wrap-around checking (rather than
+ exclusively undefined behavior).
+
config UBSAN_BOOL
bool "Perform checking for non-boolean values used as boolean"
default UBSAN
@@ -142,17 +158,6 @@ config UBSAN_ALIGNMENT
Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.
-config UBSAN_SANITIZE_ALL
- bool "Enable instrumentation for the entire kernel"
- depends on ARCH_HAS_UBSAN_SANITIZE_ALL
- default y
- help
- This option activates instrumentation for the entire kernel.
- If you don't enable this option, you have to explicitly specify
- UBSAN_SANITIZE := y for the files/directories you want to check for UB.
- Enabling this option will get kernel image size increased
- significantly.
-
config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
depends on m
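
A concrete case makes the UBSAN_SIGNED_WRAP help text above easier to follow. A minimal sketch, in plain C, of the arithmetic -fsanitize=signed-integer-overflow instruments (the function and values are invented for illustration; as the help text notes, the kernel's -fno-strict-overflow currently turns this into defined wrap-around, so little is actually flagged today):

    #include <limits.h>

    /*
     * Hypothetical example: at INT_MAX this addition wraps to INT_MIN.
     * A wrap-around-aware sanitizer would report the operation below;
     * under -fno-strict-overflow it is currently defined behavior.
     */
    int next_seq(int seq)
    {
            return seq + 1;     /* wraps when seq == INT_MAX */
    }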
diff --git a/lib/Makefile b/lib/Makefile
index 6b09731d8e..30337431d1 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -49,9 +49,9 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
generic-radix-tree.o bitmap-str.o
-obj-$(CONFIG_STRING_SELFTEST) += test_string.o
+obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
obj-y += string_helpers.o
-obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
+obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
obj-y += hexdump.o
obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
@@ -69,6 +69,7 @@ obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
+CFLAGS_test_ubsan.o += $(call cc-disable-warning, unused-but-set-variable)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
@@ -109,32 +110,10 @@ CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
obj-$(CONFIG_TEST_OBJPOOL) += test_objpool.o
-#
-# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
-# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
-# get appended last to CFLAGS and thus override those previous compiler options.
-#
-FPU_CFLAGS := -msse -msse2
-ifdef CONFIG_CC_IS_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
-#
-# The "-msse" in the first argument is there so that the
-# -mpreferred-stack-boundary=3 build error:
-#
-# -mpreferred-stack-boundary=3 is not between 4 and 12
-#
-# can be triggered. Otherwise gcc doesn't complain.
-FPU_CFLAGS += -mhard-float
-FPU_CFLAGS += $(call cc-option,-msse -mpreferred-stack-boundary=3,-mpreferred-stack-boundary=4)
-endif
-
obj-$(CONFIG_TEST_FPU) += test_fpu.o
-CFLAGS_test_fpu.o += $(FPU_CFLAGS)
-
-obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
+test_fpu-y := test_fpu_glue.o test_fpu_impl.o
+CFLAGS_test_fpu_impl.o += $(CC_FLAGS_FPU)
+CFLAGS_REMOVE_test_fpu_impl.o += $(CC_FLAGS_NO_FPU)
# Some KUnit files (hooks.o) need to be built-in even when KUnit is a module,
# so we can't just use obj-$(CONFIG_KUNIT).
@@ -153,7 +132,6 @@ CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
obj-y += math/ crypto/
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
-obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
@@ -235,9 +213,13 @@ obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
of-reconfig-notifier-error-inject.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+obj-$(CONFIG_CODE_TAGGING) += codetag.o
+obj-$(CONFIG_MEM_ALLOC_PROFILING) += alloc_tag.o
+
lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
+obj-$(CONFIG_ARCH_NEED_CMPXCHG_1_EMU) += cmpxchg-emu.o
obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o
#ensure exported functions have prototypes
@@ -354,7 +336,7 @@ $(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \
$(call cmd,build_OID_registry)
quiet_cmd_build_OID_registry = GEN $@
- cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
+ cmd_build_OID_registry = perl $(src)/build_OID_registry $< $@
clean-files += oid_registry_data.c
@@ -401,10 +383,10 @@ obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
-obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o
-obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o
obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
@@ -412,8 +394,8 @@ obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
# FORTIFY_SOURCE compile-time behavior tests
-TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c)
-TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
+TEST_FORTIFY_SRCS = $(wildcard $(src)/test_fortify/*-*.c)
+TEST_FORTIFY_LOGS = $(patsubst $(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
TEST_FORTIFY_LOG = test_fortify.log
quiet_cmd_test_fortify = TEST $@
@@ -444,3 +426,7 @@ $(obj)/$(TEST_FORTIFY_LOG): $(addprefix $(obj)/, $(TEST_FORTIFY_LOGS)) FORCE
ifeq ($(CONFIG_FORTIFY_SOURCE),y)
$(obj)/string.o: $(obj)/$(TEST_FORTIFY_LOG)
endif
+
+# Some architectures define __NO_FORTIFY if __SANITIZE_ADDRESS__ is undefined.
+# Pass CFLAGS_KASAN to avoid warnings.
+$(foreach x, $(patsubst %.log,%.o,$(TEST_FORTIFY_LOGS)), $(eval KASAN_SANITIZE_$(x) := y))
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
new file mode 100644
index 0000000000..c347b8b72d
--- /dev/null
+++ b/lib/alloc_tag.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/alloc_tag.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/page_ext.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_buf.h>
+#include <linux/seq_file.h>
+
+static struct codetag_type *alloc_tag_cttype;
+
+DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
+EXPORT_SYMBOL(_shared_alloc_tag);
+
+DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
+ mem_alloc_profiling_key);
+
+struct allocinfo_private {
+ struct codetag_iterator iter;
+ bool print_header;
+};
+
+static void *allocinfo_start(struct seq_file *m, loff_t *pos)
+{
+ struct allocinfo_private *priv;
+ struct codetag *ct;
+ loff_t node = *pos;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ m->private = priv;
+ if (!priv)
+ return NULL;
+
+ priv->print_header = (node == 0);
+ codetag_lock_module_list(alloc_tag_cttype, true);
+ priv->iter = codetag_get_ct_iter(alloc_tag_cttype);
+ while ((ct = codetag_next_ct(&priv->iter)) != NULL && node)
+ node--;
+
+ return ct ? priv : NULL;
+}
+
+static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
+{
+ struct allocinfo_private *priv = (struct allocinfo_private *)arg;
+ struct codetag *ct = codetag_next_ct(&priv->iter);
+
+ (*pos)++;
+ if (!ct)
+ return NULL;
+
+ return priv;
+}
+
+static void allocinfo_stop(struct seq_file *m, void *arg)
+{
+ struct allocinfo_private *priv = (struct allocinfo_private *)m->private;
+
+ if (priv) {
+ codetag_lock_module_list(alloc_tag_cttype, false);
+ kfree(priv);
+ }
+}
+
+static void print_allocinfo_header(struct seq_buf *buf)
+{
+ /* Output format version, so we can change it. */
+ seq_buf_printf(buf, "allocinfo - version: 1.0\n");
+ seq_buf_printf(buf, "# <size> <calls> <tag info>\n");
+}
+
+static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
+{
+ struct alloc_tag *tag = ct_to_alloc_tag(ct);
+ struct alloc_tag_counters counter = alloc_tag_read(tag);
+ s64 bytes = counter.bytes;
+
+ seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
+ codetag_to_text(out, ct);
+ seq_buf_putc(out, ' ');
+ seq_buf_putc(out, '\n');
+}
+
+static int allocinfo_show(struct seq_file *m, void *arg)
+{
+ struct allocinfo_private *priv = (struct allocinfo_private *)arg;
+ char *bufp;
+ size_t n = seq_get_buf(m, &bufp);
+ struct seq_buf buf;
+
+ seq_buf_init(&buf, bufp, n);
+ if (priv->print_header) {
+ print_allocinfo_header(&buf);
+ priv->print_header = false;
+ }
+ alloc_tag_to_text(&buf, priv->iter.ct);
+ seq_commit(m, seq_buf_used(&buf));
+ return 0;
+}
+
+static const struct seq_operations allocinfo_seq_op = {
+ .start = allocinfo_start,
+ .next = allocinfo_next,
+ .stop = allocinfo_stop,
+ .show = allocinfo_show,
+};
+
+size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
+{
+ struct codetag_iterator iter;
+ struct codetag *ct;
+ struct codetag_bytes n;
+ unsigned int i, nr = 0;
+
+ if (can_sleep)
+ codetag_lock_module_list(alloc_tag_cttype, true);
+ else if (!codetag_trylock_module_list(alloc_tag_cttype))
+ return 0;
+
+ iter = codetag_get_ct_iter(alloc_tag_cttype);
+ while ((ct = codetag_next_ct(&iter))) {
+ struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct));
+
+ n.ct = ct;
+ n.bytes = counter.bytes;
+
+ for (i = 0; i < nr; i++)
+ if (n.bytes > tags[i].bytes)
+ break;
+
+ if (i < count) {
+ nr -= nr == count;
+ memmove(&tags[i + 1],
+ &tags[i],
+ sizeof(tags[0]) * (nr - i));
+ nr++;
+ tags[i] = n;
+ }
+ }
+
+ codetag_lock_module_list(alloc_tag_cttype, false);
+
+ return nr;
+}
+
+static void __init procfs_init(void)
+{
+ proc_create_seq("allocinfo", 0400, NULL, &allocinfo_seq_op);
+}
+
+static bool alloc_tag_module_unload(struct codetag_type *cttype,
+ struct codetag_module *cmod)
+{
+ struct codetag_iterator iter = codetag_get_ct_iter(cttype);
+ struct alloc_tag_counters counter;
+ bool module_unused = true;
+ struct alloc_tag *tag;
+ struct codetag *ct;
+
+ for (ct = codetag_next_ct(&iter); ct; ct = codetag_next_ct(&iter)) {
+ if (iter.cmod != cmod)
+ continue;
+
+ tag = ct_to_alloc_tag(ct);
+ counter = alloc_tag_read(tag);
+
+ if (WARN(counter.bytes,
+ "%s:%u module %s func:%s has %llu allocated at module unload",
+ ct->filename, ct->lineno, ct->modname, ct->function, counter.bytes))
+ module_unused = false;
+ }
+
+ return module_unused;
+}
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
+static bool mem_profiling_support __meminitdata = true;
+#else
+static bool mem_profiling_support __meminitdata;
+#endif
+
+static int __init setup_early_mem_profiling(char *str)
+{
+ bool enable;
+
+ if (!str || !str[0])
+ return -EINVAL;
+
+ if (!strncmp(str, "never", 5)) {
+ enable = false;
+ mem_profiling_support = false;
+ } else {
+ int res;
+
+ res = kstrtobool(str, &enable);
+ if (res)
+ return res;
+
+ mem_profiling_support = true;
+ }
+
+ if (enable != static_key_enabled(&mem_alloc_profiling_key)) {
+ if (enable)
+ static_branch_enable(&mem_alloc_profiling_key);
+ else
+ static_branch_disable(&mem_alloc_profiling_key);
+ }
+
+ return 0;
+}
+early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);
+
+static __init bool need_page_alloc_tagging(void)
+{
+ return mem_profiling_support;
+}
+
+static __init void init_page_alloc_tagging(void)
+{
+}
+
+struct page_ext_operations page_alloc_tagging_ops = {
+ .size = sizeof(union codetag_ref),
+ .need = need_page_alloc_tagging,
+ .init = init_page_alloc_tagging,
+};
+EXPORT_SYMBOL(page_alloc_tagging_ops);
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table memory_allocation_profiling_sysctls[] = {
+ {
+ .procname = "mem_profiling",
+ .data = &mem_alloc_profiling_key,
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+ .mode = 0444,
+#else
+ .mode = 0644,
+#endif
+ .proc_handler = proc_do_static_key,
+ },
+ { }
+};
+
+static void __init sysctl_init(void)
+{
+ if (!mem_profiling_support)
+ memory_allocation_profiling_sysctls[0].mode = 0444;
+
+ register_sysctl_init("vm", memory_allocation_profiling_sysctls);
+}
+#else /* CONFIG_SYSCTL */
+static inline void sysctl_init(void) {}
+#endif /* CONFIG_SYSCTL */
+
+static int __init alloc_tag_init(void)
+{
+ const struct codetag_type_desc desc = {
+ .section = "alloc_tags",
+ .tag_size = sizeof(struct alloc_tag),
+ .module_unload = alloc_tag_module_unload,
+ };
+
+ alloc_tag_cttype = codetag_register_type(&desc);
+ if (IS_ERR(alloc_tag_cttype))
+ return PTR_ERR(alloc_tag_cttype);
+
+ sysctl_init();
+ procfs_init();
+
+ return 0;
+}
+module_init(alloc_tag_init);
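
alloc_tag_top_users() above gives in-kernel consumers the same data that /proc/allocinfo exposes, already sorted by outstanding bytes. A minimal sketch of a hypothetical caller (the loop bound and the printing are invented for illustration; the struct codetag_bytes fields follow the usage in the function itself):

    /* Sketch: log the five largest outstanding allocation sites. */
    static void report_top_allocators(void)
    {
            struct codetag_bytes tags[5];
            size_t i, nr;

            nr = alloc_tag_top_users(tags, ARRAY_SIZE(tags), true /* can_sleep */);
            for (i = 0; i < nr; i++)
                    pr_info("%s:%u func:%s bytes:%lld\n",
                            tags[i].ct->filename, tags[i].ct->lineno,
                            tags[i].ct->function, tags[i].bytes);
    }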
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index ca0b4f360c..388e656ac9 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -938,7 +938,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
edit->leaf_p = &new_n0->slots[0];
pr_devel("<--%s() = ok [split shortcut]\n", __func__);
- return edit;
+ return true;
}
/**
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 09522af227..b976928549 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -348,6 +348,13 @@ unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_weight_and);
+unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ return BITMAP_WEIGHT(bitmap1[idx] & ~bitmap2[idx], bits);
+}
+EXPORT_SYMBOL(__bitmap_weight_andnot);
+
void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
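
__bitmap_weight_andnot() counts the bits set in bitmap1 but clear in bitmap2, i.e. weight(bitmap1 & ~bitmap2), without materializing the intermediate bitmap. A minimal sketch, assuming the usual bitmap_weight_andnot() inline wrapper accompanies this export the same way bitmap_weight_and() wraps __bitmap_weight_and():

    DECLARE_BITMAP(online, 64);
    DECLARE_BITMAP(isolated, 64);
    unsigned int nr;

    bitmap_fill(online, 64);        /* all 64 bits set */
    bitmap_zero(isolated, 64);
    __set_bit(3, isolated);

    nr = bitmap_weight_andnot(online, isolated, 64);  /* 64 - 1 = 63 */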
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 8841554432..97f8911ea3 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -901,7 +901,8 @@ static int __init xbc_parse_tree(void)
}
/**
- * xbc_exit() - Clean up all parsed bootconfig
+ * _xbc_exit() - Clean up all parsed bootconfig
+ * @early: Set true if this is called before the buddy system is initialized.
*
* This clears all data structures of parsed bootconfig on memory.
* If you need to reuse xbc_init() with new boot config, you can
diff --git a/lib/build_OID_registry b/lib/build_OID_registry
index d7fc32ea8a..8267e8d713 100755
--- a/lib/build_OID_registry
+++ b/lib/build_OID_registry
@@ -8,6 +8,7 @@
#
use strict;
+use Cwd qw(abs_path);
my @names = ();
my @oids = ();
@@ -17,6 +18,8 @@ if ($#ARGV != 1) {
exit(2);
}
+my $abs_srctree = abs_path($ENV{'srctree'});
+
#
# Open the file to read from
#
@@ -35,7 +38,9 @@ close IN_FILE || die;
#
open C_FILE, ">$ARGV[1]" or die;
print C_FILE "/*\n";
-print C_FILE " * Automatically generated by ", $0, ". Do not edit\n";
+my $scriptname = $0;
+$scriptname =~ s#^\Q$abs_srctree/\E##;
+print C_FILE " * Automatically generated by ", $scriptname, ". Do not edit\n";
print C_FILE " */\n";
#
diff --git a/lib/buildid.c b/lib/buildid.c
index e3a7acdeef..7954dd92e3 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -140,7 +140,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
return -EFAULT; /* page not mapped */
ret = -EINVAL;
- page_addr = kmap_atomic(page);
+ page_addr = kmap_local_page(page);
ehdr = (Elf32_Ehdr *)page_addr;
/* compare magic x7f "ELF" */
@@ -156,7 +156,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
ret = get_build_id_64(page_addr, build_id, size);
out:
- kunmap_atomic(page_addr);
+ kunmap_local(page_addr);
put_page(page);
return ret;
}
@@ -174,7 +174,7 @@ int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
return parse_build_id_buf(build_id, NULL, buf, buf_size);
}
-#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
/**
@@ -182,8 +182,8 @@ unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
*/
void __init init_vmlinux_build_id(void)
{
- extern const void __start_notes __weak;
- extern const void __stop_notes __weak;
+ extern const void __start_notes;
+ extern const void __stop_notes;
unsigned int size = &__stop_notes - &__start_notes;
build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
diff --git a/lib/closure.c b/lib/closure.c
index c16540552d..116afae2ee 100644
--- a/lib/closure.c
+++ b/lib/closure.c
@@ -13,14 +13,25 @@
#include <linux/seq_file.h>
#include <linux/sched/debug.h>
-static inline void closure_put_after_sub(struct closure *cl, int flags)
+static inline void closure_put_after_sub_checks(int flags)
{
int r = flags & CLOSURE_REMAINING_MASK;
- BUG_ON(flags & CLOSURE_GUARD_MASK);
- BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
+ if (WARN(flags & CLOSURE_GUARD_MASK,
+ "closure has guard bits set: %x (%u)",
+ flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
+ r &= ~CLOSURE_GUARD_MASK;
+
+ WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
+ "closure ref hit 0 with incorrect flags set: %x (%u)",
+ flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+}
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+ closure_put_after_sub_checks(flags);
- if (!r) {
+ if (!(flags & CLOSURE_REMAINING_MASK)) {
smp_acquire__after_ctrl_dep();
cl->closure_get_happened = false;
@@ -139,6 +150,78 @@ void __sched __closure_sync(struct closure *cl)
}
EXPORT_SYMBOL(__closure_sync);
+/*
+ * closure_return_sync - finish running a closure, synchronously (i.e. waiting
+ * for outstanding get()s to finish) and returning once closure refcount is 0.
+ *
+ * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
+ * closure_get_not_zero() calls waill fail.
+ */
+void __sched closure_return_sync(struct closure *cl)
+{
+ struct closure_syncer s = { .task = current };
+
+ cl->s = &s;
+ set_closure_fn(cl, closure_sync_fn, NULL);
+
+ unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
+ &cl->remaining);
+
+ closure_put_after_sub_checks(flags);
+
+ if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
+ schedule();
+ }
+
+ __set_current_state(TASK_RUNNING);
+ }
+
+ if (cl->parent)
+ closure_put(cl->parent);
+}
+EXPORT_SYMBOL(closure_return_sync);
+
+int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
+{
+ struct closure_syncer s = { .task = current };
+ int ret = 0;
+
+ cl->s = &s;
+ continue_at(cl, closure_sync_fn, NULL);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (s.done)
+ break;
+ if (!timeout) {
+ /*
+ * Carefully undo the continue_at() - but only if it
+ * hasn't completed, i.e. the final closure_put() hasn't
+ * happened yet:
+ */
+ unsigned old, new, v = atomic_read(&cl->remaining);
+ do {
+ old = v;
+ if (!old || (old & CLOSURE_RUNNING))
+ goto success;
+
+ new = old + CLOSURE_REMAINING_INITIALIZER;
+ } while ((v = atomic_cmpxchg(&cl->remaining, old, new)) != old);
+ ret = -ETIME;
+ }
+
+ timeout = schedule_timeout(timeout);
+ }
+success:
+ __set_current_state(TASK_RUNNING);
+ return ret;
+}
+EXPORT_SYMBOL(__closure_sync_timeout);
+
#ifdef CONFIG_DEBUG_CLOSURES
static LIST_HEAD(closure_list);
@@ -161,6 +244,9 @@ void closure_debug_destroy(struct closure *cl)
{
unsigned long flags;
+ if (cl->magic == CLOSURE_MAGIC_STACK)
+ return;
+
BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
cl->magic = CLOSURE_MAGIC_DEAD;
diff --git a/lib/cmpxchg-emu.c b/lib/cmpxchg-emu.c
new file mode 100644
index 0000000000..27f6f97cb6
--- /dev/null
+++ b/lib/cmpxchg-emu.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Emulated 1-byte cmpxchg operation for architectures lacking direct
+ * support for this size. This is implemented in terms of 4-byte cmpxchg
+ * operations.
+ *
+ * Copyright (C) 2024 Paul E. McKenney.
+ */
+
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/instrumented.h>
+#include <linux/atomic.h>
+#include <linux/panic.h>
+#include <linux/bug.h>
+#include <asm-generic/rwonce.h>
+#include <linux/cmpxchg-emu.h>
+
+union u8_32 {
+ u8 b[4];
+ u32 w;
+};
+
+/* Emulate one-byte cmpxchg() in terms of 4-byte cmpxchg. */
+uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new)
+{
+ u32 *p32 = (u32 *)(((uintptr_t)p) & ~0x3);
+ int i = ((uintptr_t)p) & 0x3;
+ union u8_32 old32;
+ union u8_32 new32;
+ u32 ret;
+
+ ret = READ_ONCE(*p32);
+ do {
+ old32.w = ret;
+ if (old32.b[i] != old)
+ return old32.b[i];
+ new32.w = old32.w;
+ new32.b[i] = new;
+ instrument_atomic_read_write(p, 1);
+ ret = data_race(cmpxchg(p32, old32.w, new32.w)); // Overridden above.
+ } while (ret != old32.w);
+ return old;
+}
+EXPORT_SYMBOL_GPL(cmpxchg_emu_u8);
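
To make the byte-in-word arithmetic above concrete, a worked example (little-endian layout assumed; the address is invented for illustration):

    /*
     * p = (u8 *)0x1003: p32 = (u32 *)0x1000 and i = 3, so the target
     * byte is b[3]. If the word holds 0xDDCCBBAA (b[3] == 0xDD), then
     * the call below builds new32.w == 0x11CCBBAA and loops on the
     * 4-byte cmpxchg until no concurrent writer has changed the word.
     * It returns 0xDD on success, or the current byte value if
     * b[3] no longer equals 0xDD.
     */
    u8 prev = cmpxchg_emu_u8(p, 0xDD, 0x11);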
diff --git a/lib/codetag.c b/lib/codetag.c
new file mode 100644
index 0000000000..5ace625f23
--- /dev/null
+++ b/lib/codetag.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/codetag.h>
+#include <linux/idr.h>
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/seq_buf.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+struct codetag_type {
+ struct list_head link;
+ unsigned int count;
+ struct idr mod_idr;
+ struct rw_semaphore mod_lock; /* protects mod_idr */
+ struct codetag_type_desc desc;
+};
+
+struct codetag_range {
+ struct codetag *start;
+ struct codetag *stop;
+};
+
+struct codetag_module {
+ struct module *mod;
+ struct codetag_range range;
+};
+
+static DEFINE_MUTEX(codetag_lock);
+static LIST_HEAD(codetag_types);
+
+void codetag_lock_module_list(struct codetag_type *cttype, bool lock)
+{
+ if (lock)
+ down_read(&cttype->mod_lock);
+ else
+ up_read(&cttype->mod_lock);
+}
+
+bool codetag_trylock_module_list(struct codetag_type *cttype)
+{
+ return down_read_trylock(&cttype->mod_lock) != 0;
+}
+
+struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype)
+{
+ struct codetag_iterator iter = {
+ .cttype = cttype,
+ .cmod = NULL,
+ .mod_id = 0,
+ .ct = NULL,
+ };
+
+ return iter;
+}
+
+static inline struct codetag *get_first_module_ct(struct codetag_module *cmod)
+{
+ return cmod->range.start < cmod->range.stop ? cmod->range.start : NULL;
+}
+
+static inline
+struct codetag *get_next_module_ct(struct codetag_iterator *iter)
+{
+ struct codetag *res = (struct codetag *)
+ ((char *)iter->ct + iter->cttype->desc.tag_size);
+
+ return res < iter->cmod->range.stop ? res : NULL;
+}
+
+struct codetag *codetag_next_ct(struct codetag_iterator *iter)
+{
+ struct codetag_type *cttype = iter->cttype;
+ struct codetag_module *cmod;
+ struct codetag *ct;
+
+ lockdep_assert_held(&cttype->mod_lock);
+
+ if (unlikely(idr_is_empty(&cttype->mod_idr)))
+ return NULL;
+
+ ct = NULL;
+ while (true) {
+ cmod = idr_find(&cttype->mod_idr, iter->mod_id);
+
+ /* If module was removed move to the next one */
+ if (!cmod)
+ cmod = idr_get_next_ul(&cttype->mod_idr,
+ &iter->mod_id);
+
+ /* Exit if no more modules */
+ if (!cmod)
+ break;
+
+ if (cmod != iter->cmod) {
+ iter->cmod = cmod;
+ ct = get_first_module_ct(cmod);
+ } else
+ ct = get_next_module_ct(iter);
+
+ if (ct)
+ break;
+
+ iter->mod_id++;
+ }
+
+ iter->ct = ct;
+ return ct;
+}
+
+void codetag_to_text(struct seq_buf *out, struct codetag *ct)
+{
+ if (ct->modname)
+ seq_buf_printf(out, "%s:%u [%s] func:%s",
+ ct->filename, ct->lineno,
+ ct->modname, ct->function);
+ else
+ seq_buf_printf(out, "%s:%u func:%s",
+ ct->filename, ct->lineno, ct->function);
+}
+
+static inline size_t range_size(const struct codetag_type *cttype,
+ const struct codetag_range *range)
+{
+ return ((char *)range->stop - (char *)range->start) /
+ cttype->desc.tag_size;
+}
+
+#ifdef CONFIG_MODULES
+static void *get_symbol(struct module *mod, const char *prefix, const char *name)
+{
+ DECLARE_SEQ_BUF(sb, KSYM_NAME_LEN);
+ const char *buf;
+ void *ret;
+
+ seq_buf_printf(&sb, "%s%s", prefix, name);
+ if (seq_buf_has_overflowed(&sb))
+ return NULL;
+
+ buf = seq_buf_str(&sb);
+ preempt_disable();
+ ret = mod ?
+ (void *)find_kallsyms_symbol_value(mod, buf) :
+ (void *)kallsyms_lookup_name(buf);
+ preempt_enable();
+
+ return ret;
+}
+
+static struct codetag_range get_section_range(struct module *mod,
+ const char *section)
+{
+ return (struct codetag_range) {
+ get_symbol(mod, "__start_", section),
+ get_symbol(mod, "__stop_", section),
+ };
+}
+
+static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
+{
+ struct codetag_range range;
+ struct codetag_module *cmod;
+ int err;
+
+ range = get_section_range(mod, cttype->desc.section);
+ if (!range.start || !range.stop) {
+ pr_warn("Failed to load code tags of type %s from the module %s\n",
+ cttype->desc.section,
+ mod ? mod->name : "(built-in)");
+ return -EINVAL;
+ }
+
+ /* Ignore empty ranges */
+ if (range.start == range.stop)
+ return 0;
+
+ BUG_ON(range.start > range.stop);
+
+ cmod = kmalloc(sizeof(*cmod), GFP_KERNEL);
+ if (unlikely(!cmod))
+ return -ENOMEM;
+
+ cmod->mod = mod;
+ cmod->range = range;
+
+ down_write(&cttype->mod_lock);
+ err = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL);
+ if (err >= 0) {
+ cttype->count += range_size(cttype, &range);
+ if (cttype->desc.module_load)
+ cttype->desc.module_load(cttype, cmod);
+ }
+ up_write(&cttype->mod_lock);
+
+ if (err < 0) {
+ kfree(cmod);
+ return err;
+ }
+
+ return 0;
+}
+
+void codetag_load_module(struct module *mod)
+{
+ struct codetag_type *cttype;
+
+ if (!mod)
+ return;
+
+ mutex_lock(&codetag_lock);
+ list_for_each_entry(cttype, &codetag_types, link)
+ codetag_module_init(cttype, mod);
+ mutex_unlock(&codetag_lock);
+}
+
+bool codetag_unload_module(struct module *mod)
+{
+ struct codetag_type *cttype;
+ bool unload_ok = true;
+
+ if (!mod)
+ return true;
+
+ mutex_lock(&codetag_lock);
+ list_for_each_entry(cttype, &codetag_types, link) {
+ struct codetag_module *found = NULL;
+ struct codetag_module *cmod;
+ unsigned long mod_id, tmp;
+
+ down_write(&cttype->mod_lock);
+ idr_for_each_entry_ul(&cttype->mod_idr, cmod, tmp, mod_id) {
+ if (cmod->mod && cmod->mod == mod) {
+ found = cmod;
+ break;
+ }
+ }
+ if (found) {
+ if (cttype->desc.module_unload)
+ if (!cttype->desc.module_unload(cttype, cmod))
+ unload_ok = false;
+
+ cttype->count -= range_size(cttype, &cmod->range);
+ idr_remove(&cttype->mod_idr, mod_id);
+ kfree(cmod);
+ }
+ up_write(&cttype->mod_lock);
+ }
+ mutex_unlock(&codetag_lock);
+
+ return unload_ok;
+}
+
+#else /* CONFIG_MODULES */
+static int codetag_module_init(struct codetag_type *cttype, struct module *mod) { return 0; }
+#endif /* CONFIG_MODULES */
+
+struct codetag_type *
+codetag_register_type(const struct codetag_type_desc *desc)
+{
+ struct codetag_type *cttype;
+ int err;
+
+ BUG_ON(desc->tag_size <= 0);
+
+ cttype = kzalloc(sizeof(*cttype), GFP_KERNEL);
+ if (unlikely(!cttype))
+ return ERR_PTR(-ENOMEM);
+
+ cttype->desc = *desc;
+ idr_init(&cttype->mod_idr);
+ init_rwsem(&cttype->mod_lock);
+
+ err = codetag_module_init(cttype, NULL);
+ if (unlikely(err)) {
+ kfree(cttype);
+ return ERR_PTR(err);
+ }
+
+ mutex_lock(&codetag_lock);
+ list_add_tail(&cttype->link, &codetag_types);
+ mutex_unlock(&codetag_lock);
+
+ return cttype;
+}
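
codetag_register_type() is the entry point for new tag kinds; alloc_tag_init() in lib/alloc_tag.c above is its first user. A minimal sketch of another hypothetical registration (the "my_tags" section name and the bare struct codetag tag size are invented for illustration):

    static struct codetag_type *my_cttype;

    static int __init my_tag_init(void)
    {
            const struct codetag_type_desc desc = {
                    .section  = "my_tags",
                    .tag_size = sizeof(struct codetag),
            };

            my_cttype = codetag_register_type(&desc);
            return PTR_ERR_OR_ZERO(my_cttype);
    }
    module_init(my_tag_init);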
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 45436bfc6d..b01253cac7 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -8,6 +8,11 @@ config CRYPTO_LIB_UTILS
config CRYPTO_LIB_AES
tristate
+config CRYPTO_LIB_AESCFB
+ tristate
+ select CRYPTO_LIB_AES
+ select CRYPTO_LIB_UTILS
+
config CRYPTO_LIB_AESGCM
tristate
select CRYPTO_LIB_AES
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 8d1446c2be..969baab8c8 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -10,6 +10,9 @@ obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o
obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
libaes-y := aes.o
+obj-$(CONFIG_CRYPTO_LIB_AESCFB) += libaescfb.o
+libaescfb-y := aescfb.o
+
obj-$(CONFIG_CRYPTO_LIB_AESGCM) += libaesgcm.o
libaesgcm-y := aesgcm.o
diff --git a/lib/crypto/aescfb.c b/lib/crypto/aescfb.c
new file mode 100644
index 0000000000..749dc1258a
--- /dev/null
+++ b/lib/crypto/aescfb.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Minimal library implementation of AES in CFB mode
+ *
+ * Copyright 2023 Google LLC
+ */
+
+#include <linux/module.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+
+#include <asm/irqflags.h>
+
+static void aescfb_encrypt_block(const struct crypto_aes_ctx *ctx, void *dst,
+ const void *src)
+{
+ unsigned long flags;
+
+ /*
+ * In AES-CFB, the AES encryption operates on known 'plaintext' (the IV
+ * and ciphertext), making it susceptible to timing attacks on the
+ * encryption key. The AES library already mitigates this risk to some
+ * extent by pulling the entire S-box into the caches before doing any
+ * substitutions, but this strategy is more effective when running with
+ * interrupts disabled.
+ */
+ local_irq_save(flags);
+ aes_encrypt(ctx, dst, src);
+ local_irq_restore(flags);
+}
+
+/**
+ * aescfb_encrypt - Perform AES-CFB encryption on a block of data
+ *
+ * @ctx: The AES-CFB key schedule
+ * @dst: Pointer to the ciphertext output buffer
+ * @src: Pointer to the plaintext (may equal @dst for encryption in place)
+ * @len: The size in bytes of the plaintext and ciphertext.
+ * @iv: The initialization vector (IV) to use for this block of data
+ */
+void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE])
+{
+ u8 ks[AES_BLOCK_SIZE];
+ const u8 *v = iv;
+
+ while (len > 0) {
+ aescfb_encrypt_block(ctx, ks, v);
+ crypto_xor_cpy(dst, src, ks, min(len, AES_BLOCK_SIZE));
+ v = dst;
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ len -= AES_BLOCK_SIZE;
+ }
+
+ memzero_explicit(ks, sizeof(ks));
+}
+EXPORT_SYMBOL(aescfb_encrypt);
+
+/**
+ * aescfb_decrypt - Perform AES-CFB decryption on a block of data
+ *
+ * @ctx: The AES-CFB key schedule
+ * @dst: Pointer to the plaintext output buffer
+ * @src: Pointer to the ciphertext (may equal @dst for decryption in place)
+ * @len: The size in bytes of the plaintext and ciphertext.
+ * @iv: The initialization vector (IV) to use for this block of data
+ */
+void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE])
+{
+ u8 ks[2][AES_BLOCK_SIZE];
+
+ aescfb_encrypt_block(ctx, ks[0], iv);
+
+ for (int i = 0; len > 0; i ^= 1) {
+ if (len > AES_BLOCK_SIZE)
+ /*
+ * Generate the keystream for the next block before
+ * performing the XOR, as that may update in place and
+ * overwrite the ciphertext.
+ */
+ aescfb_encrypt_block(ctx, ks[!i], src);
+
+ crypto_xor_cpy(dst, src, ks[i], min(len, AES_BLOCK_SIZE));
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ len -= AES_BLOCK_SIZE;
+ }
+
+ memzero_explicit(ks, sizeof(ks));
+}
+EXPORT_SYMBOL(aescfb_decrypt);
+
+MODULE_DESCRIPTION("Generic AES-CFB library");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_LICENSE("GPL");
+
+#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+
+/*
+ * Test code below. Vectors taken from crypto/testmgr.h
+ */
+
+static struct {
+ u8 ptext[64];
+ u8 ctext[64];
+
+ u8 key[AES_MAX_KEY_SIZE];
+ u8 iv[AES_BLOCK_SIZE];
+
+ int klen;
+ int len;
+} const aescfb_tv[] __initconst = {
+ { /* From NIST SP800-38A */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+ "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
+ "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
+ "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
+ "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
+ "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
+ "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
+ .len = 64,
+ }, {
+ .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
+ "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+ .klen = 24,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
+ "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
+ "\x67\xce\x7f\x7f\x81\x17\x36\x21"
+ "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
+ "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
+ "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
+ "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
+ "\x42\xae\x8f\xba\x58\x4b\x09\xff",
+ .len = 64,
+ }, {
+ .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+ .klen = 32,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
+ "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
+ "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
+ "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
+ "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
+ "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
+ "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
+ "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
+ .len = 64,
+ }, { /* > 16 bytes, not a multiple of 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+ "\xc8",
+ .len = 17,
+ }, { /* < 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
+ .len = 7,
+ },
+};
+
+static int __init libaescfb_init(void)
+{
+ for (int i = 0; i < ARRAY_SIZE(aescfb_tv); i++) {
+ struct crypto_aes_ctx ctx;
+ u8 buf[64];
+
+ if (aes_expandkey(&ctx, aescfb_tv[i].key, aescfb_tv[i].klen)) {
+ pr_err("aes_expandkey() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ aescfb_encrypt(&ctx, buf, aescfb_tv[i].ptext, aescfb_tv[i].len,
+ aescfb_tv[i].iv);
+ if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) {
+ pr_err("aescfb_encrypt() #1 failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* decrypt in place */
+ aescfb_decrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv);
+ if (memcmp(buf, aescfb_tv[i].ptext, aescfb_tv[i].len)) {
+ pr_err("aescfb_decrypt() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* encrypt in place */
+ aescfb_encrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv);
+ if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) {
+ pr_err("aescfb_encrypt() #2 failed on vector %d\n", i);
+
+ return -ENODEV;
+ }
+
+ }
+ return 0;
+}
+module_init(libaescfb_init);
+
+static void __exit libaescfb_exit(void)
+{
+}
+module_exit(libaescfb_exit);
+#endif
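
A minimal sketch of a library caller, following the pattern of the self-test above (the key, IV and buffer contents are invented; in-place operation is allowed since @dst may equal @src):

    struct crypto_aes_ctx ctx;
    u8 key[AES_KEYSIZE_128], iv[AES_BLOCK_SIZE], buf[64];
    int err;

    err = aes_expandkey(&ctx, key, sizeof(key));
    if (!err) {
            aescfb_encrypt(&ctx, buf, buf, sizeof(buf), iv); /* in place */
            aescfb_decrypt(&ctx, buf, buf, sizeof(buf), iv); /* round-trips */
    }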
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index fb12a9bacd..7cea91e193 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -78,16 +78,17 @@ static bool obj_freeing;
/* The number of objs on the global free list */
static int obj_nr_tofree;
-static int debug_objects_maxchain __read_mostly;
-static int __maybe_unused debug_objects_maxchecked __read_mostly;
-static int debug_objects_fixups __read_mostly;
-static int debug_objects_warnings __read_mostly;
-static int debug_objects_enabled __read_mostly
- = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
-static int debug_objects_pool_size __read_mostly
- = ODEBUG_POOL_SIZE;
-static int debug_objects_pool_min_level __read_mostly
- = ODEBUG_POOL_MIN_LEVEL;
+static int __data_racy debug_objects_maxchain __read_mostly;
+static int __data_racy __maybe_unused debug_objects_maxchecked __read_mostly;
+static int __data_racy debug_objects_fixups __read_mostly;
+static int __data_racy debug_objects_warnings __read_mostly;
+static int __data_racy debug_objects_enabled __read_mostly
+ = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
+static int __data_racy debug_objects_pool_size __read_mostly
+ = ODEBUG_POOL_SIZE;
+static int __data_racy debug_objects_pool_min_level __read_mostly
+ = ODEBUG_POOL_MIN_LEVEL;
+
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache *obj_cache __ro_after_init;
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 3518e7394e..ca736166f1 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -232,7 +232,8 @@ static int INIT get_next_block(struct bunzip_data *bd)
RUNB) */
symCount = symTotal+2;
for (j = 0; j < groupCount; j++) {
- unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
+ unsigned char length[MAX_SYMBOLS];
+ unsigned short temp[MAX_HUFCODE_BITS+1];
int minLen, maxLen, pp;
/* Read Huffman code lengths for each symbol. They're
stored in a way similar to mtf; record a starting
diff --git a/lib/devres.c b/lib/devres.c
index c44f104b58..4fc152de6d 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/err.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/gfp.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/errno.h>
#include <linux/export.h>
+#include <linux/gfp_types.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/of_address.h>
+#include <linux/types.h>
enum devm_ioremap_type {
DEVM_IOREMAP = 0,
@@ -125,12 +128,13 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
resource_size_t size;
void __iomem *dest_ptr;
char *pretty_name;
+ int ret;
BUG_ON(!dev);
if (!res || resource_type(res) != IORESOURCE_MEM) {
- dev_err(dev, "invalid resource %pR\n", res);
- return IOMEM_ERR_PTR(-EINVAL);
+ ret = dev_err_probe(dev, -EINVAL, "invalid resource %pR\n", res);
+ return IOMEM_ERR_PTR(ret);
}
if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
@@ -144,20 +148,20 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
else
pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
if (!pretty_name) {
- dev_err(dev, "can't generate pretty name for resource %pR\n", res);
- return IOMEM_ERR_PTR(-ENOMEM);
+ ret = dev_err_probe(dev, -ENOMEM, "can't generate pretty name for resource %pR\n", res);
+ return IOMEM_ERR_PTR(ret);
}
if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
- dev_err(dev, "can't request region for resource %pR\n", res);
- return IOMEM_ERR_PTR(-EBUSY);
+ ret = dev_err_probe(dev, -EBUSY, "can't request region for resource %pR\n", res);
+ return IOMEM_ERR_PTR(ret);
}
dest_ptr = __devm_ioremap(dev, res->start, size, type);
if (!dest_ptr) {
- dev_err(dev, "ioremap failed for resource %pR\n", res);
devm_release_mem_region(dev, res->start, size);
- dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
+ ret = dev_err_probe(dev, -ENOMEM, "ioremap failed for resource %pR\n", res);
+ return IOMEM_ERR_PTR(ret);
}
return dest_ptr;
@@ -311,212 +315,6 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */
-#ifdef CONFIG_PCI
-/*
- * PCI iomap devres
- */
-#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
-
-struct pcim_iomap_devres {
- void __iomem *table[PCIM_IOMAP_MAX];
-};
-
-static void pcim_iomap_release(struct device *gendev, void *res)
-{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pcim_iomap_devres *this = res;
- int i;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (this->table[i])
- pci_iounmap(dev, this->table[i]);
-}
-
-/**
- * pcim_iomap_table - access iomap allocation table
- * @pdev: PCI device to access iomap table for
- *
- * Access iomap allocation table for @dev. If iomap table doesn't
- * exist and @pdev is managed, it will be allocated. All iomaps
- * recorded in the iomap table are automatically unmapped on driver
- * detach.
- *
- * This function might sleep when the table is first allocated but can
- * be safely called without context and guaranteed to succeed once
- * allocated.
- */
-void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
-{
- struct pcim_iomap_devres *dr, *new_dr;
-
- dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
- if (dr)
- return dr->table;
-
- new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
- dev_to_node(&pdev->dev));
- if (!new_dr)
- return NULL;
- dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
- return dr->table;
-}
-EXPORT_SYMBOL(pcim_iomap_table);
-
-/**
- * pcim_iomap - Managed pcim_iomap()
- * @pdev: PCI device to iomap for
- * @bar: BAR to iomap
- * @maxlen: Maximum length of iomap
- *
- * Managed pci_iomap(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
-{
- void __iomem **tbl;
-
- BUG_ON(bar >= PCIM_IOMAP_MAX);
-
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
- return NULL;
-
- tbl[bar] = pci_iomap(pdev, bar, maxlen);
- return tbl[bar];
-}
-EXPORT_SYMBOL(pcim_iomap);
-
-/**
- * pcim_iounmap - Managed pci_iounmap()
- * @pdev: PCI device to iounmap for
- * @addr: Address to unmap
- *
- * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
- */
-void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
-{
- void __iomem **tbl;
- int i;
-
- pci_iounmap(pdev, addr);
-
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- BUG_ON(!tbl);
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (tbl[i] == addr) {
- tbl[i] = NULL;
- return;
- }
- WARN_ON(1);
-}
-EXPORT_SYMBOL(pcim_iounmap);
-
-/**
- * pcim_iomap_regions - Request and iomap PCI BARs
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to request and iomap
- * @name: Name used when requesting regions
- *
- * Request and iomap regions specified by @mask.
- */
-int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
-{
- void __iomem * const *iomap;
- int i, rc;
-
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return -ENOMEM;
-
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- unsigned long len;
-
- if (!(mask & (1 << i)))
- continue;
-
- rc = -EINVAL;
- len = pci_resource_len(pdev, i);
- if (!len)
- goto err_inval;
-
- rc = pci_request_region(pdev, i, name);
- if (rc)
- goto err_inval;
-
- rc = -ENOMEM;
- if (!pcim_iomap(pdev, i, 0))
- goto err_region;
- }
-
- return 0;
-
- err_region:
- pci_release_region(pdev, i);
- err_inval:
- while (--i >= 0) {
- if (!(mask & (1 << i)))
- continue;
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
- }
-
- return rc;
-}
-EXPORT_SYMBOL(pcim_iomap_regions);
-
-/**
- * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to iomap
- * @name: Name used when requesting regions
- *
- * Request all PCI BARs and iomap regions specified by @mask.
- */
-int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
- const char *name)
-{
- int request_mask = ((1 << 6) - 1) & ~mask;
- int rc;
-
- rc = pci_request_selected_regions(pdev, request_mask, name);
- if (rc)
- return rc;
-
- rc = pcim_iomap_regions(pdev, mask, name);
- if (rc)
- pci_release_selected_regions(pdev, request_mask);
- return rc;
-}
-EXPORT_SYMBOL(pcim_iomap_regions_request_all);
-
-/**
- * pcim_iounmap_regions - Unmap and release PCI BARs
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to unmap and release
- *
- * Unmap and release regions specified by @mask.
- */
-void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
-{
- void __iomem * const *iomap;
- int i;
-
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++) {
- if (!(mask & (1 << i)))
- continue;
-
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
- }
-}
-EXPORT_SYMBOL(pcim_iounmap_regions);
-#endif /* CONFIG_PCI */
-
static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
{
arch_phys_wc_del(*((int *)res));
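The devres.c change above folds the error print and the return value into
dev_err_probe(), which also stays quiet for -EPROBE_DEFER. A minimal sketch
of the same pattern in a driver probe path (names here are illustrative, not
from this diff):

	static int example_probe(struct platform_device *pdev)
	{
		void __iomem *base;

		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			/* Logs (unless deferring) and returns the error code. */
			return dev_err_probe(&pdev->dev, PTR_ERR(base),
					     "failed to map registers\n");
		return 0;
	}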
diff --git a/lib/dhry_1.c b/lib/dhry_1.c
index 08edbbb19f..ca6c87232c 100644
--- a/lib/dhry_1.c
+++ b/lib/dhry_1.c
@@ -277,7 +277,7 @@ int dhry(int n)
dhry_assert_string_eq(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING");
dhry_assert_string_eq(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING");
- User_Time = ktime_to_ms(ktime_sub(End_Time, Begin_Time));
+ User_Time = ktime_ms_delta(End_Time, Begin_Time);
kfree(Ptr_Glob);
kfree(Next_Ptr_Glob);
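ktime_ms_delta(later, earlier) is simply ktime_to_ms(ktime_sub(later, earlier)),
so the dhry_1.c change is a readability cleanup with no behavioral difference.
The timing idiom, sketched:

	ktime_t begin, end;
	s64 elapsed_ms;

	begin = ktime_get();
	/* ... benchmarked work ... */
	end = ktime_get();
	elapsed_ms = ktime_ms_delta(end, begin);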
diff --git a/lib/dhry_run.c b/lib/dhry_run.c
index f15ac666e9..e6a279dabf 100644
--- a/lib/dhry_run.c
+++ b/lib/dhry_run.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/mutex.h>
#include <linux/smp.h>
#define DHRY_VAX 1757
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
index 1d6858a108..c4cc4026c4 100644
--- a/lib/dim/Makefile
+++ b/lib/dim/Makefile
@@ -2,6 +2,6 @@
# DIM Dynamic Interrupt Moderation library
#
-obj-$(CONFIG_DIMLIB) += dim.o
+obj-$(CONFIG_DIMLIB) += dimlib.o
-dim-y := dim.o net_dim.o rdma_dim.o
+dimlib-objs := dim.o net_dim.o rdma_dim.o
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
index e89aaf07bd..83b65ac74d 100644
--- a/lib/dim/dim.c
+++ b/lib/dim/dim.c
@@ -82,3 +82,6 @@ bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
return true;
}
EXPORT_SYMBOL(dim_calc_stats);
+
+MODULE_DESCRIPTION("Dynamic Interrupt Moderation (DIM) library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index a5a687e1c9..f2c5e7910b 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -644,10 +644,9 @@ static int param_set_dyndbg_classnames(const char *instr, const struct kernel_pa
int cls_id, totct = 0;
bool wanted;
- cl_str = tmp = kstrdup(instr, GFP_KERNEL);
- p = strchr(cl_str, '\n');
- if (p)
- *p = '\0';
+ cl_str = tmp = kstrdup_and_replace(instr, '\n', '\0', GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
/* start with previously set state-bits, then modify */
curr_bits = old_bits = *dcp->bits;
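kstrdup_and_replace() (from <linux/string_helpers.h>) duplicates a string and
replaces every occurrence of one character in a single call; unlike the
open-coded version it supersedes, the allocation is NULL-checked before being
touched. A usage sketch:

	char *cl_str;

	/* Duplicate the input with each '\n' turned into a terminator. */
	cl_str = kstrdup_and_replace("abc,def\n", '\n', '\0', GFP_KERNEL);
	if (!cl_str)
		return -ENOMEM;
	/* cl_str is now "abc,def" */
	kfree(cl_str);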
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index fde0aa2441..e49deddd3d 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -10,18 +10,90 @@
#include <linux/dynamic_queue_limits.h>
#include <linux/compiler.h>
#include <linux/export.h>
+#include <trace/events/napi.h>
#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
+static void dql_check_stall(struct dql *dql, unsigned short stall_thrs)
+{
+ unsigned long now;
+
+ if (!stall_thrs)
+ return;
+
+ now = jiffies;
+ /* Check for a potential stall */
+ if (time_after_eq(now, dql->last_reap + stall_thrs)) {
+ unsigned long hist_head, t, start, end;
+
+ /* We are trying to detect a period of at least @stall_thrs
+	 * jiffies without any Tx completions, but during the first
+	 * half of which some Tx was posted.
+ */
+dqs_again:
+ hist_head = READ_ONCE(dql->history_head);
+ /* pairs with smp_wmb() in dql_queued() */
+ smp_rmb();
+
+ /* Get the previous entry in the ring buffer, which is the
+ * oldest sample.
+ */
+ start = (hist_head - DQL_HIST_LEN + 1) * BITS_PER_LONG;
+
+ /* Advance start to continue from the last reap time */
+ if (time_before(start, dql->last_reap + 1))
+ start = dql->last_reap + 1;
+
+	/* Newest sample for which we should already have seen a completion */
+ end = hist_head * BITS_PER_LONG + (BITS_PER_LONG - 1);
+
+	/* Shrink the search space to [start, (now - stall_thrs/2)] if
+ * `end` is beyond the stall zone
+ */
+ if (time_before(now, end + stall_thrs / 2))
+ end = now - stall_thrs / 2;
+
+	/* Search for the queued time in [start, end] */
+ for (t = start; time_before_eq(t, end); t++)
+ if (test_bit(t % (DQL_HIST_LEN * BITS_PER_LONG),
+ dql->history))
+ break;
+
+	/* If found, t holds the time at which the stalled Tx was queued */
+ if (!time_before_eq(t, end))
+ goto no_stall;
+
+ /* The ring buffer was modified in the meantime, retry */
+ if (hist_head != READ_ONCE(dql->history_head))
+ goto dqs_again;
+
+ dql->stall_cnt++;
+ dql->stall_max = max_t(unsigned short, dql->stall_max, now - t);
+
+ trace_dql_stall_detected(dql->stall_thrs, now - t,
+ dql->last_reap, dql->history_head,
+ now, dql->history);
+ }
+no_stall:
+ dql->last_reap = now;
+}
+
/* Records completed count and recalculates the queue limit */
void dql_completed(struct dql *dql, unsigned int count)
{
unsigned int inprogress, prev_inprogress, limit;
unsigned int ovlimit, completed, num_queued;
+ unsigned short stall_thrs;
bool all_prev_completed;
num_queued = READ_ONCE(dql->num_queued);
+ /* Read stall_thrs in advance since it belongs to the same (first)
+ * cache line as ->num_queued. This way, dql_check_stall() does not
+ * need to touch the first cache line again later, reducing the window
+ * of possible false sharing.
+ */
+ stall_thrs = READ_ONCE(dql->stall_thrs);
/* Can't complete more than what's in queue */
BUG_ON(count > num_queued - dql->num_completed);
@@ -110,6 +182,8 @@ void dql_completed(struct dql *dql, unsigned int count)
dql->prev_last_obj_cnt = dql->last_obj_cnt;
dql->num_completed = completed;
dql->prev_num_queued = num_queued;
+
+ dql_check_stall(dql, stall_thrs);
}
EXPORT_SYMBOL(dql_completed);
@@ -125,6 +199,10 @@ void dql_reset(struct dql *dql)
dql->prev_ovlimit = 0;
dql->lowest_slack = UINT_MAX;
dql->slack_start_time = jiffies;
+
+ dql->last_reap = jiffies;
+ dql->history_head = jiffies / BITS_PER_LONG;
+ memset(dql->history, 0, sizeof(dql->history));
}
EXPORT_SYMBOL(dql_reset);
@@ -133,6 +211,7 @@ void dql_init(struct dql *dql, unsigned int hold_time)
dql->max_limit = DQL_MAX_LIMIT;
dql->min_limit = 0;
dql->slack_hold_time = hold_time;
+ dql->stall_thrs = 0;
dql_reset(dql);
}
EXPORT_SYMBOL(dql_init);
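The stall detector above records queueing times as bits in the dql->history
ring and, from dql_completed(), scans the window between the last reap and
roughly stall_thrs/2 jiffies ago for a Tx that was queued but never completed.
Drivers opt in by setting dql->stall_thrs in jiffies (exposed via a BQL sysfs
knob elsewhere in this series); zero disables the check. A simplified,
self-contained model of the window scan -- names and shape are mine, not the
kernel's:

	/* Does any bit in [start, end] mark a Tx queued but never reaped? */
	static bool window_has_stalled_tx(const unsigned long *history,
					  unsigned long start, unsigned long end,
					  unsigned long ring_bits)
	{
		unsigned long t;

		for (t = start; t <= end; t++)
			if (test_bit(t % ring_bits, history))
				return true;
		return false;
	}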
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 32f99e9a67..0836bb3d76 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -87,7 +87,7 @@ out: \
if (sz % BITS_PER_LONG) \
tmp = (FETCH) & BITMAP_LAST_WORD_MASK(sz); \
found: \
- sz = min(idx * BITS_PER_LONG + fns(tmp, nr), sz); \
+ sz = idx * BITS_PER_LONG + fns(tmp, nr); \
out: \
sz; \
})
@@ -116,6 +116,18 @@ unsigned long _find_first_and_bit(const unsigned long *addr1,
EXPORT_SYMBOL(_find_first_and_bit);
#endif
+/*
+ * Find the first bit set in all three memory regions (their AND).
+ */
+unsigned long _find_first_and_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size)
+{
+ return FIND_FIRST_BIT(addr1[idx] & addr2[idx] & addr3[idx], /* nop */, size);
+}
+EXPORT_SYMBOL(_find_first_and_and_bit);
+
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.
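The new _find_first_and_and_bit() extends the two-bitmap helper to a
three-way intersection; FIND_FIRST_BIT() evaluates the given expression one
word at a time, so the AND of the three maps is never materialized. Its
semantics, open-coded as a (slow) sketch:

	unsigned long first_and_and_bit_slow(const unsigned long *a,
					     const unsigned long *b,
					     const unsigned long *c,
					     unsigned long size)
	{
		unsigned long bit;

		for (bit = 0; bit < size; bit++)
			if (test_bit(bit, a) && test_bit(bit, b) && test_bit(bit, c))
				return bit;
		return size;	/* convention: @size means "no bit found" */
	}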
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 83332fefa6..84ecccddc7 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -84,83 +84,6 @@ bool fprop_new_period(struct fprop_global *p, int periods)
}
/*
- * ---- SINGLE ----
- */
-
-int fprop_local_init_single(struct fprop_local_single *pl)
-{
- pl->events = 0;
- pl->period = 0;
- raw_spin_lock_init(&pl->lock);
- return 0;
-}
-
-void fprop_local_destroy_single(struct fprop_local_single *pl)
-{
-}
-
-static void fprop_reflect_period_single(struct fprop_global *p,
- struct fprop_local_single *pl)
-{
- unsigned int period = p->period;
- unsigned long flags;
-
- /* Fast path - period didn't change */
- if (pl->period == period)
- return;
- raw_spin_lock_irqsave(&pl->lock, flags);
- /* Someone updated pl->period while we were spinning? */
- if (pl->period >= period) {
- raw_spin_unlock_irqrestore(&pl->lock, flags);
- return;
- }
- /* Aging zeroed our fraction? */
- if (period - pl->period < BITS_PER_LONG)
- pl->events >>= period - pl->period;
- else
- pl->events = 0;
- pl->period = period;
- raw_spin_unlock_irqrestore(&pl->lock, flags);
-}
-
-/* Event of type pl happened */
-void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
-{
- fprop_reflect_period_single(p, pl);
- pl->events++;
- percpu_counter_add(&p->events, 1);
-}
-
-/* Return fraction of events of type pl */
-void fprop_fraction_single(struct fprop_global *p,
- struct fprop_local_single *pl,
- unsigned long *numerator, unsigned long *denominator)
-{
- unsigned int seq;
- s64 num, den;
-
- do {
- seq = read_seqcount_begin(&p->sequence);
- fprop_reflect_period_single(p, pl);
- num = pl->events;
- den = percpu_counter_read_positive(&p->events);
- } while (read_seqcount_retry(&p->sequence, seq));
-
- /*
- * Make fraction <= 1 and denominator > 0 even in presence of percpu
- * counter errors
- */
- if (den <= num) {
- if (num)
- den = num;
- else
- den = 1;
- }
- *denominator = den;
- *numerator = num;
-}
-
-/*
* ---- PERCPU ----
*/
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 7ee468ef21..7e945fdcbf 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -98,7 +98,8 @@ config FONT_10x18
config FONT_SUN8x16
bool "Sparc console 8x16 font"
- depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || BOOTX_TEXT
+ depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || \
+ BOOTX_TEXT || EARLYFB
help
This is the high resolution console font for Sun machines. Say Y.
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 9738664386..47e34950b6 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -96,18 +96,21 @@ EXPORT_SYMBOL(find_font);
* get_default_font - get default font
* @xres: screen size of X
* @yres: screen size of Y
- * @font_w: bit array of supported widths (1 - 32)
- * @font_h: bit array of supported heights (1 - 32)
+ * @font_w: bit array of supported widths (1 - FB_MAX_BLIT_WIDTH)
+ * @font_h: bit array of supported heights (1 - FB_MAX_BLIT_HEIGHT)
*
* Get the default font for a specified screen size.
* Dimensions are in pixels.
*
+ * font_w or font_h being NULL means all values are supported.
+ *
* Returns %NULL if no font is found, or a pointer to the
* chosen font.
*
*/
-const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
- u32 font_h)
+const struct font_desc *get_default_font(int xres, int yres,
+ unsigned long *font_w,
+ unsigned long *font_h)
{
int i, c, cc, res;
const struct font_desc *f, *g;
@@ -135,8 +138,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
if (res > 20)
c += 20 - res;
- if ((font_w & (1U << (f->width - 1))) &&
- (font_h & (1U << (f->height - 1))))
+ if ((!font_w || test_bit(f->width - 1, font_w)) &&
+ (!font_h || test_bit(f->height - 1, font_h)))
c += 1000;
if (c > cc) {
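get_default_font() now takes bitmaps of supported glyph sizes instead of
32-bit masks, lifting the old 32-pixel limit; a NULL bitmap means any size is
acceptable. A caller sketch (the 64-bit cap below is an arbitrary stand-in
for FB_MAX_BLIT_WIDTH/HEIGHT):

	DECLARE_BITMAP(font_w, 64);	/* bit N-1 set => width N supported */
	DECLARE_BITMAP(font_h, 64);
	const struct font_desc *font;

	bitmap_zero(font_w, 64);
	bitmap_zero(font_h, 64);
	set_bit(8 - 1, font_w);
	set_bit(16 - 1, font_w);
	set_bit(16 - 1, font_h);
	set_bit(32 - 1, font_h);
	font = get_default_font(1024, 768, font_w, font_h);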
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 7830a9e64e..e17d520f53 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Runtime test cases for CONFIG_FORTIFY_SOURCE that aren't expected to
- * Oops the kernel on success. (For those, see drivers/misc/lkdtm/fortify.c)
+ * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy()
+ * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
*
* For corner cases with UBSAN, try testing with:
*
@@ -15,18 +15,73 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/* We don't need to fill dmesg with the fortify WARNs during testing. */
+#ifdef DEBUG
+# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
+# define FORTIFY_WARN_KUNIT(x...) WARN_ONCE(x)
+#else
+# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
+# define FORTIFY_WARN_KUNIT(x...) do { } while (0)
+#endif
+
+/* Redefine fortify_panic() to track failures. */
+void fortify_add_kunit_error(int write);
+#define fortify_panic(func, write, avail, size, retfail) do { \
+ FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size); \
+ fortify_add_kunit_error(write); \
+ return (retfail); \
+} while (0)
+
+/* Redefine fortify_warn_once() to track memcpy() failures. */
+#define fortify_warn_once(chk_func, x...) do { \
+ bool __result = chk_func; \
+ FORTIFY_WARN_KUNIT(__result, x); \
+ if (__result) \
+ fortify_add_kunit_error(1); \
+} while (0)
+
#include <kunit/device.h>
#include <kunit/test.h>
+#include <kunit/test-bug.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
+/* Handle being built without CONFIG_FORTIFY_SOURCE */
+#ifndef __compiletime_strlen
+# define __compiletime_strlen __builtin_strlen
+#endif
+
+static struct kunit_resource read_resource;
+static struct kunit_resource write_resource;
+static int fortify_read_overflows;
+static int fortify_write_overflows;
+
static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
static char array_unknown[] = "compiler thinks I might change";
-static void known_sizes_test(struct kunit *test)
+void fortify_add_kunit_error(int write)
+{
+ struct kunit_resource *resource;
+ struct kunit *current_test;
+
+ current_test = kunit_get_current_test();
+ if (!current_test)
+ return;
+
+ resource = kunit_find_named_resource(current_test,
+ write ? "fortify_write_overflows"
+ : "fortify_read_overflows");
+ if (!resource)
+ return;
+
+ (*(int *)resource->data)++;
+ kunit_put_resource(resource);
+}
+
+static void fortify_test_known_sizes(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
@@ -59,7 +114,7 @@ static noinline size_t want_minus_one(int pick)
return __compiletime_strlen(str);
}
-static void control_flow_split_test(struct kunit *test)
+static void fortify_test_control_flow_split(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
}
@@ -135,11 +190,11 @@ static volatile size_t unknown_size = 50;
#endif
#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
-static void alloc_size_##allocator##_const_test(struct kunit *test) \
+static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \
{ \
CONST_TEST_BODY(TEST_##allocator); \
} \
-static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \
+static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
{ \
DYNAMIC_TEST_BODY(TEST_##allocator); \
}
@@ -181,9 +236,6 @@ static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \
kfree(p)); \
checker(expected_size, __kmalloc(alloc_size, gfp), \
kfree(p)); \
- checker(expected_size, \
- __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
- kfree(p)); \
\
orig = kmalloc(alloc_size, gfp); \
KUNIT_EXPECT_TRUE(test, orig != NULL); \
@@ -308,22 +360,737 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
+static const char * const test_strs[] = {
+ "",
+ "Hello there",
+ "A longer string, just for variety",
+};
+
+#define TEST_realloc(checker) do { \
+ gfp_t gfp = GFP_KERNEL; \
+ size_t len; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \
+ len = strlen(test_strs[i]); \
+ KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \
+ checker(len, kmemdup_array(test_strs[i], 1, len, gfp), \
+ kfree(p)); \
+ checker(len, kmemdup(test_strs[i], len, gfp), \
+ kfree(p)); \
+ } \
+} while (0)
+static void fortify_test_realloc_size(struct kunit *test)
+{
+ TEST_realloc(check_dynamic);
+}
+
+/*
+ * We can't place the array at the end of the structure, or else
+ * builds without -fstrict-flex-arrays=3 will report it as having
+ * an unknown length. Additionally, add bytes before and after the
+ * string to catch over/underflows if tests fail.
+ */
+struct fortify_padding {
+ unsigned long bytes_before;
+ char buf[32];
+ unsigned long bytes_after;
+};
+/* Force compiler into not being able to resolve size at compile-time. */
+static volatile int unconst;
+
+static void fortify_test_strlen(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ int i, end = sizeof(pad.buf) - 1;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(pad.buf) - 1; i++)
+ pad.buf[i] = i + '0';
+ /* Trailing bytes are still %NUL. */
+ KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* String is terminated, so strlen() is valid. */
+ KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Make string unterminated, and recount. */
+ pad.buf[end] = 'A';
+ end = sizeof(pad.buf);
+ KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+}
+
+static void fortify_test_strnlen(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ int i, end = sizeof(pad.buf) - 1;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(pad.buf) - 1; i++)
+ pad.buf[i] = i + '0';
+ /* Trailing bytes are still %NUL. */
+ KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* String is terminated, so strnlen() is valid. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* A truncated strnlen() will be safe, too. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
+ sizeof(pad.buf) / 2);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Make string unterminated, and recount. */
+ pad.buf[end] = 'A';
+ end = sizeof(pad.buf);
+	/* Reading beyond with strnlen() will fail. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+
+	/* Early truncation is still safe, though. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+
+ end = sizeof(pad.buf) / 2;
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void fortify_test_strcpy(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf) + 1] = { };
+ int i;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(src) - 2; i++)
+ src[i] = i + '0';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+	/* Legitimate strcpy() of 1 byte less than max size. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Only last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ src[sizeof(src) - 2] = 'A';
+ /* But now we trip the overflow checking. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Trailing %NUL -- thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ src[sizeof(src) - 1] = 'A';
+ /* And for sure now, two bytes past. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ /*
+ * Which trips both the strlen() on the unterminated src,
+ * and the resulting copy attempt.
+ */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Trailing %NUL -- thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void fortify_test_strncpy(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[] = "Copy me fully into a small buffer and I will overflow!";
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+	/* Legitimate strncpy() of 1 byte less than max size. */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst - 1)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Only last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Legitimate (though unterminated) max-size strncpy. */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* No trailing %NUL -- thanks strncpy API. */
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* But we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Now verify that FORTIFY is working... */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst + 1)
+ == pad.buf);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And further... */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst + 2)
+ == pad.buf);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void fortify_test_strscpy(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[] = "Copy me fully into a small buffer and I will overflow!";
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+	/* Legitimate strscpy() of 1 byte less than max size. */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(pad.buf) + unconst - 1),
+ -E2BIG);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Keeping space for %NUL, last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Legitimate max-size strscpy. */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(pad.buf) + unconst),
+ -E2BIG);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* A trailing %NUL will exist. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Now verify that FORTIFY is working... */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(pad.buf) + unconst + 1),
+ -E2BIG);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And much further... */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(src) * 2 + unconst),
+ -E2BIG);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void fortify_test_strcat(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf) / 2] = { };
+ char one[] = "A";
+ char two[] = "BC";
+ int i;
+
+ /* Fill 15 bytes with valid characters. */
+ for (i = 0; i < sizeof(src) - 1; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strcat() using less than half max size. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strcat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void fortify_test_strncat(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf)] = { };
+ int i, partial;
+
+	/* Fill 15 bytes with valid characters. */
+ partial = sizeof(src) / 2 - 1;
+ for (i = 0; i < partial; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strncat() using less than half max size. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strncat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated destination, and overflow. */
+ pad.buf[sizeof(pad.buf) - 1] = 'A';
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+ /* This will have tripped both strlen() and strcat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void fortify_test_strlcat(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf)] = { };
+ int i, partial;
+ int len = sizeof(pad.buf) + unconst;
+
+ /* Fill 15 bytes with valid characters. */
+ partial = sizeof(src) / 2 - 1;
+ for (i = 0; i < partial; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strlcat() using less than half max size. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strlcat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated destination, and overflow. */
+ pad.buf[sizeof(pad.buf) - 1] = 'A';
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
+ /* This will have tripped both strlen() and strlcat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated source, and overflow. */
+ memset(src, 'B', sizeof(src));
+ pad.buf[sizeof(pad.buf) - 1] = '\0';
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src));
+ /* This will have tripped both strlen() and strlcat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+/* Check for 0-sized arrays... */
+struct fortify_zero_sized {
+ unsigned long bytes_before;
+ char buf[0];
+ unsigned long bytes_after;
+};
+
+#define __fortify_test(memfunc) \
+static void fortify_test_##memfunc(struct kunit *test) \
+{ \
+ struct fortify_zero_sized zero = { }; \
+ struct fortify_padding pad = { }; \
+ char srcA[sizeof(pad.buf) + 2]; \
+ char srcB[sizeof(pad.buf) + 2]; \
+ size_t len = sizeof(pad.buf) + unconst; \
+ \
+ memset(srcA, 'A', sizeof(srcA)); \
+ KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \
+ memset(srcB, 'B', sizeof(srcB)); \
+ KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \
+ \
+ memfunc(pad.buf, srcA, 0 + unconst); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf + 1, srcB, 1 + unconst); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, 1 + unconst); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len - 1); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len + 1); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
+ memfunc(pad.buf + 1, srcB, len); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); \
+ \
+ /* Reset error counter. */ \
+ fortify_write_overflows = 0; \
+ /* Copy nothing into nothing: no errors. */ \
+ memfunc(zero.buf, srcB, 0 + unconst); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ /* We currently explicitly ignore zero-sized dests. */ \
+ memfunc(zero.buf, srcB, 1 + unconst); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+}
+__fortify_test(memcpy)
+__fortify_test(memmove)
+
+static void fortify_test_memscan(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + strlen("Where oh where is ");
+ char needle = 'm';
+ size_t len = sizeof(haystack) + unconst;
+
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void fortify_test_memchr(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + strlen("Where oh where is ");
+ char needle = 'm';
+ size_t len = sizeof(haystack) + unconst;
+
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void fortify_test_memchr_inv(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + 1;
+ char needle = 'W';
+ size_t len = sizeof(haystack) + unconst;
+
+ /* Normal search is okay. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void fortify_test_memcmp(struct kunit *test)
+{
+ char one[] = "My mind is going ...";
+ char two[] = "My mind is going ... I can feel it.";
+ size_t one_len = sizeof(one) + unconst - 1;
+ size_t two_len = sizeof(two) + unconst - 1;
+
+ /* We match the first string (ignoring the %NUL). */
+ KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Still in bounds, but no longer matching. */
+ KUNIT_ASSERT_LT(test, memcmp(one, two, one_len + 1), 0);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Catch too-large ranges. */
+ KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+
+ KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void fortify_test_kmemdup(struct kunit *test)
+{
+ char src[] = "I got Doom running on it!";
+ char *copy;
+ size_t len = sizeof(src) + unconst;
+
+ /* Copy is within bounds. */
+ copy = kmemdup(src, len, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Without %NUL. */
+ copy = kmemdup(src, len - 1, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Tiny bounds. */
+ copy = kmemdup(src, 1, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Out of bounds by 1 byte. */
+ copy = kmemdup(src, len + 1, GFP_KERNEL);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ kfree(copy);
+
+ /* Way out of bounds. */
+ copy = kmemdup(src, len * 2, GFP_KERNEL);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+ kfree(copy);
+
+ /* Starting offset causing out of bounds. */
+ copy = kmemdup(src + 1, len, GFP_KERNEL);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
+ kfree(copy);
+}
+
+static int fortify_test_init(struct kunit *test)
+{
+ if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
+ kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");
+
+ fortify_read_overflows = 0;
+ kunit_add_named_resource(test, NULL, NULL, &read_resource,
+ "fortify_read_overflows",
+ &fortify_read_overflows);
+ fortify_write_overflows = 0;
+ kunit_add_named_resource(test, NULL, NULL, &write_resource,
+ "fortify_write_overflows",
+ &fortify_write_overflows);
+ return 0;
+}
+
static struct kunit_case fortify_test_cases[] = {
- KUNIT_CASE(known_sizes_test),
- KUNIT_CASE(control_flow_split_test),
- KUNIT_CASE(alloc_size_kmalloc_const_test),
- KUNIT_CASE(alloc_size_kmalloc_dynamic_test),
- KUNIT_CASE(alloc_size_vmalloc_const_test),
- KUNIT_CASE(alloc_size_vmalloc_dynamic_test),
- KUNIT_CASE(alloc_size_kvmalloc_const_test),
- KUNIT_CASE(alloc_size_kvmalloc_dynamic_test),
- KUNIT_CASE(alloc_size_devm_kmalloc_const_test),
- KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test),
+ KUNIT_CASE(fortify_test_known_sizes),
+ KUNIT_CASE(fortify_test_control_flow_split),
+ KUNIT_CASE(fortify_test_alloc_size_kmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_vmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic),
+ KUNIT_CASE(fortify_test_realloc_size),
+ KUNIT_CASE(fortify_test_strlen),
+ KUNIT_CASE(fortify_test_strnlen),
+ KUNIT_CASE(fortify_test_strcpy),
+ KUNIT_CASE(fortify_test_strncpy),
+ KUNIT_CASE(fortify_test_strscpy),
+ KUNIT_CASE(fortify_test_strcat),
+ KUNIT_CASE(fortify_test_strncat),
+ KUNIT_CASE(fortify_test_strlcat),
+ /* skip memset: performs bounds checking on whole structs */
+ KUNIT_CASE(fortify_test_memcpy),
+ KUNIT_CASE(fortify_test_memmove),
+ KUNIT_CASE(fortify_test_memscan),
+ KUNIT_CASE(fortify_test_memchr),
+ KUNIT_CASE(fortify_test_memchr_inv),
+ KUNIT_CASE(fortify_test_memcmp),
+ KUNIT_CASE(fortify_test_kmemdup),
{}
};
static struct kunit_suite fortify_test_suite = {
.name = "fortify",
+ .init = fortify_test_init,
.test_cases = fortify_test_cases,
};
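For context, the bug class these new tests pin down: with
CONFIG_FORTIFY_SOURCE=y the string/memory helpers check destination sizes the
compiler can see and report overflows instead of corrupting memory. A minimal
sketch of the kind of code the strcpy() test family corresponds to (outside
the test harness this would WARN or trap rather than bump a counter):

	struct s {
		char buf[8];
		int other;
	};

	void overflow_example(struct s *p)
	{
		/*
		 * 11 bytes into an 8-byte field: the fortified strcpy()
		 * knows sizeof(p->buf) at compile time and reports the
		 * overflow instead of silently clobbering p->other.
		 */
		strcpy(p->buf, "0123456789");
	}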
diff --git a/lib/fw_table.c b/lib/fw_table.c
index c3569d2ba5..1629181445 100644
--- a/lib/fw_table.c
+++ b/lib/fw_table.c
@@ -127,6 +127,7 @@ static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
*
* @id: table id (for debugging purposes)
* @table_size: size of the root table
+ * @max_length: maximum size of the table (ignored if 0)
* @table_header: where does the table start?
* @proc: array of acpi_subtable_proc struct containing entry id
* and associated handler with it
@@ -148,18 +149,21 @@ static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
int __init_or_fwtbl_lib
acpi_parse_entries_array(char *id, unsigned long table_size,
union fw_table_header *table_header,
+ unsigned long max_length,
struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries)
{
- unsigned long table_end, subtable_len, entry_len;
+ unsigned long table_len, table_end, subtable_len, entry_len;
struct acpi_subtable_entry entry;
enum acpi_subtable_type type;
int count = 0;
int i;
type = acpi_get_subtable_type(id);
- table_end = (unsigned long)table_header +
- acpi_table_get_length(type, table_header);
+ table_len = acpi_table_get_length(type, table_header);
+ if (max_length && max_length < table_len)
+ table_len = max_length;
+ table_end = (unsigned long)table_header + table_len;
/* Parse all entries looking for a match. */
@@ -208,7 +212,8 @@ int __init_or_fwtbl_lib
cdat_table_parse(enum acpi_cdat_type type,
acpi_tbl_entry_handler_arg handler_arg,
void *arg,
- struct acpi_table_cdat *table_header)
+ struct acpi_table_cdat *table_header,
+ unsigned long length)
{
struct acpi_subtable_proc proc = {
.id = type,
@@ -222,6 +227,6 @@ cdat_table_parse(enum acpi_cdat_type type,
return acpi_parse_entries_array(ACPI_SIG_CDAT,
sizeof(struct acpi_table_cdat),
(union fw_table_header *)table_header,
- &proc, 1, 0);
+ length, &proc, 1, 0);
}
EXPORT_SYMBOL_FWTBL_LIB(cdat_table_parse);
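The new @max_length argument lets cdat_table_parse() callers bound parsing by
the size of the mapped window instead of trusting the table's self-reported
length. The clamp added above is equivalent to this sketch:

	/* max_length == 0 means the caller imposes no limit. */
	static unsigned long clamp_table_len(unsigned long table_len,
					     unsigned long max_length)
	{
		return max_length ? min(table_len, max_length) : table_len;
	}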
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index 41f1bcdc44..aaefb9b678 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -5,7 +5,7 @@
#include <linux/gfp.h>
#include <linux/kmemleak.h>
-#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
struct genradix_node {
@@ -14,13 +14,13 @@ struct genradix_node {
struct genradix_node *children[GENRADIX_ARY];
/* Leaf: */
- u8 data[PAGE_SIZE];
+ u8 data[GENRADIX_NODE_SIZE];
};
};
static inline int genradix_depth_shift(unsigned depth)
{
- return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+ return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
}
/*
@@ -33,7 +33,7 @@ static inline size_t genradix_depth_size(unsigned depth)
/* depth that's needed for a genradix that can address up to ULONG_MAX: */
#define GENRADIX_MAX_DEPTH \
- DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT)
+ DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
#define GENRADIX_DEPTH_MASK \
((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
@@ -79,23 +79,12 @@ EXPORT_SYMBOL(__genradix_ptr);
static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
{
- struct genradix_node *node;
-
- node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
-
- /*
- * We're using pages (not slab allocations) directly for kernel data
- * structures, so we need to explicitly inform kmemleak of them in order
- * to avoid false positive memory leak reports.
- */
- kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
- return node;
+ return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
}
static inline void genradix_free_node(struct genradix_node *node)
{
- kmemleak_free(node);
- free_page((unsigned long)node);
+ kfree(node);
}
/*
@@ -200,7 +189,7 @@ restart:
i++;
iter->offset = round_down(iter->offset + objs_per_ptr,
objs_per_ptr);
- iter->pos = (iter->offset >> PAGE_SHIFT) *
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) *
objs_per_page;
if (i == GENRADIX_ARY)
goto restart;
@@ -209,7 +198,7 @@ restart:
n = n->children[i];
}
- return &n->data[iter->offset & (PAGE_SIZE - 1)];
+ return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)];
}
EXPORT_SYMBOL(__genradix_iter_peek);
@@ -235,7 +224,7 @@ restart:
if (ilog2(iter->offset) >= genradix_depth_shift(level)) {
iter->offset = genradix_depth_size(level);
- iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page;
iter->offset -= obj_size_plus_page_remainder;
iter->pos--;
@@ -251,7 +240,7 @@ restart:
size_t objs_per_ptr = genradix_depth_size(level);
iter->offset = round_down(iter->offset, objs_per_ptr);
- iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page;
if (!iter->offset)
return NULL;
@@ -267,7 +256,7 @@ restart:
n = n->children[i];
}
- return &n->data[iter->offset & (PAGE_SIZE - 1)];
+ return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)];
}
EXPORT_SYMBOL(__genradix_iter_peek_prev);
@@ -289,7 +278,7 @@ int __genradix_prealloc(struct __genradix *radix, size_t size,
{
size_t offset;
- for (offset = 0; offset < size; offset += PAGE_SIZE)
+ for (offset = 0; offset < size; offset += GENRADIX_NODE_SIZE)
if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
return -ENOMEM;
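With the node size decoupled from PAGE_SIZE, all of the depth arithmetic is
expressed in GENRADIX_NODE_SHIFT: a leaf addresses GENRADIX_NODE_SIZE bytes
and every additional level multiplies the reachable range by GENRADIX_ARY.
A sketch of the depth calculation, assuming 4 KiB nodes and 8-byte pointers
(so 512 children per node) purely for illustration:

	/* How many levels are needed so byte @offset is addressable? */
	static unsigned genradix_depth_for(size_t offset)
	{
		unsigned depth = 0;

		/* reach(depth) = 1 << (12 + 9 * depth) under the assumptions above */
		while (offset >= (size_t)1 << (12 + 9 * depth))
			depth++;
		return depth;
	}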
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index 5de7c04e05..2fd5712fb7 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
@@ -16,9 +16,8 @@
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
-void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
- const void *from,
- size_t count)
+#ifndef __iowrite32_copy
+void __iowrite32_copy(void __iomem *to, const void *from, size_t count)
{
u32 __iomem *dst = to;
const u32 *src = from;
@@ -28,6 +27,7 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
__raw_writel(*src++, dst++);
}
EXPORT_SYMBOL_GPL(__iowrite32_copy);
+#endif
/**
* __ioread32_copy - copy data from MMIO space, in 32-bit units
@@ -60,9 +60,8 @@ EXPORT_SYMBOL_GPL(__ioread32_copy);
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
-void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
- const void *from,
- size_t count)
+#ifndef __iowrite64_copy
+void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
#ifdef CONFIG_64BIT
u64 __iomem *dst = to;
@@ -75,5 +74,5 @@ void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
__iowrite32_copy(to, from, count * 2);
#endif
}
-
EXPORT_SYMBOL_GPL(__iowrite64_copy);
+#endif
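Replacing the weak symbols with #ifndef guards means an architecture now
overrides these helpers by defining the name as a macro before this file is
built, which lets the compiler drop the generic copy entirely instead of
resolving the override at link time. Sketch of an override (paths are
illustrative):

	/* arch/<arch>/include/asm/io.h */
	#define __iowrite64_copy __iowrite64_copy	/* use the arch version */
	void __iowrite64_copy(void __iomem *to, const void *from, size_t count);

	/* The arch then provides its optimized definition elsewhere. */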
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index cf2eb2b2f9..4a6a9f419b 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -691,12 +691,11 @@ EXPORT_SYMBOL(iov_iter_discard);
static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
unsigned len_mask)
{
+ const struct iovec *iov = iter_iov(i);
size_t size = i->count;
size_t skip = i->iov_offset;
- unsigned k;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- const struct iovec *iov = iter_iov(i) + k;
+ do {
size_t len = iov->iov_len - skip;
if (len > size)
@@ -706,34 +705,36 @@ static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
if ((unsigned long)(iov->iov_base + skip) & addr_mask)
return false;
+ iov++;
size -= len;
- if (!size)
- break;
- }
+ skip = 0;
+ } while (size);
+
return true;
}
static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
unsigned len_mask)
{
- size_t size = i->count;
+ const struct bio_vec *bvec = i->bvec;
unsigned skip = i->iov_offset;
- unsigned k;
+ size_t size = i->count;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- size_t len = i->bvec[k].bv_len - skip;
+ do {
+ size_t len = bvec->bv_len;
if (len > size)
len = size;
if (len & len_mask)
return false;
- if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
+ if ((unsigned long)(bvec->bv_offset + skip) & addr_mask)
return false;
+ bvec++;
size -= len;
- if (!size)
- break;
- }
+ skip = 0;
+ } while (size);
+
return true;
}
@@ -777,13 +778,12 @@ EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
+ const struct iovec *iov = iter_iov(i);
unsigned long res = 0;
size_t size = i->count;
size_t skip = i->iov_offset;
- unsigned k;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- const struct iovec *iov = iter_iov(i) + k;
+ do {
size_t len = iov->iov_len - skip;
if (len) {
res |= (unsigned long)iov->iov_base + skip;
@@ -791,30 +791,31 @@ static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
len = size;
res |= len;
size -= len;
- if (!size)
- break;
}
- }
+ iov++;
+ skip = 0;
+ } while (size);
return res;
}
static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
+ const struct bio_vec *bvec = i->bvec;
unsigned res = 0;
size_t size = i->count;
unsigned skip = i->iov_offset;
- unsigned k;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- size_t len = i->bvec[k].bv_len - skip;
- res |= (unsigned long)i->bvec[k].bv_offset + skip;
+ do {
+ size_t len = bvec->bv_len - skip;
+ res |= (unsigned long)bvec->bv_offset + skip;
if (len > size)
len = size;
res |= len;
+ bvec++;
size -= len;
- if (!size)
- break;
- }
+ skip = 0;
+ } while (size);
+
return res;
}
@@ -1143,11 +1144,12 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
EXPORT_SYMBOL(dup_iter);
static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
- const struct iovec __user *uvec, unsigned long nr_segs)
+ const struct iovec __user *uvec, u32 nr_segs)
{
const struct compat_iovec __user *uiov =
(const struct compat_iovec __user *)uvec;
- int ret = -EFAULT, i;
+ int ret = -EFAULT;
+ u32 i;
if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
return -EFAULT;
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 12f5a347aa..a8b2eed905 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -5,13 +5,14 @@
* Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
*/
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/slab.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kfifo.h>
#include <linux/log2.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/kfifo.h>
/*
* internal helper to calculate the unused elements in a fifo
@@ -163,6 +164,19 @@ unsigned int __kfifo_out_peek(struct __kfifo *fifo,
}
EXPORT_SYMBOL(__kfifo_out_peek);
+unsigned int __kfifo_out_linear(struct __kfifo *fifo,
+ unsigned int *tail, unsigned int n)
+{
+ unsigned int size = fifo->mask + 1;
+ unsigned int off = fifo->out & fifo->mask;
+
+ if (tail)
+ *tail = off;
+
+ return min3(n, fifo->in - fifo->out, size - off);
+}
+EXPORT_SYMBOL(__kfifo_out_linear);
+
unsigned int __kfifo_out(struct __kfifo *fifo,
void *buf, unsigned int len)
{
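__kfifo_out_linear() returns the length of the largest contiguous readable
run (capped by @n, the fill level, and the buffer wrap) and reports its
offset through @tail, letting callers consume data in place. A hedged sketch
against the raw __kfifo fields, since the matching kfifo.h wrappers are
outside this hunk (consume() is hypothetical; esize == 1 assumed):

	unsigned int tail, len;

	len = __kfifo_out_linear(&fifo, &tail, want);	/* no copy happens */
	if (len) {
		consume(fifo.data + tail, len);		/* e.g. write-out or DMA */
		fifo.out += len;			/* commit the consumed bytes */
	}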
@@ -292,51 +306,31 @@ int __kfifo_to_user(struct __kfifo *fifo, void __user *to,
}
EXPORT_SYMBOL(__kfifo_to_user);
-static int setup_sgl_buf(struct scatterlist *sgl, void *buf,
- int nents, unsigned int len)
+static unsigned int setup_sgl_buf(struct __kfifo *fifo, struct scatterlist *sgl,
+ unsigned int data_offset, int nents,
+ unsigned int len, dma_addr_t dma)
{
- int n;
- unsigned int l;
- unsigned int off;
- struct page *page;
+ const void *buf = fifo->data + data_offset;
- if (!nents)
+ if (!nents || !len)
return 0;
- if (!len)
- return 0;
+ sg_set_buf(sgl, buf, len);
- n = 0;
- page = virt_to_page(buf);
- off = offset_in_page(buf);
- l = 0;
-
- while (len >= l + PAGE_SIZE - off) {
- struct page *npage;
-
- l += PAGE_SIZE;
- buf += PAGE_SIZE;
- npage = virt_to_page(buf);
- if (page_to_phys(page) != page_to_phys(npage) - l) {
- sg_set_page(sgl, page, l - off, off);
- sgl = sg_next(sgl);
- if (++n == nents || sgl == NULL)
- return n;
- page = npage;
- len -= l - off;
- l = off = 0;
- }
+ if (dma != DMA_MAPPING_ERROR) {
+ sg_dma_address(sgl) = dma + data_offset;
+ sg_dma_len(sgl) = len;
}
- sg_set_page(sgl, page, len, off);
- return n + 1;
+
+ return 1;
}
static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
- int nents, unsigned int len, unsigned int off)
+ int nents, unsigned int len, unsigned int off, dma_addr_t dma)
{
unsigned int size = fifo->mask + 1;
unsigned int esize = fifo->esize;
- unsigned int l;
+ unsigned int len_to_end;
unsigned int n;
off &= fifo->mask;
@@ -345,16 +339,17 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
size *= esize;
len *= esize;
}
- l = min(len, size - off);
+ len_to_end = min(len, size - off);
- n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
- n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
+ n = setup_sgl_buf(fifo, sgl, off, nents, len_to_end, dma);
+ n += setup_sgl_buf(fifo, sgl + n, 0, nents - n, len - len_to_end, dma);
return n;
}
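/*
 * Sketch of the split that setup_sgl() performs above: now that the fifo
 * buffer is assumed to be one DMA-mappable region, each linear chunk
 * becomes exactly one scatterlist entry, so any transfer needs at most
 * two entries -- before and after the ring wraps. Arithmetic model only:
 */
static unsigned int split_ring(unsigned int size, unsigned int off,
			       unsigned int len, unsigned int chunk[2])
{
	unsigned int first = len < size - off ? len : size - off;

	chunk[0] = first;        /* bytes in [off, off + first) */
	chunk[1] = len - first;  /* bytes in [0, len - first) after the wrap */
	return chunk[1] ? 2 : 1; /* number of sg entries needed */
}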
unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len)
+ struct scatterlist *sgl, int nents, unsigned int len,
+ dma_addr_t dma)
{
unsigned int l;
@@ -362,12 +357,13 @@ unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo,
if (len > l)
len = l;
- return setup_sgl(fifo, sgl, nents, len, fifo->in);
+ return setup_sgl(fifo, sgl, nents, len, fifo->in, dma);
}
EXPORT_SYMBOL(__kfifo_dma_in_prepare);
unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len)
+ struct scatterlist *sgl, int nents, unsigned int len,
+ dma_addr_t dma)
{
unsigned int l;
@@ -375,7 +371,7 @@ unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo,
if (len > l)
len = l;
- return setup_sgl(fifo, sgl, nents, len, fifo->out);
+ return setup_sgl(fifo, sgl, nents, len, fifo->out, dma);
}
EXPORT_SYMBOL(__kfifo_dma_out_prepare);
@@ -473,6 +469,19 @@ unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf,
}
EXPORT_SYMBOL(__kfifo_out_peek_r);
+unsigned int __kfifo_out_linear_r(struct __kfifo *fifo,
+ unsigned int *tail, unsigned int n, size_t recsize)
+{
+ if (fifo->in == fifo->out)
+ return 0;
+
+ if (tail)
+ *tail = fifo->out + recsize;
+
+ return min(n, __kfifo_peek_n(fifo, recsize));
+}
+EXPORT_SYMBOL(__kfifo_out_linear_r);
+
unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
unsigned int len, size_t recsize)
{
@@ -546,7 +555,8 @@ int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to,
EXPORT_SYMBOL(__kfifo_to_user_r);
unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize,
+ dma_addr_t dma)
{
BUG_ON(!nents);
@@ -555,7 +565,7 @@ unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo,
if (len + recsize > kfifo_unused(fifo))
return 0;
- return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize);
+ return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize, dma);
}
EXPORT_SYMBOL(__kfifo_dma_in_prepare_r);
@@ -569,7 +579,8 @@ void __kfifo_dma_in_finish_r(struct __kfifo *fifo,
EXPORT_SYMBOL(__kfifo_dma_in_finish_r);
unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
- struct scatterlist *sgl, int nents, unsigned int len, size_t recsize)
+ struct scatterlist *sgl, int nents, unsigned int len, size_t recsize,
+ dma_addr_t dma)
{
BUG_ON(!nents);
@@ -578,15 +589,7 @@ unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo,
if (len + recsize > fifo->in - fifo->out)
return 0;
- return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize);
+ return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize, dma);
}
EXPORT_SYMBOL(__kfifo_dma_out_prepare_r);
-void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
-{
- unsigned int len;
-
- len = __kfifo_peek_n(fifo, recsize);
- fifo->out += len + recsize;
-}
-EXPORT_SYMBOL(__kfifo_dma_out_finish_r);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index fb9a2f06dd..b7f2fa08d9 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -30,7 +30,7 @@
#include <net/net_namespace.h>
-u64 uevent_seqnum;
+atomic64_t uevent_seqnum;
#ifdef CONFIG_UEVENT_HELPER
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
#endif
@@ -42,10 +42,9 @@ struct uevent_sock {
#ifdef CONFIG_NET
static LIST_HEAD(uevent_sock_list);
-#endif
-
-/* This lock protects uevent_seqnum and uevent_sock_list */
+/* This lock protects uevent_sock_list */
static DEFINE_MUTEX(uevent_sock_mutex);
+#endif
/* the strings here must match the enum in include/linux/kobject.h */
static const char *kobject_actions[] = {
@@ -315,6 +314,7 @@ static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
int retval = 0;
/* send netlink message */
+ mutex_lock(&uevent_sock_mutex);
list_for_each_entry(ue_sk, &uevent_sock_list, list) {
struct sock *uevent_sock = ue_sk->sk;
@@ -334,6 +334,7 @@ static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
if (retval == -ENOBUFS || retval == -ESRCH)
retval = 0;
}
+ mutex_unlock(&uevent_sock_mutex);
consume_skb(skb);
return retval;
@@ -432,8 +433,23 @@ static void zap_modalias_env(struct kobj_uevent_env *env)
len = strlen(env->envp[i]) + 1;
if (i != env->envp_idx - 1) {
+ /* @env->envp[] contains pointers to @env->buf[]
+ * with @env->buflen chars, and we are removing
+ * variable MODALIAS here pointed by @env->envp[i]
+ * with length @len as shown below:
+ *
+ * 0 @env->buf[] @env->buflen
+ * ---------------------------------------------
+ * ^ ^ ^ ^
+ * | |-> @len <-| target block |
+ * @env->envp[0] @env->envp[i] @env->envp[i + 1]
+ *
+ * so the "target block" indicated above is moved
+ * backward by @len, and its right size is
+ * backward by @len, and its correct size is
+ */
memmove(env->envp[i], env->envp[i + 1],
- env->buflen - len);
+ env->buflen - (env->envp[i + 1] - env->envp[0]));
for (j = i; j < env->envp_idx - 1; j++)
env->envp[j] = env->envp[j + 1] - len;
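/*
 * Model of the corrected memmove size above (sketch; assumes, as in this
 * file, that env->envp[0] points at the start of env->buf): the bytes to
 * slide back are everything from the variable after MODALIAS up to the
 * end of the used buffer. The old "env->buflen - len" over-copied and
 * could read past the used region.
 */
static size_t bytes_after_removed(size_t buflen, const char *buf,
				  const char *next_var /* env->envp[i + 1] */)
{
	return buflen - (size_t)(next_var - buf);
}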
@@ -583,16 +599,14 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
break;
}
- mutex_lock(&uevent_sock_mutex);
/* we will send an event, so request a new sequence number */
- retval = add_uevent_var(env, "SEQNUM=%llu", ++uevent_seqnum);
- if (retval) {
- mutex_unlock(&uevent_sock_mutex);
+ retval = add_uevent_var(env, "SEQNUM=%llu",
+ atomic64_inc_return(&uevent_seqnum));
+ if (retval)
goto exit;
- }
+
retval = kobject_uevent_net_broadcast(kobj, env, action_string,
devpath);
- mutex_unlock(&uevent_sock_mutex);
#ifdef CONFIG_UEVENT_HELPER
/* call uevent_helper, usually only enabled during early boot */
@@ -688,7 +702,8 @@ static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb,
int ret;
/* bump and prepare sequence number */
- ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", ++uevent_seqnum);
+ ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu",
+ atomic64_inc_return(&uevent_seqnum));
if (ret < 0 || (size_t)ret >= sizeof(buf))
return -ENOMEM;
ret++;
@@ -742,9 +757,7 @@ static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EPERM;
}
- mutex_lock(&uevent_sock_mutex);
ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack);
- mutex_unlock(&uevent_sock_mutex);
return ret;
}
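/*
 * Sketch of the concurrency change above: the sequence number moves from
 * a mutex-protected u64 to an atomic64_t, so emitters no longer take
 * uevent_sock_mutex just to get a number, and the mutex shrinks to
 * guarding the socket-list walk alone.
 */
static atomic64_t seq;			/* was: u64 seq + mutex */

static u64 next_seqnum(void)
{
	return atomic64_inc_return(&seq);	/* unique and lock-free */
}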
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 68a6daec0a..34d7242d52 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -24,6 +24,17 @@ config KUNIT_DEBUGFS
test suite, which allow users to see results of the last test suite
run that occurred.
+config KUNIT_FAULT_TEST
+ bool "Enable KUnit tests which print BUG stacktraces"
+ depends on KUNIT_TEST
+ depends on !UML
+ default y
+ help
+ Enables fault handling tests for the KUnit framework. These tests may
+ trigger a kernel BUG(), and the associated stack trace, even when they
+ pass. If this conflicts with your test infrastructure (or is confusing
+ or annoying), they can be disabled by setting this to N.
+
config KUNIT_TEST
tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS
default KUNIT_ALL_TESTS
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
index 3a31fe9ed6..25c81ed465 100644
--- a/lib/kunit/device.c
+++ b/lib/kunit/device.c
@@ -36,7 +36,7 @@ struct kunit_device {
#define to_kunit_device(d) container_of_const(d, struct kunit_device, dev)
-static struct bus_type kunit_bus_type = {
+static const struct bus_type kunit_bus_type = {
.name = "kunit",
};
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 689fff2b2b..70b9a43cd2 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -33,13 +33,13 @@ static char *filter_glob_param;
static char *filter_param;
static char *filter_action_param;
-module_param_named(filter_glob, filter_glob_param, charp, 0400);
+module_param_named(filter_glob, filter_glob_param, charp, 0600);
MODULE_PARM_DESC(filter_glob,
"Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
-module_param_named(filter, filter_param, charp, 0400);
+module_param_named(filter, filter_param, charp, 0600);
MODULE_PARM_DESC(filter,
"Filter which KUnit test suites/tests run at boot-time using attributes, e.g. speed>slow");
-module_param_named(filter_action, filter_action_param, charp, 0400);
+module_param_named(filter_action, filter_action_param, charp, 0600);
MODULE_PARM_DESC(filter_action,
"Changes behavior of filtered tests using attributes, valid values are:\n"
"<none>: do not run filtered tests as normal\n"
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index f7980ef236..e3412e0ca3 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -109,6 +109,48 @@ static struct kunit_suite kunit_try_catch_test_suite = {
.test_cases = kunit_try_catch_test_cases,
};
+#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST)
+
+static void kunit_test_null_dereference(void *data)
+{
+ struct kunit *test = data;
+ int *null = NULL;
+
+ *null = 0;
+
+ KUNIT_FAIL(test, "This line should never be reached\n");
+}
+
+static void kunit_test_fault_null_dereference(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_test_null_dereference,
+ kunit_test_catch);
+ kunit_try_catch_run(try_catch, test);
+
+ KUNIT_EXPECT_EQ(test, try_catch->try_result, -EINTR);
+ KUNIT_EXPECT_TRUE(test, ctx->function_called);
+}
+
+#endif /* CONFIG_KUNIT_FAULT_TEST */
+
+static struct kunit_case kunit_fault_test_cases[] = {
+#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST)
+ KUNIT_CASE(kunit_test_fault_null_dereference),
+#endif /* CONFIG_KUNIT_FAULT_TEST */
+ {}
+};
+
+static struct kunit_suite kunit_fault_test_suite = {
+ .name = "kunit_fault",
+ .init = kunit_try_catch_test_init,
+ .test_cases = kunit_fault_test_cases,
+};
+
/*
* Context for testing test managed resources
* is_resource_initialized is used to test arbitrary resources
@@ -826,6 +868,7 @@ static struct kunit_suite kunit_current_test_suite = {
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
&kunit_log_test_suite, &kunit_status_test_suite,
- &kunit_current_test_suite, &kunit_device_test_suite);
+ &kunit_current_test_suite, &kunit_device_test_suite,
+ &kunit_fault_test_suite);
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
index 03fb511826..7511442ea9 100644
--- a/lib/kunit/string-stream-test.c
+++ b/lib/kunit/string-stream-test.c
@@ -22,18 +22,10 @@ struct string_stream_test_priv {
};
/* Avoids a cast warning if kfree() is passed direct to kunit_add_action(). */
-static void kfree_wrapper(void *p)
-{
- kfree(p);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
/* Avoids a cast warning if string_stream_destroy() is passed direct to kunit_add_action(). */
-static void cleanup_raw_stream(void *p)
-{
- struct string_stream *stream = p;
-
- string_stream_destroy(stream);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(cleanup_raw_stream, string_stream_destroy, struct string_stream *);
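/*
 * Roughly what KUNIT_DEFINE_ACTION_WRAPPER expands to (sketch, not the
 * exact kernel expansion): a thunk with the void (*)(void *) signature
 * that kunit_add_action() expects, forwarding to the real function so no
 * function-pointer cast (and no CFI mismatch) is needed.
 */
static void kfree_wrapper(void *in)
{
	kfree((const void *)in);	/* call kfree() with its real prototype */
}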
static char *get_concatenated_string(struct kunit *test, struct string_stream *stream)
{
diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
index d9d1df28cc..6bbe0025b0 100644
--- a/lib/kunit/try-catch.c
+++ b/lib/kunit/try-catch.c
@@ -18,7 +18,7 @@
void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
{
try_catch->try_result = -EFAULT;
- kthread_complete_and_exit(try_catch->try_completion, -EFAULT);
+ kthread_exit(0);
}
EXPORT_SYMBOL_GPL(kunit_try_catch_throw);
@@ -26,9 +26,12 @@ static int kunit_generic_run_threadfn_adapter(void *data)
{
struct kunit_try_catch *try_catch = data;
+ try_catch->try_result = -EINTR;
try_catch->try(try_catch->context);
+ if (try_catch->try_result == -EINTR)
+ try_catch->try_result = 0;
- kthread_complete_and_exit(try_catch->try_completion, 0);
+ return 0;
}
static unsigned long kunit_test_timeout(void)
@@ -58,27 +61,33 @@ static unsigned long kunit_test_timeout(void)
void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
{
- DECLARE_COMPLETION_ONSTACK(try_completion);
struct kunit *test = try_catch->test;
struct task_struct *task_struct;
+ struct completion *task_done;
int exit_code, time_remaining;
try_catch->context = context;
- try_catch->try_completion = &try_completion;
try_catch->try_result = 0;
task_struct = kthread_create(kunit_generic_run_threadfn_adapter,
try_catch, "kunit_try_catch_thread");
if (IS_ERR(task_struct)) {
+ try_catch->try_result = PTR_ERR(task_struct);
try_catch->catch(try_catch->context);
return;
}
get_task_struct(task_struct);
+ /*
+ * As for a vfork(2), task_struct->vfork_done (pointing to the
+ * underlying kthread->exited) can be used to wait for the end of a
+ * kernel thread. It is set to NULL when the thread exits, so we
+ * keep a copy here.
+ */
+ task_done = task_struct->vfork_done;
wake_up_process(task_struct);
- time_remaining = wait_for_completion_timeout(&try_completion,
+ time_remaining = wait_for_completion_timeout(task_done,
kunit_test_timeout());
if (time_remaining == 0) {
- kunit_err(test, "try timed out\n");
try_catch->try_result = -ETIMEDOUT;
kthread_stop(task_struct);
}
@@ -91,8 +100,14 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
if (exit_code == -EFAULT)
try_catch->try_result = 0;
- else if (exit_code == -EINTR)
- kunit_err(test, "wake_up_process() was never called\n");
+ else if (exit_code == -EINTR) {
+ if (test->last_seen.file)
+ kunit_err(test, "try faulted: last line seen %s:%d\n",
+ test->last_seen.file, test->last_seen.line);
+ else
+ kunit_err(test, "try faulted\n");
+ } else if (exit_code == -ETIMEDOUT)
+ kunit_err(test, "try timed out\n");
else if (exit_code)
kunit_err(test, "Unknown error: %d\n", exit_code);
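/*
 * Decoding sketch for the reworked protocol above: try_result is
 * preloaded with -EINTR and cleared only by a thread that returns
 * normally, so each value maps to one outcome (values as used in this
 * file):
 */
static const char *decode_try_result(int exit_code)
{
	switch (exit_code) {
	case 0:          return "passed";
	case -EFAULT:    return "expected failure via kunit_try_catch_throw()";
	case -EINTR:     return "try faulted before completing";
	case -ETIMEDOUT: return "try timed out";
	default:         return "unknown error";
	}
}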
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 859b67c4d6..27e0c8ee71 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -139,7 +139,7 @@ static void __init iov_kunit_copy_to_kvec(struct kunit *test)
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -194,7 +194,7 @@ stop:
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
struct bvec_test_range {
@@ -302,7 +302,7 @@ static void __init iov_kunit_copy_to_bvec(struct kunit *test)
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -359,7 +359,7 @@ stop:
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
static void iov_kunit_destroy_xarray(void *data)
@@ -453,7 +453,7 @@ static void __init iov_kunit_copy_to_xarray(struct kunit *test)
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -516,7 +516,7 @@ stop:
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -596,7 +596,7 @@ static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
stop:
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_EQ(test, iter.count, 0);
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -674,7 +674,7 @@ static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
stop:
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_EQ(test, iter.count, 0);
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -753,7 +753,7 @@ static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
}
stop:
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
static struct kunit_case __refdata iov_kunit_cases[] = {
diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile
deleted file mode 100644
index dcc912b347..0000000000
--- a/lib/livepatch/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for livepatch test code.
-
-obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \
- test_klp_callbacks_demo.o \
- test_klp_callbacks_demo2.o \
- test_klp_callbacks_busy.o \
- test_klp_callbacks_mod.o \
- test_klp_livepatch.o \
- test_klp_shadow_vars.o \
- test_klp_state.o \
- test_klp_state2.o \
- test_klp_state3.o
diff --git a/lib/livepatch/test_klp_atomic_replace.c b/lib/livepatch/test_klp_atomic_replace.c
deleted file mode 100644
index 5af7093ca0..0000000000
--- a/lib/livepatch/test_klp_atomic_replace.c
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-static int replace;
-module_param(replace, int, 0644);
-MODULE_PARM_DESC(replace, "replace (default=0)");
-
-#include <linux/seq_file.h>
-static int livepatch_meminfo_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%s: %s\n", THIS_MODULE->name,
- "this has been live patched");
- return 0;
-}
-
-static struct klp_func funcs[] = {
- {
- .old_name = "meminfo_proc_show",
- .new_func = livepatch_meminfo_proc_show,
- }, {}
-};
-
-static struct klp_object objs[] = {
- {
- /* name being NULL means vmlinux */
- .funcs = funcs,
- }, {}
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- /* set .replace in the init function below for demo purposes */
-};
-
-static int test_klp_atomic_replace_init(void)
-{
- patch.replace = replace;
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_atomic_replace_exit(void)
-{
-}
-
-module_init(test_klp_atomic_replace_init);
-module_exit(test_klp_atomic_replace_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: atomic replace");
diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c
deleted file mode 100644
index 133929e0ce..0000000000
--- a/lib/livepatch/test_klp_callbacks_busy.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-
-/* load/run-time control from sysfs writer */
-static bool block_transition;
-module_param(block_transition, bool, 0644);
-MODULE_PARM_DESC(block_transition, "block_transition (default=false)");
-
-static void busymod_work_func(struct work_struct *work);
-static DECLARE_WORK(work, busymod_work_func);
-static DECLARE_COMPLETION(busymod_work_started);
-
-static void busymod_work_func(struct work_struct *work)
-{
- pr_info("%s enter\n", __func__);
- complete(&busymod_work_started);
-
- while (READ_ONCE(block_transition)) {
- /*
- * Busy-wait until the sysfs writer has acknowledged a
- * blocked transition and clears the flag.
- */
- msleep(20);
- }
-
- pr_info("%s exit\n", __func__);
-}
-
-static int test_klp_callbacks_busy_init(void)
-{
- pr_info("%s\n", __func__);
- schedule_work(&work);
-
- /*
- * To synchronize kernel messages, hold the init function from
- * exiting until the work function's entry message has printed.
- */
- wait_for_completion(&busymod_work_started);
-
- if (!block_transition) {
- /*
- * Serialize output: print all messages from the work
- * function before returning from init().
- */
- flush_work(&work);
- }
-
- return 0;
-}
-
-static void test_klp_callbacks_busy_exit(void)
-{
- WRITE_ONCE(block_transition, false);
- flush_work(&work);
- pr_info("%s\n", __func__);
-}
-
-module_init(test_klp_callbacks_busy_init);
-module_exit(test_klp_callbacks_busy_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: busy target module");
diff --git a/lib/livepatch/test_klp_callbacks_demo.c b/lib/livepatch/test_klp_callbacks_demo.c
deleted file mode 100644
index 3fd8fe1cd1..0000000000
--- a/lib/livepatch/test_klp_callbacks_demo.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-static int pre_patch_ret;
-module_param(pre_patch_ret, int, 0644);
-MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)");
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return pre_patch_ret;
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-static void patched_work_func(struct work_struct *work)
-{
- pr_info("%s\n", __func__);
-}
-
-static struct klp_func no_funcs[] = {
- {}
-};
-
-static struct klp_func busymod_funcs[] = {
- {
- .old_name = "busymod_work_func",
- .new_func = patched_work_func,
- }, {}
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, {
- .name = "test_klp_callbacks_mod",
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, {
- .name = "test_klp_callbacks_busy",
- .funcs = busymod_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
-};
-
-static int test_klp_callbacks_demo_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo_init);
-module_exit(test_klp_callbacks_demo_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: livepatch demo");
diff --git a/lib/livepatch/test_klp_callbacks_demo2.c b/lib/livepatch/test_klp_callbacks_demo2.c
deleted file mode 100644
index 5417573e80..0000000000
--- a/lib/livepatch/test_klp_callbacks_demo2.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-static int replace;
-module_param(replace, int, 0644);
-MODULE_PARM_DESC(replace, "replace (default=0)");
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return 0;
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-static struct klp_func no_funcs[] = {
- { }
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- /* set .replace in the init function below for demo purposes */
-};
-
-static int test_klp_callbacks_demo2_init(void)
-{
- patch.replace = replace;
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo2_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo2_init);
-module_exit(test_klp_callbacks_demo2_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: livepatch demo2");
diff --git a/lib/livepatch/test_klp_callbacks_mod.c b/lib/livepatch/test_klp_callbacks_mod.c
deleted file mode 100644
index 8fbe645b1c..0000000000
--- a/lib/livepatch/test_klp_callbacks_mod.c
+++ /dev/null
@@ -1,24 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-static int test_klp_callbacks_mod_init(void)
-{
- pr_info("%s\n", __func__);
- return 0;
-}
-
-static void test_klp_callbacks_mod_exit(void)
-{
- pr_info("%s\n", __func__);
-}
-
-module_init(test_klp_callbacks_mod_init);
-module_exit(test_klp_callbacks_mod_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: target module");
diff --git a/lib/livepatch/test_klp_livepatch.c b/lib/livepatch/test_klp_livepatch.c
deleted file mode 100644
index aff08199de..0000000000
--- a/lib/livepatch/test_klp_livepatch.c
+++ /dev/null
@@ -1,51 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-#include <linux/seq_file.h>
-static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%s: %s\n", THIS_MODULE->name,
- "this has been live patched");
- return 0;
-}
-
-static struct klp_func funcs[] = {
- {
- .old_name = "cmdline_proc_show",
- .new_func = livepatch_cmdline_proc_show,
- }, { }
-};
-
-static struct klp_object objs[] = {
- {
- /* name being NULL means vmlinux */
- .funcs = funcs,
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
-};
-
-static int test_klp_livepatch_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_livepatch_exit(void)
-{
-}
-
-module_init(test_klp_livepatch_init);
-module_exit(test_klp_livepatch_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Seth Jennings <sjenning@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: livepatch module");
diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c
deleted file mode 100644
index b991164908..0000000000
--- a/lib/livepatch/test_klp_shadow_vars.c
+++ /dev/null
@@ -1,301 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/livepatch.h>
-#include <linux/slab.h>
-
-/*
- * Keep a small list of pointers so that we can print address-agnostic
- * pointer values. Use a rolling integer count to differentiate the values.
- * Ironically we could have used the shadow variable API to do this, but
- * let's not lean too heavily on the very code we're testing.
- */
-static LIST_HEAD(ptr_list);
-struct shadow_ptr {
- void *ptr;
- int id;
- struct list_head list;
-};
-
-static void free_ptr_list(void)
-{
- struct shadow_ptr *sp, *tmp_sp;
-
- list_for_each_entry_safe(sp, tmp_sp, &ptr_list, list) {
- list_del(&sp->list);
- kfree(sp);
- }
-}
-
-static int ptr_id(void *ptr)
-{
- struct shadow_ptr *sp;
- static int count;
-
- list_for_each_entry(sp, &ptr_list, list) {
- if (sp->ptr == ptr)
- return sp->id;
- }
-
- sp = kmalloc(sizeof(*sp), GFP_ATOMIC);
- if (!sp)
- return -ENOMEM;
- sp->ptr = ptr;
- sp->id = count++;
-
- list_add(&sp->list, &ptr_list);
-
- return sp->id;
-}
-
-/*
- * Shadow variable wrapper functions that echo the function and arguments
- * to the kernel log for testing verification. Don't display raw pointers,
- * but use the ptr_id() value instead.
- */
-static void *shadow_get(void *obj, unsigned long id)
-{
- int **sv;
-
- sv = klp_shadow_get(obj, id);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n",
- __func__, ptr_id(obj), id, ptr_id(sv));
-
- return sv;
-}
-
-static void *shadow_alloc(void *obj, unsigned long id, size_t size,
- gfp_t gfp_flags, klp_shadow_ctor_t ctor,
- void *ctor_data)
-{
- int **var = ctor_data;
- int **sv;
-
- sv = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, var);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
- __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
- ptr_id(*var), ptr_id(sv));
-
- return sv;
-}
-
-static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size,
- gfp_t gfp_flags, klp_shadow_ctor_t ctor,
- void *ctor_data)
-{
- int **var = ctor_data;
- int **sv;
-
- sv = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, var);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
- __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
- ptr_id(*var), ptr_id(sv));
-
- return sv;
-}
-
-static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
-{
- klp_shadow_free(obj, id, dtor);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx, dtor=PTR%d)\n",
- __func__, ptr_id(obj), id, ptr_id(dtor));
-}
-
-static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
-{
- klp_shadow_free_all(id, dtor);
- pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", __func__, id, ptr_id(dtor));
-}
-
-
-/* Shadow variable constructor - remember simple pointer data */
-static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
-{
- int **sv = shadow_data;
- int **var = ctor_data;
-
- if (!var)
- return -EINVAL;
-
- *sv = *var;
- pr_info("%s: PTR%d -> PTR%d\n", __func__, ptr_id(sv), ptr_id(*var));
-
- return 0;
-}
-
-/*
- * With more than one item to free in the list, order is not determined and
- * shadow_dtor will not be passed to shadow_free_all() which would make the
- * test fail. (see pass 6)
- */
-static void shadow_dtor(void *obj, void *shadow_data)
-{
- int **sv = shadow_data;
-
- pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n",
- __func__, ptr_id(obj), ptr_id(sv));
-}
-
-/* number of objects we simulate that need shadow vars */
-#define NUM_OBJS 3
-
-/* dynamically created obj fields have the following shadow var id values */
-#define SV_ID1 0x1234
-#define SV_ID2 0x1235
-
-/*
- * The main test case adds/removes new fields (shadow var) to each of these
- * test structure instances. The last group of fields in the struct represent
- * the idea that shadow variables may be added and removed to and from the
- * struct during execution.
- */
-struct test_object {
- /* add anything here below and avoid to define an empty struct */
- struct shadow_ptr sp;
-
- /* these represent shadow vars added and removed with SV_ID{1,2} */
- /* char nfield1; */
- /* int nfield2; */
-};
-
-static int test_klp_shadow_vars_init(void)
-{
- struct test_object objs[NUM_OBJS];
- char nfields1[NUM_OBJS], *pnfields1[NUM_OBJS], **sv1[NUM_OBJS];
- char *pndup[NUM_OBJS];
- int nfields2[NUM_OBJS], *pnfields2[NUM_OBJS], **sv2[NUM_OBJS];
- void **sv;
- int ret;
- int i;
-
- ptr_id(NULL);
-
- /*
- * With an empty shadow variable hash table, expect not to find
- * any matches.
- */
- sv = shadow_get(&objs[0], SV_ID1);
- if (!sv)
- pr_info(" got expected NULL result\n");
-
- /* pass 1: init & alloc a char+int pair of svars for each objs */
- for (i = 0; i < NUM_OBJS; i++) {
- pnfields1[i] = &nfields1[i];
- ptr_id(pnfields1[i]);
-
- if (i % 2) {
- sv1[i] = shadow_alloc(&objs[i], SV_ID1,
- sizeof(pnfields1[i]), GFP_KERNEL,
- shadow_ctor, &pnfields1[i]);
- } else {
- sv1[i] = shadow_get_or_alloc(&objs[i], SV_ID1,
- sizeof(pnfields1[i]), GFP_KERNEL,
- shadow_ctor, &pnfields1[i]);
- }
- if (!sv1[i]) {
- ret = -ENOMEM;
- goto out;
- }
-
- pnfields2[i] = &nfields2[i];
- ptr_id(pnfields2[i]);
- sv2[i] = shadow_alloc(&objs[i], SV_ID2, sizeof(pnfields2[i]),
- GFP_KERNEL, shadow_ctor, &pnfields2[i]);
- if (!sv2[i]) {
- ret = -ENOMEM;
- goto out;
- }
- }
-
- /* pass 2: verify we find allocated svars and where they point to */
- for (i = 0; i < NUM_OBJS; i++) {
- /* check the "char" svar for all objects */
- sv = shadow_get(&objs[i], SV_ID1);
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv1[i]), ptr_id(*sv1[i]));
-
- /* check the "int" svar for all objects */
- sv = shadow_get(&objs[i], SV_ID2);
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv2[i]), ptr_id(*sv2[i]));
- }
-
- /* pass 3: verify that 'get_or_alloc' returns already allocated svars */
- for (i = 0; i < NUM_OBJS; i++) {
- pndup[i] = &nfields1[i];
- ptr_id(pndup[i]);
-
- sv = shadow_get_or_alloc(&objs[i], SV_ID1, sizeof(pndup[i]),
- GFP_KERNEL, shadow_ctor, &pndup[i]);
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv1[i]), ptr_id(*sv1[i]));
- }
-
- /* pass 4: free <objs[*], SV_ID1> pairs of svars, verify removal */
- for (i = 0; i < NUM_OBJS; i++) {
- shadow_free(&objs[i], SV_ID1, shadow_dtor); /* 'char' pairs */
- sv = shadow_get(&objs[i], SV_ID1);
- if (!sv)
- pr_info(" got expected NULL result\n");
- }
-
- /* pass 5: check we still find <objs[*], SV_ID2> svar pairs */
- for (i = 0; i < NUM_OBJS; i++) {
- sv = shadow_get(&objs[i], SV_ID2); /* 'int' pairs */
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv2[i]), ptr_id(*sv2[i]));
- }
-
- /* pass 6: free all the <objs[*], SV_ID2> svar pairs too. */
- shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
- for (i = 0; i < NUM_OBJS; i++) {
- sv = shadow_get(&objs[i], SV_ID2);
- if (!sv)
- pr_info(" got expected NULL result\n");
- }
-
- free_ptr_list();
-
- return 0;
-out:
- shadow_free_all(SV_ID1, NULL); /* 'char' pairs */
- shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
- free_ptr_list();
-
- return ret;
-}
-
-static void test_klp_shadow_vars_exit(void)
-{
-}
-
-module_init(test_klp_shadow_vars_init);
-module_exit(test_klp_shadow_vars_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: shadow variables");
diff --git a/lib/livepatch/test_klp_state.c b/lib/livepatch/test_klp_state.c
deleted file mode 100644
index 57a4253acb..0000000000
--- a/lib/livepatch/test_klp_state.c
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2019 SUSE
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/printk.h>
-#include <linux/livepatch.h>
-
-#define CONSOLE_LOGLEVEL_STATE 1
-/* Version 1 does not support migration. */
-#define CONSOLE_LOGLEVEL_STATE_VERSION 1
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-static struct klp_patch patch;
-
-static int allocate_loglevel_state(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return -EINVAL;
-
- loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
- if (!loglevel_state->data)
- return -ENOMEM;
-
- pr_info("%s: allocating space to store console_loglevel\n",
- __func__);
- return 0;
-}
-
-static void fix_console_loglevel(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: fixing console_loglevel\n", __func__);
- *(int *)loglevel_state->data = console_loglevel;
- console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
-}
-
-static void restore_console_loglevel(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: restoring console_loglevel\n", __func__);
- console_loglevel = *(int *)loglevel_state->data;
-}
-
-static void free_loglevel_state(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: freeing space for the stored console_loglevel\n",
- __func__);
- kfree(loglevel_state->data);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return allocate_loglevel_state();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- fix_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- restore_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- free_loglevel_state();
-}
-
-static struct klp_func no_funcs[] = {
- {}
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_state states[] = {
- {
- .id = CONSOLE_LOGLEVEL_STATE,
- .version = CONSOLE_LOGLEVEL_STATE_VERSION,
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- .states = states,
- .replace = true,
-};
-
-static int test_klp_callbacks_demo_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo_init);
-module_exit(test_klp_callbacks_demo_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
-MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state2.c b/lib/livepatch/test_klp_state2.c
deleted file mode 100644
index c978ea4d5e..0000000000
--- a/lib/livepatch/test_klp_state2.c
+++ /dev/null
@@ -1,191 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2019 SUSE
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/printk.h>
-#include <linux/livepatch.h>
-
-#define CONSOLE_LOGLEVEL_STATE 1
-/* Version 2 supports migration. */
-#define CONSOLE_LOGLEVEL_STATE_VERSION 2
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-static struct klp_patch patch;
-
-static int allocate_loglevel_state(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: space to store console_loglevel already allocated\n",
- __func__);
- return 0;
- }
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return -EINVAL;
-
- loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
- if (!loglevel_state->data)
- return -ENOMEM;
-
- pr_info("%s: allocating space to store console_loglevel\n",
- __func__);
- return 0;
-}
-
-static void fix_console_loglevel(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: taking over the console_loglevel change\n",
- __func__);
- loglevel_state->data = prev_loglevel_state->data;
- return;
- }
-
- pr_info("%s: fixing console_loglevel\n", __func__);
- *(int *)loglevel_state->data = console_loglevel;
- console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
-}
-
-static void restore_console_loglevel(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: passing the console_loglevel change back to the old livepatch\n",
- __func__);
- return;
- }
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: restoring console_loglevel\n", __func__);
- console_loglevel = *(int *)loglevel_state->data;
-}
-
-static void free_loglevel_state(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: keeping space to store console_loglevel\n",
- __func__);
- return;
- }
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: freeing space for the stored console_loglevel\n",
- __func__);
- kfree(loglevel_state->data);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return allocate_loglevel_state();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- fix_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- restore_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- free_loglevel_state();
-}
-
-static struct klp_func no_funcs[] = {
- {}
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_state states[] = {
- {
- .id = CONSOLE_LOGLEVEL_STATE,
- .version = CONSOLE_LOGLEVEL_STATE_VERSION,
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- .states = states,
- .replace = true,
-};
-
-static int test_klp_callbacks_demo_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo_init);
-module_exit(test_klp_callbacks_demo_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
-MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state3.c b/lib/livepatch/test_klp_state3.c
deleted file mode 100644
index 9226579d10..0000000000
--- a/lib/livepatch/test_klp_state3.c
+++ /dev/null
@@ -1,5 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2019 SUSE
-
-/* The console loglevel fix is the same in the next cumulative patch. */
-#include "test_klp_state2.c"
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index fe6092a1dc..2d7d27e6ae 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1307,8 +1307,8 @@ static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
}
/*
- * mas_node_count() - Check if enough nodes are allocated and request more if
- * there is not enough nodes.
+ * mas_node_count_gfp() - Check if enough nodes are allocated and request more
+ * if there are not enough nodes.
* @mas: The maple state
* @count: The number of nodes needed
* @gfp: the gfp flags
@@ -2271,8 +2271,6 @@ bool mast_spanning_rebalance(struct maple_subtree_state *mast)
struct ma_state l_tmp = *mast->orig_l;
unsigned char depth = 0;
- r_tmp = *mast->orig_r;
- l_tmp = *mast->orig_l;
do {
mas_ascend(mast->orig_r);
mas_ascend(mast->orig_l);
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 55a81782e2..191761b1b6 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -22,6 +22,7 @@
#include <linux/export.h>
#include <linux/math.h>
#include <linux/math64.h>
+#include <linux/minmax.h>
#include <linux/log2.h>
/* Not needed on 64bit architectures */
@@ -191,6 +192,20 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
/* can a * b overflow ? */
if (ilog2(a) + ilog2(b) > 62) {
/*
+ * Note that the algorithm after the if block below might lose
+ * some precision and the result is more exact for b > a. So
+ * exchange a and b if a is bigger than b.
+ *
+ * For example with a = 43980465100800, b = 100000000, c = 1000000000
+ * the below calculation doesn't modify b at all because div == 0
+ * and then shift becomes 45 + 26 - 62 = 9 and so the result
+ * becomes 4398035251080. However with a and b swapped the exact
+ * result is calculated (i.e. 4398046510080).
+ */
+ if (a > b)
+ swap(a, b);
+
+ /*
* (b * a) / c is equal to
*
* (b / c) * a +
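/*
 * Userspace sketch of the precision note above, using __int128 (a GCC
 * extension) as the exact reference for the worked example in the
 * comment:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t a = 43980465100800ULL, b = 100000000ULL, c = 1000000000ULL;
	unsigned __int128 exact = (unsigned __int128)a * b / c;

	/* prints 4398046510080, the value the swapped-operand path computes */
	printf("%llu\n", (unsigned long long)exact);
	return 0;
}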
diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
index d42cebf740..d3b64b10da 100644
--- a/lib/math/prime_numbers.c
+++ b/lib/math/prime_numbers.c
@@ -6,8 +6,6 @@
#include <linux/prime_numbers.h>
#include <linux/slab.h>
-#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
-
struct primes {
struct rcu_head rcu;
unsigned long last, sz;
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 30e00ef0bf..20ea9038c3 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -309,9 +309,6 @@ static void set_random_nonzero(struct kunit *test, u8 *byte)
static void init_large(struct kunit *test)
{
- if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST))
- kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
-
/* Get many bit patterns. */
get_random_bytes(large_src, ARRAY_SIZE(large_src));
@@ -496,58 +493,6 @@ static void memmove_overlap_test(struct kunit *test)
}
}
-static void strtomem_test(struct kunit *test)
-{
- static const char input[sizeof(unsigned long)] = "hi";
- static const char truncate[] = "this is too long";
- struct {
- unsigned long canary1;
- unsigned char output[sizeof(unsigned long)] __nonstring;
- unsigned long canary2;
- } wrap;
-
- memset(&wrap, 0xFF, sizeof(wrap));
- KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
- "bad initial canary value");
- KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
- "bad initial canary value");
-
- /* Check unpadded copy leaves surroundings untouched. */
- strtomem(wrap.output, input);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
- KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
- for (size_t i = 2; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-
- /* Check truncated copy leaves surroundings untouched. */
- memset(&wrap, 0xFF, sizeof(wrap));
- strtomem(wrap.output, truncate);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- for (size_t i = 0; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-
- /* Check padded copy leaves only string padded. */
- memset(&wrap, 0xFF, sizeof(wrap));
- strtomem_pad(wrap.output, input, 0xAA);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
- KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
- for (size_t i = 2; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-
- /* Check truncated padded copy has no padding. */
- memset(&wrap, 0xFF, sizeof(wrap));
- strtomem(wrap.output, truncate);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- for (size_t i = 0; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-}
-
static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE(memset_test),
KUNIT_CASE(memcpy_test),
@@ -555,7 +500,6 @@ static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE_SLOW(memmove_test),
KUNIT_CASE_SLOW(memmove_large_test),
KUNIT_CASE_SLOW(memmove_overlap_test),
- KUNIT_CASE(strtomem_test),
{}
};
diff --git a/lib/objagg.c b/lib/objagg.c
index 1e248629ed..1608895b00 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -167,6 +167,9 @@ static int objagg_obj_parent_assign(struct objagg *objagg,
{
void *delta_priv;
+ if (WARN_ON(!objagg_obj_is_root(parent)))
+ return -EINVAL;
+
delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
objagg_obj->obj);
if (IS_ERR(delta_priv))
@@ -903,20 +906,6 @@ static const struct objagg_opt_algo *objagg_opt_algos[] = {
[OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy,
};
-static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg,
- const void *obj)
-{
- struct rhashtable *ht = arg->ht;
- struct objagg_hints *objagg_hints =
- container_of(ht, struct objagg_hints, node_ht);
- const struct objagg_ops *ops = objagg_hints->ops;
- const char *ptr = obj;
-
- ptr += ht->p.key_offset;
- return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) :
- memcmp(ptr, arg->key, ht->p.key_len);
-}
-
/**
* objagg_hints_get - obtains hints instance
* @objagg: objagg instance
@@ -955,7 +944,6 @@ struct objagg_hints *objagg_hints_get(struct objagg *objagg,
offsetof(struct objagg_hints_node, obj);
objagg_hints->ht_params.head_offset =
offsetof(struct objagg_hints_node, ht_node);
- objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp;
err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params);
if (err)
diff --git a/lib/objpool.c b/lib/objpool.c
index cfdc024208..234f9d0bd0 100644
--- a/lib/objpool.c
+++ b/lib/objpool.c
@@ -50,7 +50,7 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
{
int i, cpu_count = 0;
- for (i = 0; i < pool->nr_cpus; i++) {
+ for (i = 0; i < nr_cpu_ids; i++) {
struct objpool_slot *slot;
int nodes, size, rc;
@@ -60,8 +60,8 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
continue;
/* compute how many objects to be allocated with this slot */
- nodes = nr_objs / num_possible_cpus();
- if (cpu_count < (nr_objs % num_possible_cpus()))
+ nodes = nr_objs / pool->nr_possible_cpus;
+ if (cpu_count < (nr_objs % pool->nr_possible_cpus))
nodes++;
cpu_count++;
@@ -103,7 +103,7 @@ static void objpool_fini_percpu_slots(struct objpool_head *pool)
if (!pool->cpu_slots)
return;
- for (i = 0; i < pool->nr_cpus; i++)
+ for (i = 0; i < nr_cpu_ids; i++)
kvfree(pool->cpu_slots[i]);
kfree(pool->cpu_slots);
}
@@ -130,13 +130,13 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
/* initialize objpool pool */
memset(pool, 0, sizeof(struct objpool_head));
- pool->nr_cpus = nr_cpu_ids;
+ pool->nr_possible_cpus = num_possible_cpus();
pool->obj_size = object_size;
pool->capacity = capacity;
pool->gfp = gfp & ~__GFP_ZERO;
pool->context = context;
pool->release = release;
- slot_size = pool->nr_cpus * sizeof(struct objpool_slot);
+ slot_size = nr_cpu_ids * sizeof(struct objpool_slot);
pool->cpu_slots = kzalloc(slot_size, pool->gfp);
if (!pool->cpu_slots)
return -ENOMEM;
@@ -152,106 +152,6 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
}
EXPORT_SYMBOL_GPL(objpool_init);
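/*
 * Sketch of the distinction the fix above relies on: with a sparse
 * possible mask, say CPUs {0, 2, 3}, nr_cpu_ids == 4 sizes the
 * cpu_slots[] array while num_possible_cpus() == 3 is the divisor for
 * spreading nr_objs. Hypothetical helper for illustration:
 */
static unsigned int objs_for_slot(unsigned int nr_objs,
				  unsigned int nr_possible, /* num_possible_cpus() */
				  unsigned int seen)        /* possible CPUs done so far */
{
	unsigned int n = nr_objs / nr_possible;

	if (seen < nr_objs % nr_possible)
		n++;	/* the first (nr_objs % nr_possible) slots get one extra */
	return n;
}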
-/* adding object to slot, abort if the slot was already full */
-static inline int
-objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu)
-{
- struct objpool_slot *slot = pool->cpu_slots[cpu];
- uint32_t head, tail;
-
- /* loading tail and head as a local snapshot, tail first */
- tail = READ_ONCE(slot->tail);
-
- do {
- head = READ_ONCE(slot->head);
- /* fault caught: something must be wrong */
- WARN_ON_ONCE(tail - head > pool->nr_objs);
- } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1));
-
- /* now the tail position is reserved for the given obj */
- WRITE_ONCE(slot->entries[tail & slot->mask], obj);
- /* update sequence to make this obj available for pop() */
- smp_store_release(&slot->last, tail + 1);
-
- return 0;
-}
-
-/* reclaim an object to object pool */
-int objpool_push(void *obj, struct objpool_head *pool)
-{
- unsigned long flags;
- int rc;
-
- /* disable local irq to avoid preemption & interruption */
- raw_local_irq_save(flags);
- rc = objpool_try_add_slot(obj, pool, raw_smp_processor_id());
- raw_local_irq_restore(flags);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(objpool_push);
-
-/* try to retrieve object from slot */
-static inline void *objpool_try_get_slot(struct objpool_head *pool, int cpu)
-{
- struct objpool_slot *slot = pool->cpu_slots[cpu];
- /* load head snapshot, other cpus may change it */
- uint32_t head = smp_load_acquire(&slot->head);
-
- while (head != READ_ONCE(slot->last)) {
- void *obj;
-
- /*
- * data visibility of 'last' and 'head' could be out of
- * order since memory updating of 'last' and 'head' are
- * performed in push() and pop() independently
- *
- * before any retrieving attempts, pop() must guarantee
- * 'last' is behind 'head', that is to say, there must
- * be available objects in slot, which could be ensured
- * by condition 'last != head && last - head <= nr_objs'
- * that is equivalent to 'last - head - 1 < nr_objs' as
- * 'last' and 'head' are both unsigned int32
- */
- if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {
- head = READ_ONCE(slot->head);
- continue;
- }
-
- /* obj must be retrieved before moving forward head */
- obj = READ_ONCE(slot->entries[head & slot->mask]);
-
- /* move head forward to mark it's consumption */
- if (try_cmpxchg_release(&slot->head, &head, head + 1))
- return obj;
- }
-
- return NULL;
-}
-
-/* allocate an object from object pool */
-void *objpool_pop(struct objpool_head *pool)
-{
- void *obj = NULL;
- unsigned long flags;
- int i, cpu;
-
- /* disable local irq to avoid preemption & interruption */
- raw_local_irq_save(flags);
-
- cpu = raw_smp_processor_id();
- for (i = 0; i < num_possible_cpus(); i++) {
- obj = objpool_try_get_slot(pool, cpu);
- if (obj)
- break;
- cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1);
- }
- raw_local_irq_restore(flags);
-
- return obj;
-}
-EXPORT_SYMBOL_GPL(objpool_pop);
-
/* forcibly release the whole objpool */
void objpool_free(struct objpool_head *pool)
{
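
For reference, the push/pop pair removed above implements a per-CPU FIFO ring with three free-running 32-bit cursors: tail reserves a slot, last publishes it, head consumes. A minimal single-threaded sketch of that cursor discipline (plain C, hypothetical names, not the kernel API; the in-kernel atomics are noted in comments):

    #include <stdint.h>

    struct ring {
    	uint32_t head, tail, last, mask;	/* mask = capacity - 1 */
    	void *entries[];
    };

    static int ring_push(struct ring *r, void *obj)
    {
    	uint32_t tail = r->tail++;	/* reserve: try_cmpxchg_acquire in-kernel */

    	r->entries[tail & r->mask] = obj;
    	r->last = tail + 1;		/* publish: smp_store_release in-kernel */
    	return 0;
    }

    static void *ring_pop(struct ring *r)
    {
    	if (r->head == r->last)		/* nothing published yet */
    		return NULL;
    	return r->entries[r->head++ & r->mask];	/* try_cmpxchg_release in-kernel */
    }
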
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index c85c8b121d..d305b0c054 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -258,25 +258,84 @@ DEFINE_TEST_ARRAY(s64) = {
\
_of = check_ ## op ## _overflow(a, b, &_r); \
KUNIT_EXPECT_EQ_MSG(test, _of, of, \
- "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
+ "expected check "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
a, b, of ? "" : " not", #t); \
KUNIT_EXPECT_EQ_MSG(test, _r, r, \
- "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
+ "expected check "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
a, b, r, _r, #t); \
/* Check for internal macro side-effects. */ \
_of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \
- KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, "Unexpected " #op " macro side-effect!\n"); \
- KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, "Unexpected " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, \
+ "Unexpected check " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected check " #op " macro side-effect!\n"); \
+ \
+ _r = wrapping_ ## op(t, a, b); \
+ KUNIT_EXPECT_TRUE_MSG(test, _r == r, \
+ "expected wrap "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
+ a, b, r, _r, #t); \
+ /* Check for internal macro side-effects. */ \
+ _a_orig = a; \
+ _b_orig = b; \
+ _r = wrapping_ ## op(t, _a_orig++, _b_orig++); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, \
+ "Unexpected wrap " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected wrap " #op " macro side-effect!\n"); \
+} while (0)
+
+static int global_counter;
+static void bump_counter(void)
+{
+ global_counter++;
+}
+
+static int get_index(void)
+{
+ volatile int index = 0;
+ bump_counter();
+ return index;
+}
+
+#define check_self_op(fmt, op, sym, a, b) do { \
+ typeof(a + 0) _a = a; \
+ typeof(b + 0) _b = b; \
+ typeof(a + 0) _a_sym = a; \
+ typeof(a + 0) _a_orig[1] = { a }; \
+ typeof(b + 0) _b_orig = b; \
+ typeof(b + 0) _b_bump = b + 1; \
+ typeof(a + 0) _r; \
+ \
+ _a_sym sym _b; \
+ _r = wrapping_ ## op(_a, _b); \
+ KUNIT_EXPECT_TRUE_MSG(test, _r == _a_sym, \
+ "expected "fmt" "#op" "fmt" == "fmt", got "fmt"\n", \
+ a, b, _a_sym, _r); \
+ KUNIT_EXPECT_TRUE_MSG(test, _a == _a_sym, \
+ "expected "fmt" "#op" "fmt" == "fmt", got "fmt"\n", \
+ a, b, _a_sym, _a); \
+ /* Check for internal macro side-effects. */ \
+ global_counter = 0; \
+ wrapping_ ## op(_a_orig[get_index()], _b_orig++); \
+ KUNIT_EXPECT_EQ_MSG(test, global_counter, 1, \
+ "Unexpected wrapping_" #op " macro side-effect on arg1!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected wrapping_" #op " macro side-effect on arg2!\n"); \
} while (0)
#define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \
static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \
{ \
+ /* check_{add,sub,mul}_overflow() and wrapping_{add,sub,mul} */ \
check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
+ /* wrapping_assign_{add,sub}() */ \
+ check_self_op(fmt, assign_add, +=, p->a, p->b); \
+ check_self_op(fmt, assign_add, +=, p->b, p->a); \
+ check_self_op(fmt, assign_sub, -=, p->a, p->b); \
} \
\
static void n ## _overflow_test(struct kunit *test) { \
@@ -1119,14 +1178,28 @@ struct foo {
s16 array[] __counted_by(counter);
};
+struct bar {
+ int a;
+ u32 counter;
+ s16 array[];
+};
+
static void DEFINE_FLEX_test(struct kunit *test)
{
- DEFINE_RAW_FLEX(struct foo, two, array, 2);
+ /* Using _RAW_ on a __counted_by struct will initialize "counter" to zero */
+ DEFINE_RAW_FLEX(struct foo, two_but_zero, array, 2);
+#if __has_attribute(__counted_by__)
+ int expected_raw_size = sizeof(struct foo);
+#else
+ int expected_raw_size = sizeof(struct foo) + 2 * sizeof(s16);
+#endif
+ /* Without annotation, it will always be on-stack size. */
+ DEFINE_RAW_FLEX(struct bar, two, array, 2);
DEFINE_FLEX(struct foo, eight, array, counter, 8);
DEFINE_FLEX(struct foo, empty, array, counter, 0);
- KUNIT_EXPECT_EQ(test, __struct_size(two),
- sizeof(struct foo) + sizeof(s16) + sizeof(s16));
+ KUNIT_EXPECT_EQ(test, __struct_size(two_but_zero), expected_raw_size);
+ KUNIT_EXPECT_EQ(test, __struct_size(two), sizeof(struct bar) + 2 * sizeof(s16));
KUNIT_EXPECT_EQ(test, __struct_size(eight), 24);
KUNIT_EXPECT_EQ(test, __struct_size(empty), sizeof(struct foo));
}
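
The wrapping_*() and wrapping_assign_*() helpers exercised above compute the two's-complement result without undefined behavior. A hedged usage sketch of the semantics these tests check (demo function name is illustrative only):

    #include <linux/overflow.h>

    static u8 wrap_demo(u8 a, u8 b)
    {
    	u8 sum;

    	if (check_add_overflow(a, b, &sum))	/* true if a + b wrapped */
    		sum = wrapping_add(u8, a, b);	/* explicit modular result */
    	wrapping_assign_add(sum, 1);		/* sum += 1, wrap permitted */
    	return sum;
    }
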
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
deleted file mode 100644
index 2829ddb0e3..0000000000
--- a/lib/pci_iomap.c
+++ /dev/null
@@ -1,180 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Implement the default iomap interfaces
- *
- * (C) Copyright 2004 Linus Torvalds
- */
-#include <linux/pci.h>
-#include <linux/io.h>
-
-#include <linux/export.h>
-
-#ifdef CONFIG_PCI
-/**
- * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @offset: map memory at the given offset in BAR
- * @maxlen: max length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR from offset to the end, pass %0 here.
- * */
-void __iomem *pci_iomap_range(struct pci_dev *dev,
- int bar,
- unsigned long offset,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
- if (len <= offset || !start)
- return NULL;
- len -= offset;
- start += offset;
- if (maxlen && len > maxlen)
- len = maxlen;
- if (flags & IORESOURCE_IO)
- return __pci_ioport_map(dev, start, len);
- if (flags & IORESOURCE_MEM)
- return ioremap(start, len);
- /* What? */
- return NULL;
-}
-EXPORT_SYMBOL(pci_iomap_range);
-
-/**
- * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @offset: map memory at the given offset in BAR
- * @maxlen: max length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way. When possible write combining
- * is used.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR from offset to the end, pass %0 here.
- * */
-void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
- int bar,
- unsigned long offset,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
-
- if (flags & IORESOURCE_IO)
- return NULL;
-
- if (len <= offset || !start)
- return NULL;
-
- len -= offset;
- start += offset;
- if (maxlen && len > maxlen)
- len = maxlen;
-
- if (flags & IORESOURCE_MEM)
- return ioremap_wc(start, len);
-
- /* What? */
- return NULL;
-}
-EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
-
-/**
- * pci_iomap - create a virtual mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @maxlen: length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR without checking for its length first, pass %0 here.
- * */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
- return pci_iomap_range(dev, bar, 0, maxlen);
-}
-EXPORT_SYMBOL(pci_iomap);
-
-/**
- * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @maxlen: length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way. When possible write combining
- * is used.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR without checking for its length first, pass %0 here.
- * */
-void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
- return pci_iomap_wc_range(dev, bar, 0, maxlen);
-}
-EXPORT_SYMBOL_GPL(pci_iomap_wc);
-
-/*
- * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
- * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
- * the different IOMAP ranges.
- *
- * But if the architecture does not use the generic iomap code, and if
- * it has _not_ defined it's own private pci_iounmap function, we define
- * it here.
- *
- * NOTE! This default implementation assumes that if the architecture
- * support ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
- * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
- * and does not need unmapping with 'ioport_unmap()'.
- *
- * If you have different rules for your architecture, you need to
- * implement your own pci_iounmap() that knows the rules for where
- * and how IO vs MEM get mapped.
- *
- * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
- * from legacy <asm-generic/io.h> header file behavior. In particular,
- * it would seem to make sense to do the iounmap(p) for the non-IO-space
- * case here regardless, but that's not what the old header file code
- * did. Probably incorrectly, but this is meant to be bug-for-bug
- * compatible.
- */
-#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
- uintptr_t start = (uintptr_t) PCI_IOBASE;
- uintptr_t addr = (uintptr_t) p;
-
- if (addr >= start && addr < start + IO_SPACE_LIMIT)
- return;
-#endif
- iounmap(p);
-}
-EXPORT_SYMBOL(pci_iounmap);
-
-#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
-
-#endif /* CONFIG_PCI */
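
lib/pci_iomap.c is deleted here; the deletion alone does not remove the interfaces (drivers still call them), so the implementation has presumably been relocated, likely under drivers/pci/. Typical driver-side use of the API documented above, with error handling trimmed (demo_probe is a hypothetical caller):

    #include <linux/pci.h>

    static int demo_probe(struct pci_dev *pdev)
    {
    	void __iomem *regs;
    	u32 val;

    	regs = pci_iomap(pdev, 0, 0);	/* maxlen == 0: map all of BAR 0 */
    	if (!regs)
    		return -ENOMEM;
    	val = ioread32(regs);		/* works for MMIO and PIO alike */
    	pci_iounmap(pdev, regs);
    	return val == 0xffffffff ? -ENODEV : 0;
    }
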
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 1c5420ff25..29127dd05d 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -21,7 +21,7 @@ altivec_flags += -isystem $(shell $(CC) -print-file-name=include)
ifdef CONFIG_CC_IS_CLANG
# clang ppc port does not yet support -maltivec when -msoft-float is
# enabled. A future release of clang will resolve this
-# https://bugs.llvm.org/show_bug.cgi?id=31177
+# https://llvm.org/pr31177
CFLAGS_REMOVE_altivec1.o += -msoft-float
CFLAGS_REMOVE_altivec2.o += -msoft-float
CFLAGS_REMOVE_altivec4.o += -msoft-float
@@ -33,27 +33,8 @@ CFLAGS_REMOVE_vpermxor8.o += -msoft-float
endif
endif
-# The GCC option -ffreestanding is required in order to compile code containing
-# ARM/NEON intrinsics in a non C99-compliant environment (such as the kernel)
-ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
-NEON_FLAGS := -ffreestanding
-# Enable <arm_neon.h>
-NEON_FLAGS += -isystem $(shell $(CC) -print-file-name=include)
-ifeq ($(ARCH),arm)
-NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
-endif
-CFLAGS_recov_neon_inner.o += $(NEON_FLAGS)
-ifeq ($(ARCH),arm64)
-CFLAGS_REMOVE_recov_neon_inner.o += -mgeneral-regs-only
-CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only
-CFLAGS_REMOVE_neon2.o += -mgeneral-regs-only
-CFLAGS_REMOVE_neon4.o += -mgeneral-regs-only
-CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only
-endif
-endif
-
quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@
+ cmd_unroll = $(AWK) -v N=$* -f $(src)/unroll.awk < $< > $@
targets += int1.c int2.c int4.c int8.c
$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
@@ -75,10 +56,16 @@ targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c
$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
-CFLAGS_neon1.o += $(NEON_FLAGS)
-CFLAGS_neon2.o += $(NEON_FLAGS)
-CFLAGS_neon4.o += $(NEON_FLAGS)
-CFLAGS_neon8.o += $(NEON_FLAGS)
+CFLAGS_neon1.o += $(CC_FLAGS_FPU)
+CFLAGS_neon2.o += $(CC_FLAGS_FPU)
+CFLAGS_neon4.o += $(CC_FLAGS_FPU)
+CFLAGS_neon8.o += $(CC_FLAGS_FPU)
+CFLAGS_recov_neon_inner.o += $(CC_FLAGS_FPU)
+CFLAGS_REMOVE_neon1.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_neon2.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_neon4.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_neon8.o += $(CC_FLAGS_NO_FPU)
+CFLAGS_REMOVE_recov_neon_inner.o += $(CC_FLAGS_NO_FPU)
targets += neon1.c neon2.c neon4.c neon8.c
$(obj)/neon%.c: $(src)/neon.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index cd0b9e95f4..863e2d3209 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -12,15 +12,14 @@
*/
#include <linux/raid/pq.h>
-#include <asm/fpu/api.h>
-#include <asm/vx-insn.h>
+#include <asm/fpu.h>
#define NSIZE 16
-static inline void LOAD_CONST(void)
+static __always_inline void LOAD_CONST(void)
{
- asm volatile("VREPIB %v24,7");
- asm volatile("VREPIB %v25,0x1d");
+ fpu_vrepib(24, 0x07);
+ fpu_vrepib(25, 0x1d);
}
/*
@@ -28,10 +27,7 @@ static inline void LOAD_CONST(void)
* vector register y left by 1 bit and stores the result in
* vector register x.
*/
-static inline void SHLBYTE(int x, int y)
-{
- asm volatile ("VAB %0,%1,%1" : : "i" (x), "i" (y));
-}
+#define SHLBYTE(x, y) fpu_vab(x, y, y)
/*
* For each of the 16 bytes in the vector register y the MASK()
@@ -39,49 +35,17 @@ static inline void SHLBYTE(int x, int y)
* or 0x00 if the high bit is 0. The result is stored in vector
* register x.
*/
-static inline void MASK(int x, int y)
-{
- asm volatile ("VESRAVB %0,%1,24" : : "i" (x), "i" (y));
-}
-
-static inline void AND(int x, int y, int z)
-{
- asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
-}
-
-static inline void XOR(int x, int y, int z)
-{
- asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
-}
+#define MASK(x, y) fpu_vesravb(x, y, 24)
-static inline void LOAD_DATA(int x, u8 *ptr)
-{
- typedef struct { u8 _[16 * $#]; } addrtype;
- register addrtype *__ptr asm("1") = (addrtype *) ptr;
-
- asm volatile ("VLM %2,%3,0,%1"
- : : "m" (*__ptr), "a" (__ptr), "i" (x),
- "i" (x + $# - 1));
-}
-
-static inline void STORE_DATA(int x, u8 *ptr)
-{
- typedef struct { u8 _[16 * $#]; } addrtype;
- register addrtype *__ptr asm("1") = (addrtype *) ptr;
-
- asm volatile ("VSTM %2,%3,0,1"
- : "=m" (*__ptr) : "a" (__ptr), "i" (x),
- "i" (x + $# - 1));
-}
-
-static inline void COPY_VEC(int x, int y)
-{
- asm volatile ("VLR %0,%1" : : "i" (x), "i" (y));
-}
+#define AND(x, y, z) fpu_vn(x, y, z)
+#define XOR(x, y, z) fpu_vx(x, y, z)
+#define LOAD_DATA(x, ptr) fpu_vlm(x, x + $# - 1, ptr)
+#define STORE_DATA(x, ptr) fpu_vstm(x, x + $# - 1, ptr)
+#define COPY_VEC(x, y) fpu_vlr(x, y)
static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
- struct kernel_fpu vxstate;
+ DECLARE_KERNEL_FPU_ONSTACK32(vxstate);
u8 **dptr, *p, *q;
int d, z, z0;
@@ -114,7 +78,7 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
- struct kernel_fpu vxstate;
+ DECLARE_KERNEL_FPU_ONSTACK32(vxstate);
u8 **dptr, *p, *q;
int d, z, z0;
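
The fpu_* wrappers above encode the standard RAID-6 GF(2^8) multiply-by-two: shift every byte left one bit (SHLBYTE), build an all-ones mask from each byte's old high bit (MASK), and conditionally fold in the 0x1d reduction polynomial (AND/XOR). A scalar C equivalent for one byte:

    #include <stdint.h>

    static uint8_t gf256_mul2(uint8_t q)
    {
    	uint8_t mask = (q & 0x80) ? 0xff : 0x00;	/* MASK */

    	return (uint8_t)(q << 1) ^ (mask & 0x1d);	/* SHLBYTE + AND + XOR */
    }
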
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6ae2ba8e06..dbbed19f8f 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -130,7 +130,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
if (ntbl)
return ntbl;
- ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ ntbl = alloc_hooks_tag(ht->alloc_tag,
+ kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));
if (ntbl && leaf) {
for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
@@ -157,7 +158,8 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
- tbl = kzalloc(size, gfp);
+ tbl = alloc_hooks_tag(ht->alloc_tag,
+ kmalloc_noprof(size, gfp|__GFP_ZERO));
if (!tbl)
return NULL;
@@ -181,7 +183,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
int i;
static struct lock_class_key __key;
- tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
+ tbl = alloc_hooks_tag(ht->alloc_tag,
+ kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
+ gfp|__GFP_ZERO, NUMA_NO_NODE));
size = nbuckets;
@@ -1016,7 +1020,7 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
* .obj_hashfn = my_hash_fn,
* };
*/
-int rhashtable_init(struct rhashtable *ht,
+int rhashtable_init_noprof(struct rhashtable *ht,
const struct rhashtable_params *params)
{
struct bucket_table *tbl;
@@ -1031,6 +1035,8 @@ int rhashtable_init(struct rhashtable *ht,
spin_lock_init(&ht->lock);
memcpy(&ht->p, params, sizeof(*params));
+ alloc_tag_record(ht->alloc_tag);
+
if (params->min_size)
ht->p.min_size = roundup_pow_of_two(params->min_size);
@@ -1076,7 +1082,7 @@ int rhashtable_init(struct rhashtable *ht,
return 0;
}
-EXPORT_SYMBOL_GPL(rhashtable_init);
+EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
/**
* rhltable_init - initialize a new hash list table
@@ -1087,15 +1093,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
*
* See documentation for rhashtable_init.
*/
-int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
{
int err;
- err = rhashtable_init(&hlt->ht, params);
+ err = rhashtable_init_noprof(&hlt->ht, params);
hlt->ht.rhlist = true;
return err;
}
-EXPORT_SYMBOL_GPL(rhltable_init);
+EXPORT_SYMBOL_GPL(rhltable_init_noprof);
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
void (*free_fn)(void *ptr, void *arg),
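
The _noprof renames wire these tables into memory allocation profiling; callers keep the old names, which are presumably now alloc_hooks() wrapper macros recording the call site in ht->alloc_tag. Caller-side setup is unchanged, e.g. (demo names are illustrative):

    #include <linux/rhashtable.h>

    struct demo_obj {
    	u32 key;
    	struct rhash_head node;
    };

    static const struct rhashtable_params demo_params = {
    	.key_len	= sizeof(u32),
    	.key_offset	= offsetof(struct demo_obj, key),
    	.head_offset	= offsetof(struct demo_obj, node),
    };

    static int demo_setup(struct rhashtable *ht)
    {
    	return rhashtable_init(ht, &demo_params);
    }
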
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 92c6b1fd89..5e2e93307f 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -60,12 +60,30 @@ static inline void update_alloc_hint_after_get(struct sbitmap *sb,
/*
* See if we have deferred clears that we can batch move
*/
-static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
+static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
+ unsigned int depth, unsigned int alloc_hint, bool wrap)
{
- unsigned long mask;
+ unsigned long mask, word_mask;
- if (!READ_ONCE(map->cleared))
- return false;
+ guard(spinlock_irqsave)(&map->swap_lock);
+
+ if (!map->cleared) {
+ if (depth == 0)
+ return false;
+
+ word_mask = (~0UL) >> (BITS_PER_LONG - depth);
+ /*
+ * Historically the caller always retried after ->cleared was
+ * moved into ->word; now retry only when some free bit is
+ * actually left to find. To avoid an infinite loop, wrap &
+ * alloc_hint must be taken into account, otherwise a soft
+ * lockup may occur.
+ */
+ if (!wrap && alloc_hint)
+ word_mask &= ~((1UL << alloc_hint) - 1);
+
+ return (READ_ONCE(map->word) & word_mask) != word_mask;
+ }
/*
* First get a stable cleared mask, setting the old mask to 0.
@@ -85,6 +103,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
bool alloc_hint)
{
unsigned int bits_per_word;
+ int i;
if (shift < 0)
shift = sbitmap_calculate_shift(depth);
@@ -116,6 +135,9 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
return -ENOMEM;
}
+ for (i = 0; i < sb->map_nr; i++)
+ spin_lock_init(&sb->map[i].swap_lock);
+
return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
@@ -126,7 +148,7 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
unsigned int i;
for (i = 0; i < sb->map_nr; i++)
- sbitmap_deferred_clear(&sb->map[i]);
+ sbitmap_deferred_clear(&sb->map[i], 0, 0, 0);
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
@@ -179,7 +201,7 @@ static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
alloc_hint, wrap);
if (nr != -1)
break;
- if (!sbitmap_deferred_clear(map))
+ if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap))
break;
} while (1);
@@ -494,18 +516,18 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
struct sbitmap_word *map = &sb->map[index];
unsigned long get_mask;
unsigned int map_depth = __map_depth(sb, index);
+ unsigned long val;
- sbitmap_deferred_clear(map);
- if (map->word == (1UL << (map_depth - 1)) - 1)
+ sbitmap_deferred_clear(map, 0, 0, 0);
+ val = READ_ONCE(map->word);
+ if (val == (1UL << (map_depth - 1)) - 1)
goto next;
- nr = find_first_zero_bit(&map->word, map_depth);
+ nr = find_first_zero_bit(&val, map_depth);
if (nr + nr_tags <= map_depth) {
atomic_long_t *ptr = (atomic_long_t *) &map->word;
- unsigned long val;
get_mask = ((1UL << nr_tags) - 1) << nr;
- val = READ_ONCE(map->word);
while (!atomic_long_try_cmpxchg(ptr, &val,
get_mask | val))
;
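
The new depth/alloc_hint/wrap arguments feed the retry test added above: with ->cleared empty, retrying is only worthwhile if a free bit remains in the region the caller may still scan. A standalone sketch of that predicate (hypothetical name, 64-bit word assumed):

    #include <stdbool.h>
    #include <stdint.h>

    static bool worth_retrying(uint64_t word, unsigned int depth,
    			   unsigned int alloc_hint, bool wrap)
    {
    	uint64_t word_mask;

    	if (depth == 0)			/* caller only wanted the batch move */
    		return false;
    	word_mask = ~0ULL >> (64 - depth);
    	if (!wrap && alloc_hint)	/* bits below the hint won't be scanned */
    		word_mask &= ~((1ULL << alloc_hint) - 1);
    	return (word & word_mask) != word_mask;	/* some scannable bit still free */
    }
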
diff --git a/lib/sort.c b/lib/sort.c
index b399bf10d6..a0509088f8 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -215,6 +215,7 @@ void sort_r(void *base, size_t num, size_t size,
/* pre-scale counters for performance */
size_t n = num * size, a = (num/2) * size;
const unsigned int lsbit = size & -size; /* Used to find parent */
+ size_t shift = 0;
if (!a) /* num < 2 || size == 0 */
return;
@@ -242,12 +243,21 @@ void sort_r(void *base, size_t num, size_t size,
for (;;) {
size_t b, c, d;
- if (a) /* Building heap: sift down --a */
- a -= size;
- else if (n -= size) /* Sorting: Extract root to --n */
+ if (a) /* Building heap: sift down a */
+ a -= size << shift;
+ else if (n > 3 * size) { /* Sorting: Extract two largest elements */
+ n -= size;
do_swap(base, base + n, size, swap_func, priv);
- else /* Sort complete */
+ shift = do_cmp(base + size, base + 2 * size, cmp_func, priv) <= 0;
+ a = size << shift;
+ n -= size;
+ do_swap(base + a, base + n, size, swap_func, priv);
+ } else if (n > size) { /* Sorting: Extract root */
+ n -= size;
+ do_swap(base, base + n, size, swap_func, priv);
+ } else { /* Sort complete */
break;
+ }
/*
* Sift element at "a" down into heap. This is the
@@ -262,7 +272,7 @@ void sort_r(void *base, size_t num, size_t size,
* average, 3/4 worst-case.)
*/
for (b = a; c = 2*b + size, (d = c + size) < n;)
- b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
+ b = do_cmp(base + c, base + d, cmp_func, priv) > 0 ? c : d;
if (d == n) /* Special case last leaf with no sibling */
b = c;
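
The reworked loop extracts two elements per round: in a max-heap the second-largest element is always one of the root's children, so after swapping the root out the larger child can be swapped out too before a single sift-down repairs the heap. An index-level sketch (0-based int array, n >= 4, hypothetical helper names):

    #include <stddef.h>

    static void swap_ints(int *a, int *b)
    {
    	int t = *a; *a = *b; *b = t;
    }

    static size_t extract_two(int *heap, size_t n)
    {
    	size_t a;

    	swap_ints(&heap[0], &heap[--n]);	/* largest to the tail */
    	a = heap[1] <= heap[2] ? 2 : 1;		/* next largest is a root child */
    	swap_ints(&heap[a], &heap[--n]);
    	return n;	/* caller sifts the element now at 'a' down */
    }
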
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 91d2301fde..5ed34cc963 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -624,15 +624,8 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
* we won't be able to do that under the lock.
*/
if (unlikely(can_alloc && !READ_ONCE(new_pool))) {
- /*
- * Zero out zone modifiers, as we don't have specific zone
- * requirements. Keep the flags related to allocation in atomic
- * contexts, I/O, nolockdep.
- */
- alloc_flags &= ~GFP_ZONEMASK;
- alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
- alloc_flags |= __GFP_NOWARN;
- page = alloc_pages(alloc_flags, DEPOT_POOL_ORDER);
+ page = alloc_pages(gfp_nested_mask(alloc_flags),
+ DEPOT_POOL_ORDER);
if (page)
prealloc = page_address(page);
}
@@ -687,6 +680,14 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
}
EXPORT_SYMBOL_GPL(stack_depot_save);
+struct stack_record *__stack_depot_get_stack_record(depot_stack_handle_t handle)
+{
+ if (!handle)
+ return NULL;
+
+ return depot_fetch_stack(handle);
+}
+
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
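
A hedged save/fetch round trip for the interfaces above (the new __stack_depot_get_stack_record() is an internal accessor returning the raw record for a handle; demo names are illustrative):

    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>

    static depot_stack_handle_t demo_record(void)
    {
    	unsigned long entries[16];
    	unsigned int nr;

    	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
    	return stack_depot_save(entries, nr, GFP_KERNEL);
    }

    static void demo_print(depot_stack_handle_t handle)
    {
    	unsigned long *entries;
    	unsigned int nr = stack_depot_fetch(handle, &entries);

    	stack_trace_print(entries, nr, 0);
    }
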
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
index 05947a2feb..3bc14d1ee8 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/stackinit_kunit.c
@@ -63,7 +63,16 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define FETCH_ARG_STRING(var) var
#define FETCH_ARG_STRUCT(var) &var
+/*
+ * On m68k, if the leaf function test variable is longer than 8 bytes,
+ * the start of the stack frame moves. 8 is sufficiently large to
+ * test m68k char arrays, but leave it at 16 for other architectures.
+ */
+#ifdef CONFIG_M68K
+#define FILL_SIZE_STRING 8
+#else
#define FILL_SIZE_STRING 16
+#endif
#define INIT_CLONE_SCALAR /**/
#define INIT_CLONE_STRING [FILL_SIZE_STRING]
@@ -165,19 +174,23 @@ static noinline void test_ ## name (struct kunit *test) \
/* Verify all bytes overwritten with 0xFF. */ \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] != 0xFF); \
- KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
- "leaf fill was not 0xFF!?\n"); \
/* Clear entire check buffer for later bit tests. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Extract stack-defined variable contents. */ \
ignored = leaf_ ##name((unsigned long)&ignored, 0, \
FETCH_ARG_ ## which(zero)); \
+ /* \
+ * Delay the sum test to here to do as little as \
+ * possible between the two leaf function calls. \
+ */ \
+ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
+ "leaf fill was not 0xFF!?\n"); \
\
/* Validate that compiler lined up fill and target. */ \
KUNIT_ASSERT_TRUE_MSG(test, \
stackinit_range_contains(fill_start, fill_size, \
target_start, target_size), \
- "stack fill missed target!? " \
+ "stackframe was not the same between calls!? " \
"(fill %zu wide, target offset by %d)\n", \
fill_size, \
(int)((ssize_t)(uintptr_t)fill_start - \
@@ -404,7 +417,7 @@ static noinline int leaf_switch_2_none(unsigned long sp, bool fill,
* These are expected to fail for most configurations because neither
* GCC nor Clang have a way to perform initialization of variables in
* non-code areas (i.e. in a switch statement before the first "case").
- * https://bugs.llvm.org/show_bug.cgi?id=44916
+ * https://llvm.org/pr44916
*/
DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL);
DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c
deleted file mode 100644
index e21be95514..0000000000
--- a/lib/strcat_kunit.c
+++ /dev/null
@@ -1,104 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Kernel module for testing 'strcat' family of functions.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <kunit/test.h>
-#include <linux/string.h>
-
-static volatile int unconst;
-
-static void strcat_test(struct kunit *test)
-{
- char dest[8];
-
- /* Destination is terminated. */
- memset(dest, 0, sizeof(dest));
- KUNIT_EXPECT_EQ(test, strlen(dest), 0);
- /* Empty copy does nothing. */
- KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* 4 characters copied in, stops at %NUL. */
- KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest);
- KUNIT_EXPECT_STREQ(test, dest, "four");
- KUNIT_EXPECT_EQ(test, dest[5], '\0');
- /* 2 more characters copied in okay. */
- KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
-}
-
-static void strncat_test(struct kunit *test)
-{
- char dest[8];
-
- /* Destination is terminated. */
- memset(dest, 0, sizeof(dest));
- KUNIT_EXPECT_EQ(test, strlen(dest), 0);
- /* Empty copy of size 0 does nothing. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* Empty copy of size 1 does nothing too. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* Copy of max 0 characters should do nothing. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
-
- /* 4 characters copied in, even if max is 8. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "four");
- KUNIT_EXPECT_EQ(test, dest[5], '\0');
- KUNIT_EXPECT_EQ(test, dest[6], '\0');
- /* 2 characters copied in okay, 2 ignored. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
-}
-
-static void strlcat_test(struct kunit *test)
-{
- char dest[8] = "";
- int len = sizeof(dest) + unconst;
-
- /* Destination is terminated. */
- KUNIT_EXPECT_EQ(test, strlen(dest), 0);
- /* Empty copy is size 0. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* Size 1 should keep buffer terminated, report size of source only. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4);
- KUNIT_EXPECT_STREQ(test, dest, "");
-
- /* 4 characters copied in. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4);
- KUNIT_EXPECT_STREQ(test, dest, "four");
- /* 2 characters copied in okay, gets to 6 total. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
- /* 2 characters ignored if max size (7) reached. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
- /* 1 of 2 characters skipped, now at true max size. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9);
- KUNIT_EXPECT_STREQ(test, dest, "fourABE");
- /* Everything else ignored, now at full size. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11);
- KUNIT_EXPECT_STREQ(test, dest, "fourABE");
-}
-
-static struct kunit_case strcat_test_cases[] = {
- KUNIT_CASE(strcat_test),
- KUNIT_CASE(strncat_test),
- KUNIT_CASE(strlcat_test),
- {}
-};
-
-static struct kunit_suite strcat_test_suite = {
- .name = "strcat",
- .test_cases = strcat_test_cases,
-};
-
-kunit_test_suite(strcat_test_suite);
-
-MODULE_LICENSE("GPL");
diff --git a/lib/string.c b/lib/string.c
index 6891d15ce9..966da44bfc 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -15,19 +15,20 @@
*/
#define __NO_FORTIFY
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
+#include <linux/bits.h>
#include <linux/bug.h>
+#include <linux/ctype.h>
#include <linux/errno.h>
-#include <linux/slab.h>
+#include <linux/limits.h>
+#include <linux/linkage.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/page.h>
+#include <asm/rwonce.h>
#include <asm/unaligned.h>
-#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
-#include <asm/page.h>
#ifndef __HAVE_ARCH_STRNCASECMP
/**
@@ -103,8 +104,7 @@ char *strncpy(char *dest, const char *src, size_t count)
EXPORT_SYMBOL(strncpy);
#endif
-#ifndef __HAVE_ARCH_STRSCPY
-ssize_t strscpy(char *dest, const char *src, size_t count)
+ssize_t sized_strscpy(char *dest, const char *src, size_t count)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
size_t max = count;
@@ -170,8 +170,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
return -E2BIG;
}
-EXPORT_SYMBOL(strscpy);
-#endif
+EXPORT_SYMBOL(sized_strscpy);
/**
* stpcpy - copy a string from src to dest returning a pointer to the new end
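
strscpy() is now routed through sized_strscpy(), presumably so that fortified and arch wrappers can funnel an explicit bound into one implementation; the caller-visible contract is unchanged:

    #include <linux/string.h>

    static void strscpy_demo(void)
    {
    	char buf[8];
    	ssize_t len;

    	len = strscpy(buf, "four", sizeof(buf));
    	/* len == 4, buf == "four" */
    	len = strscpy(buf, "longer than eight", sizeof(buf));
    	/* len == -E2BIG; buf holds "longer " and stays NUL-terminated */
    }
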
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 7713f73e66..69ba49b853 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -18,12 +18,14 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
/**
* string_get_size - get the size in the specified units
* @size: The size to be converted in blocks
* @blk_size: Size of the block (use 1 for size in bytes)
- * @units: units to use (powers of 1000 or 1024)
+ * @units: Units to use (powers of 1000 or 1024), plus flags controlling the space separator and "B" suffix
* @buf: buffer to format to
* @len: length of buffer
*
@@ -37,11 +39,12 @@
int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
char *buf, int len)
{
+ enum string_size_units units_base = units & STRING_UNITS_MASK;
static const char *const units_10[] = {
- "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
+ "", "k", "M", "G", "T", "P", "E", "Z", "Y",
};
static const char *const units_2[] = {
- "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"
+ "", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi",
};
static const char *const *const units_str[] = {
[STRING_UNITS_10] = units_10,
@@ -66,7 +69,7 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
/* This is Napier's algorithm. Reduce the original block size to
*
- * coefficient * divisor[units]^i
+ * coefficient * divisor[units_base]^i
*
* we do the reduction so both coefficients are just under 32 bits so
* that multiplying them together won't overflow 64 bits and we keep
@@ -76,12 +79,12 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
* precision is in the coefficients.
*/
while (blk_size >> 32) {
- do_div(blk_size, divisor[units]);
+ do_div(blk_size, divisor[units_base]);
i++;
}
while (size >> 32) {
- do_div(size, divisor[units]);
+ do_div(size, divisor[units_base]);
i++;
}
@@ -90,8 +93,8 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
size *= blk_size;
/* and logarithmically reduce it until it's just under the divisor */
- while (size >= divisor[units]) {
- remainder = do_div(size, divisor[units]);
+ while (size >= divisor[units_base]) {
+ remainder = do_div(size, divisor[units_base]);
i++;
}
@@ -101,10 +104,10 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
for (j = 0; sf_cap*10 < 1000; j++)
sf_cap *= 10;
- if (units == STRING_UNITS_2) {
+ if (units_base == STRING_UNITS_2) {
/* express the remainder as a decimal. It's currently the
* numerator of a fraction whose denominator is
- * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
+ * divisor[units_base], which is 1 << 10 for STRING_UNITS_2 */
remainder *= 1000;
remainder >>= 10;
}
@@ -126,10 +129,12 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
if (i >= ARRAY_SIZE(units_2))
unit = "UNK";
else
- unit = units_str[units][i];
+ unit = units_str[units_base][i];
- return snprintf(buf, len, "%u%s %s", (u32)size,
- tmp, unit);
+ return snprintf(buf, len, "%u%s%s%s%s", (u32)size, tmp,
+ (units & STRING_UNITS_NO_SPACE) ? "" : " ",
+ unit,
+ (units & STRING_UNITS_NO_BYTES) ? "" : "B");
}
EXPORT_SYMBOL(string_get_size);
@@ -826,40 +831,6 @@ char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n)
EXPORT_SYMBOL_GPL(devm_kasprintf_strarray);
/**
- * strscpy_pad() - Copy a C-string into a sized buffer
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @count: Size of destination buffer
- *
- * Copy the string, or as much of it as fits, into the dest buffer. The
- * behavior is undefined if the string buffers overlap. The destination
- * buffer is always %NUL terminated, unless it's zero-sized.
- *
- * If the source string is shorter than the destination buffer, zeros
- * the tail of the destination buffer.
- *
- * For full explanation of why you may want to consider using the
- * 'strscpy' functions please see the function docstring for strscpy().
- *
- * Returns:
- * * The number of characters copied (not including the trailing %NUL)
- * * -E2BIG if count is 0 or @src was truncated.
- */
-ssize_t strscpy_pad(char *dest, const char *src, size_t count)
-{
- ssize_t written;
-
- written = strscpy(dest, src, count);
- if (written < 0 || written == count - 1)
- return written;
-
- memset(dest + written + 1, 0, count - written - 1);
-
- return written;
-}
-EXPORT_SYMBOL(strscpy_pad);
-
-/**
* skip_spaces - Removes leading whitespace from @str.
* @str: The string to be stripped.
*
@@ -1042,10 +1013,28 @@ EXPORT_SYMBOL(__read_overflow2_field);
void __write_overflow_field(size_t avail, size_t wanted) { }
EXPORT_SYMBOL(__write_overflow_field);
-void fortify_panic(const char *name)
+static const char * const fortify_func_name[] = {
+#define MAKE_FORTIFY_FUNC_NAME(func) [MAKE_FORTIFY_FUNC(func)] = #func
+ EACH_FORTIFY_FUNC(MAKE_FORTIFY_FUNC_NAME)
+#undef MAKE_FORTIFY_FUNC_NAME
+};
+
+void __fortify_report(const u8 reason, const size_t avail, const size_t size)
+{
+ const u8 func = FORTIFY_REASON_FUNC(reason);
+ const bool write = FORTIFY_REASON_DIR(reason);
+ const char *name;
+
+ name = fortify_func_name[umin(func, FORTIFY_FUNC_UNKNOWN)];
+ WARN(1, "%s: detected buffer overflow: %zu byte %s of buffer size %zu\n",
+ name, size, str_read_write(!write), avail);
+}
+EXPORT_SYMBOL(__fortify_report);
+
+void __fortify_panic(const u8 reason, const size_t avail, const size_t size)
{
- pr_emerg("detected buffer overflow in %s\n", name);
+ __fortify_report(reason, avail, size);
BUG();
}
-EXPORT_SYMBOL(fortify_panic);
+EXPORT_SYMBOL(__fortify_panic);
#endif /* CONFIG_FORTIFY_SOURCE */
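
With the flag bits folded into @units, a call site can now drop the separator space and/or the trailing "B". A sketch of the combinations, with the expected text (hedged; the helper prints three significant figures) in comments:

    #include <linux/string_helpers.h>

    static void size_demo(char *buf, int len)
    {
    	string_get_size(8192, 1, STRING_UNITS_2, buf, len);
    	/* "8.00 KiB" */
    	string_get_size(8192, 1, STRING_UNITS_2 | STRING_UNITS_NO_SPACE, buf, len);
    	/* "8.00KiB" */
    	string_get_size(8192, 1, STRING_UNITS_10 | STRING_UNITS_NO_BYTES, buf, len);
    	/* "8.19 k" */
    }
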
diff --git a/lib/test-string_helpers.c b/lib/string_helpers_kunit.c
index 9a68849a5d..c853046183 100644
--- a/lib/test-string_helpers.c
+++ b/lib/string_helpers_kunit.c
@@ -1,34 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Test cases for lib/string_helpers.c module.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
+#include <kunit/test.h>
+#include <linux/array_size.h>
#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
-static __init bool test_string_check_buf(const char *name, unsigned int flags,
- char *in, size_t p,
- char *out_real, size_t q_real,
- char *out_test, size_t q_test)
+static void test_string_check_buf(struct kunit *test,
+ const char *name, unsigned int flags,
+ char *in, size_t p,
+ char *out_real, size_t q_real,
+ char *out_test, size_t q_test)
{
- if (q_real == q_test && !memcmp(out_test, out_real, q_test))
- return true;
-
- pr_warn("Test '%s' failed: flags = %#x\n", name, flags);
-
- print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1,
- in, p, true);
- print_hex_dump(KERN_WARNING, "Expected: ", DUMP_PREFIX_NONE, 16, 1,
- out_test, q_test, true);
- print_hex_dump(KERN_WARNING, "Got: ", DUMP_PREFIX_NONE, 16, 1,
- out_real, q_real, true);
-
- return false;
+ KUNIT_ASSERT_EQ_MSG(test, q_real, q_test, "name:%s", name);
+ KUNIT_EXPECT_MEMEQ_MSG(test, out_test, out_real, q_test,
+ "name:%s", name);
}
struct test_string {
@@ -37,7 +28,7 @@ struct test_string {
unsigned int flags;
};
-static const struct test_string strings[] __initconst = {
+static const struct test_string strings[] = {
{
.in = "\\f\\ \\n\\r\\t\\v",
.out = "\f\\ \n\r\t\v",
@@ -60,17 +51,19 @@ static const struct test_string strings[] __initconst = {
},
};
-static void __init test_string_unescape(const char *name, unsigned int flags,
- bool inplace)
+static void test_string_unescape(struct kunit *test,
+ const char *name, unsigned int flags,
+ bool inplace)
{
int q_real = 256;
- char *in = kmalloc(q_real, GFP_KERNEL);
- char *out_test = kmalloc(q_real, GFP_KERNEL);
- char *out_real = kmalloc(q_real, GFP_KERNEL);
+ char *in = kunit_kzalloc(test, q_real, GFP_KERNEL);
+ char *out_test = kunit_kzalloc(test, q_real, GFP_KERNEL);
+ char *out_real = kunit_kzalloc(test, q_real, GFP_KERNEL);
int i, p = 0, q_test = 0;
- if (!in || !out_test || !out_real)
- goto out;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, in);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_real);
for (i = 0; i < ARRAY_SIZE(strings); i++) {
const char *s = strings[i].in;
@@ -103,12 +96,8 @@ static void __init test_string_unescape(const char *name, unsigned int flags,
q_real = string_unescape(in, out_real, q_real, flags);
}
- test_string_check_buf(name, flags, in, p - 1, out_real, q_real,
+ test_string_check_buf(test, name, flags, in, p - 1, out_real, q_real,
out_test, q_test);
-out:
- kfree(out_real);
- kfree(out_test);
- kfree(in);
}
struct test_string_1 {
@@ -123,7 +112,7 @@ struct test_string_2 {
};
#define TEST_STRING_2_DICT_0 NULL
-static const struct test_string_2 escape0[] __initconst = {{
+static const struct test_string_2 escape0[] = {{
.in = "\f\\ \n\r\t\v",
.s1 = {{
.out = "\\f\\ \\n\\r\\t\\v",
@@ -221,7 +210,7 @@ static const struct test_string_2 escape0[] __initconst = {{
}};
#define TEST_STRING_2_DICT_1 "b\\ \t\r\xCF"
-static const struct test_string_2 escape1[] __initconst = {{
+static const struct test_string_2 escape1[] = {{
.in = "\f\\ \n\r\t\v",
.s1 = {{
.out = "\f\\134\\040\n\\015\\011\v",
@@ -358,7 +347,7 @@ static const struct test_string_2 escape1[] __initconst = {{
/* terminator */
}};
-static const struct test_string strings_upper[] __initconst = {
+static const struct test_string strings_upper[] = {
{
.in = "abcdefgh1234567890test",
.out = "ABCDEFGH1234567890TEST",
@@ -369,7 +358,7 @@ static const struct test_string strings_upper[] __initconst = {
},
};
-static const struct test_string strings_lower[] __initconst = {
+static const struct test_string strings_lower[] = {
{
.in = "ABCDEFGH1234567890TEST",
.out = "abcdefgh1234567890test",
@@ -380,8 +369,8 @@ static const struct test_string strings_lower[] __initconst = {
},
};
-static __init const char *test_string_find_match(const struct test_string_2 *s2,
- unsigned int flags)
+static const char *test_string_find_match(const struct test_string_2 *s2,
+ unsigned int flags)
{
const struct test_string_1 *s1 = s2->s1;
unsigned int i;
@@ -402,31 +391,31 @@ static __init const char *test_string_find_match(const struct test_string_2 *s2,
return NULL;
}
-static __init void
-test_string_escape_overflow(const char *in, int p, unsigned int flags, const char *esc,
+static void
+test_string_escape_overflow(struct kunit *test,
+ const char *in, int p, unsigned int flags, const char *esc,
int q_test, const char *name)
{
int q_real;
q_real = string_escape_mem(in, p, NULL, 0, flags, esc);
- if (q_real != q_test)
- pr_warn("Test '%s' failed: flags = %#x, osz = 0, expected %d, got %d\n",
- name, flags, q_test, q_real);
+ KUNIT_EXPECT_EQ_MSG(test, q_real, q_test, "name:%s: flags:%#x", name, flags);
}
-static __init void test_string_escape(const char *name,
- const struct test_string_2 *s2,
- unsigned int flags, const char *esc)
+static void test_string_escape(struct kunit *test, const char *name,
+ const struct test_string_2 *s2,
+ unsigned int flags, const char *esc)
{
size_t out_size = 512;
- char *out_test = kmalloc(out_size, GFP_KERNEL);
- char *out_real = kmalloc(out_size, GFP_KERNEL);
- char *in = kmalloc(256, GFP_KERNEL);
+ char *out_test = kunit_kzalloc(test, out_size, GFP_KERNEL);
+ char *out_real = kunit_kzalloc(test, out_size, GFP_KERNEL);
+ char *in = kunit_kzalloc(test, 256, GFP_KERNEL);
int p = 0, q_test = 0;
int q_real;
- if (!out_test || !out_real || !in)
- goto out;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_real);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, in);
for (; s2->in; s2++) {
const char *out;
@@ -462,62 +451,99 @@ static __init void test_string_escape(const char *name,
q_real = string_escape_mem(in, p, out_real, out_size, flags, esc);
- test_string_check_buf(name, flags, in, p, out_real, q_real, out_test,
+ test_string_check_buf(test, name, flags, in, p, out_real, q_real, out_test,
q_test);
- test_string_escape_overflow(in, p, flags, esc, q_test, name);
-
-out:
- kfree(in);
- kfree(out_real);
- kfree(out_test);
+ test_string_escape_overflow(test, in, p, flags, esc, q_test, name);
}
#define string_get_size_maxbuf 16
-#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \
- do { \
- BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \
- BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \
- __test_string_get_size((size), (blk_size), (exp_result10), \
- (exp_result2)); \
+#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \
+ do { \
+ BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \
+ BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \
+ __test_string_get_size(test, (size), (blk_size), (exp_result10), \
+ (exp_result2)); \
} while (0)
-static __init void test_string_get_size_check(const char *units,
- const char *exp,
- char *res,
- const u64 size,
- const u64 blk_size)
+static void test_string_get_size_check(struct kunit *test,
+ const char *units,
+ const char *exp,
+ char *res,
+ const u64 size,
+ const u64 blk_size)
{
- if (!memcmp(res, exp, strlen(exp) + 1))
- return;
-
- res[string_get_size_maxbuf - 1] = '\0';
-
- pr_warn("Test 'test_string_get_size' failed!\n");
- pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n",
+ KUNIT_EXPECT_MEMEQ_MSG(test, res, exp, strlen(exp) + 1,
+ "string_get_size(size = %llu, blk_size = %llu, units = %s)",
size, blk_size, units);
- pr_warn("expected: '%s', got '%s'\n", exp, res);
}
-static __init void __test_string_get_size(const u64 size, const u64 blk_size,
- const char *exp_result10,
- const char *exp_result2)
+static void __strchrcut(char *dst, const char *src, const char *cut)
+{
+ const char *from = src;
+ size_t len;
+
+ do {
+ len = strcspn(from, cut);
+ memcpy(dst, from, len);
+ dst += len;
+ from += len;
+ } while (*from++);
+ *dst = '\0';
+}
+
+static void __test_string_get_size_one(struct kunit *test,
+ const u64 size, const u64 blk_size,
+ const char *exp_result10,
+ const char *exp_result2,
+ enum string_size_units units,
+ const char *cut)
{
char buf10[string_get_size_maxbuf];
char buf2[string_get_size_maxbuf];
+ char exp10[string_get_size_maxbuf];
+ char exp2[string_get_size_maxbuf];
+ char prefix10[64];
+ char prefix2[64];
+
+ sprintf(prefix10, "STRING_UNITS_10 [%s]", cut);
+ sprintf(prefix2, "STRING_UNITS_2 [%s]", cut);
+
+ __strchrcut(exp10, exp_result10, cut);
+ __strchrcut(exp2, exp_result2, cut);
- string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10));
- string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2));
+ string_get_size(size, blk_size, STRING_UNITS_10 | units, buf10, sizeof(buf10));
+ string_get_size(size, blk_size, STRING_UNITS_2 | units, buf2, sizeof(buf2));
- test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10,
- size, blk_size);
+ test_string_get_size_check(test, prefix10, exp10, buf10, size, blk_size);
+ test_string_get_size_check(test, prefix2, exp2, buf2, size, blk_size);
+}
+
+static void __test_string_get_size(struct kunit *test,
+ const u64 size, const u64 blk_size,
+ const char *exp_result10,
+ const char *exp_result2)
+{
+ struct {
+ enum string_size_units units;
+ const char *cut;
+ } get_size_test_cases[] = {
+ { 0, "" },
+ { STRING_UNITS_NO_SPACE, " " },
+ { STRING_UNITS_NO_SPACE | STRING_UNITS_NO_BYTES, " B" },
+ { STRING_UNITS_NO_BYTES, "B" },
+ };
+ int i;
- test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2,
- size, blk_size);
+ for (i = 0; i < ARRAY_SIZE(get_size_test_cases); i++)
+ __test_string_get_size_one(test, size, blk_size,
+ exp_result10, exp_result2,
+ get_size_test_cases[i].units,
+ get_size_test_cases[i].cut);
}
-static __init void test_string_get_size(void)
+static void test_get_size(struct kunit *test)
{
/* small values */
test_string_get_size_one(0, 512, "0 B", "0 B");
@@ -537,7 +563,7 @@ static __init void test_string_get_size(void)
test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
}
-static void __init test_string_upper_lower(void)
+static void test_upper_lower(struct kunit *test)
{
char *dst;
int i;
@@ -547,16 +573,10 @@ static void __init test_string_upper_lower(void)
int len = strlen(strings_upper[i].in) + 1;
dst = kmalloc(len, GFP_KERNEL);
- if (!dst)
- return;
+ KUNIT_ASSERT_NOT_NULL(test, dst);
string_upper(dst, s);
- if (memcmp(dst, strings_upper[i].out, len)) {
- pr_warn("Test 'string_upper' failed : expected %s, got %s!\n",
- strings_upper[i].out, dst);
- kfree(dst);
- return;
- }
+ KUNIT_EXPECT_STREQ(test, dst, strings_upper[i].out);
kfree(dst);
}
@@ -565,45 +585,45 @@ static void __init test_string_upper_lower(void)
int len = strlen(strings_lower[i].in) + 1;
dst = kmalloc(len, GFP_KERNEL);
- if (!dst)
- return;
+ KUNIT_ASSERT_NOT_NULL(test, dst);
string_lower(dst, s);
- if (memcmp(dst, strings_lower[i].out, len)) {
- pr_warn("Test 'string_lower failed : : expected %s, got %s!\n",
- strings_lower[i].out, dst);
- kfree(dst);
- return;
- }
+ KUNIT_EXPECT_STREQ(test, dst, strings_lower[i].out);
kfree(dst);
}
}
-static int __init test_string_helpers_init(void)
+static void test_unescape(struct kunit *test)
{
unsigned int i;
- pr_info("Running tests...\n");
for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
- test_string_unescape("unescape", i, false);
- test_string_unescape("unescape inplace",
+ test_string_unescape(test, "unescape", i, false);
+ test_string_unescape(test, "unescape inplace",
get_random_u32_below(UNESCAPE_ALL_MASK + 1), true);
/* Without dictionary */
for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
- test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0);
+ test_string_escape(test, "escape 0", escape0, i, TEST_STRING_2_DICT_0);
/* With dictionary */
for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
- test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1);
+ test_string_escape(test, "escape 1", escape1, i, TEST_STRING_2_DICT_1);
+}
- /* Test string_get_size() */
- test_string_get_size();
+static struct kunit_case string_helpers_test_cases[] = {
+ KUNIT_CASE(test_get_size),
+ KUNIT_CASE(test_upper_lower),
+ KUNIT_CASE(test_unescape),
+ {}
+};
- /* Test string upper(), string_lower() */
- test_string_upper_lower();
+static struct kunit_suite string_helpers_test_suite = {
+ .name = "string_helpers",
+ .test_cases = string_helpers_test_cases,
+};
- return -EINVAL;
-}
-module_init(test_string_helpers_init);
+kunit_test_suites(&string_helpers_test_suite);
+
+MODULE_DESCRIPTION("Test cases for string helpers module");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/string_kunit.c b/lib/string_kunit.c
new file mode 100644
index 0000000000..c919e3293d
--- /dev/null
+++ b/lib/string_kunit.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for string functions.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#define STRCMP_LARGE_BUF_LEN 2048
+#define STRCMP_CHANGE_POINT 1337
+#define STRCMP_TEST_EXPECT_EQUAL(test, fn, ...) KUNIT_EXPECT_EQ(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_LOWER(test, fn, ...) KUNIT_EXPECT_LT(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_GREATER(test, fn, ...) KUNIT_EXPECT_GT(test, fn(__VA_ARGS__), 0)
+
+static void string_test_memset16(struct kunit *test)
+{
+ unsigned i, j, k;
+ u16 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 2, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset16(p + i, 0xb1b2, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void string_test_memset32(struct kunit *test)
+{
+ unsigned i, j, k;
+ u32 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 4, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset32(p + i, 0xb1b2b3b4, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2b3b4,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void string_test_memset64(struct kunit *test)
+{
+ unsigned i, j, k;
+ u64 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 8, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1a1a1a1a1ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2b3b4b5b6b7b8ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1a1a1a1a1ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void string_test_strchr(struct kunit *test)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ result = strchr(test_string, test_string[i]);
+ KUNIT_ASSERT_EQ_MSG(test, result - test_string, i,
+ "char:%c", 'a' + i);
+ }
+
+ result = strchr(empty_string, '\0');
+ KUNIT_ASSERT_PTR_EQ(test, result, empty_string);
+
+ result = strchr(empty_string, 'a');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strchr(test_string, 'z');
+ KUNIT_ASSERT_NULL(test, result);
+}
+
+static void string_test_strnchr(struct kunit *test)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i, j;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ for (j = 0; j < strlen(test_string) + 2; j++) {
+ result = strnchr(test_string, j, test_string[i]);
+ if (j <= i) {
+ KUNIT_ASSERT_NULL_MSG(test, result,
+ "char:%c i:%d j:%d", 'a' + i, i, j);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, result - test_string, i,
+ "char:%c i:%d j:%d", 'a' + i, i, j);
+ }
+ }
+ }
+
+ result = strnchr(empty_string, 0, '\0');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strnchr(empty_string, 1, '\0');
+ KUNIT_ASSERT_PTR_EQ(test, result, empty_string);
+
+ result = strnchr(empty_string, 1, 'a');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strnchr(NULL, 0, '\0');
+ KUNIT_ASSERT_NULL(test, result);
+}
+
+static void string_test_strspn(struct kunit *test)
+{
+ static const struct strspn_test {
+ const char str[16];
+ const char accept[16];
+ const char reject[16];
+ unsigned a;
+ unsigned r;
+ } tests[] = {
+ { "foobar", "", "", 0, 6 },
+ { "abba", "abc", "ABBA", 4, 4 },
+ { "abba", "a", "b", 1, 1 },
+ { "", "abc", "abc", 0, 0},
+ };
+ const struct strspn_test *s = tests;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
+ KUNIT_ASSERT_EQ_MSG(test, s->a, strspn(s->str, s->accept),
+ "i:%zu", i);
+ KUNIT_ASSERT_EQ_MSG(test, s->r, strcspn(s->str, s->reject),
+ "i:%zu", i);
+ }
+}
+
+static char strcmp_buffer1[STRCMP_LARGE_BUF_LEN];
+static char strcmp_buffer2[STRCMP_LARGE_BUF_LEN];
+
+static void strcmp_fill_buffers(char fill1, char fill2)
+{
+ memset(strcmp_buffer1, fill1, STRCMP_LARGE_BUF_LEN);
+ memset(strcmp_buffer2, fill2, STRCMP_LARGE_BUF_LEN);
+ strcmp_buffer1[STRCMP_LARGE_BUF_LEN - 1] = 0;
+ strcmp_buffer2[STRCMP_LARGE_BUF_LEN - 1] = 0;
+}
+
+static void string_test_strcmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "Hello, Kernel!", "Hello, Kernel!");
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Hello, KUnit!", "Hello, Kernel!");
+ /* First string is lexicographically larger than the second */
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, "Hello, Kernel!", "Hello, KUnit!");
+ /* Empty string is always lexicographically less than any non-empty string */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "", "Non-empty string");
+ /* Two empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "", "");
+ /* Compare two strings which have only one char difference */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Abacaba", "Abadaba");
+ /* Compare two strings which have the same prefix */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Just a string", "Just a string and something else");
+}
+
+static void string_test_strcmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void string_test_strncmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, KUnit!", "Hello, KUnit!", 13);
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Hello, KUnit!", "Hello, Kernel!", 13);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, Kernel!", "Hello, KUnit!", 0);
+ /* Strings with common prefix are equal if count = length of prefix */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Abacaba", "Abadaba", 3);
+ /* Strings with common prefix are not equal when count = length of prefix + 1 */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Abacaba", "Abadaba", 4);
+ /* If one string is a prefix of another, the shorter string is lexicographically smaller */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string and something else"));
+ /*
+ * If one string is a prefix of another, and we check first length
+ * of prefix chars, the result is 'equal'
+ */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string"));
+}
+
+static void string_test_strncmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+	/* The strings are equal up to STRCMP_CHANGE_POINT */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+static void string_test_strcasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "Hello, Kernel!", "HeLLO, KErNeL!");
+ /* Empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "", "");
+	/* Although the ASCII code for 'a' is greater than that for 'B', case-insensitively 'a' < 'B' */
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, "a", "B");
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, "B", "a");
+ /* Special symbols and numbers should be processed correctly */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "-+**.1230ghTTT~^", "-+**.1230Ghttt~^");
+}
+
+static void string_test_strcasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void string_test_strncasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbAcAbA", "Abacaba", strlen("Abacaba"));
+ /* strncasecmp should check 'count' chars only */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbaCaBa", "abaCaDa", 5);
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, "a", "B", 1);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, "B", "a", 1);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "Abacaba", "Not abacaba", 0);
+}
+
+static void string_test_strncasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+/**
+ * strscpy_check() - Run a specific test case.
+ * @test: KUnit test context pointer
+ * @src: Source string, argument to strscpy_pad()
+ * @count: Size of destination buffer, argument to strscpy_pad()
+ * @expected: Expected return value from call to strscpy_pad()
+ * @chars: Number of characters from the src string expected to be
+ * written to the dst buffer.
+ * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
+ * @pad: Number of pad characters expected (in the tail of dst buffer).
+ * (@pad does not include the null terminator byte.)
+ *
+ * Calls strscpy_pad() and verifies the return value and state of the
+ * destination buffer after the call returns.
+ */
+static void strscpy_check(struct kunit *test, char *src, int count,
+ int expected, int chars, int terminator, int pad)
+{
+ int nr_bytes_poison;
+ int max_expected;
+ int max_count;
+ int written;
+ char buf[6];
+ int index, i;
+ const char POISON = 'z';
+
+ KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
+ "null source string not supported");
+
+ memset(buf, POISON, sizeof(buf));
+ /* Future proofing test suite, validate args */
+ max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
+ max_expected = count - 1; /* Space for the null */
+
+ KUNIT_ASSERT_LE_MSG(test, count, max_count,
+ "count (%d) is too big (%d) ... aborting", count, max_count);
+ KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
+ "expected (%d) is bigger than can possibly be returned (%d)",
+ expected, max_expected);
+
+ written = strscpy_pad(buf, src, count);
+ KUNIT_ASSERT_EQ(test, written, expected);
+
+ if (count && written == -E2BIG) {
+ KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
+ "buffer state invalid for -E2BIG");
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "too big string is not null terminated correctly");
+ }
+
+ for (i = 0; i < chars; i++)
+ KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
+ "buf[i]==%c != src[i]==%c", buf[i], src[i]);
+
+ if (terminator)
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "string is not null terminated correctly");
+
+ for (i = 0; i < pad; i++) {
+ index = chars + terminator + i;
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
+ "padding missing at index: %d", i);
+ }
+
+ nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
+ for (i = 0; i < nr_bytes_poison; i++) {
+ index = sizeof(buf) - 1 - i; /* Check from the end back */
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
+ "poison value missing at index: %d", i);
+ }
+}
+
+static void string_test_strscpy(struct kunit *test)
+{
+ char dest[8];
+
+	/*
+	 * strscpy_check() uses a destination buffer of size 6 and needs at
+	 * least 2 characters spare (one for null and one to check for
+	 * overflow). This means we should only call strscpy_check() with
+	 * strings up to a maximum of 4 characters long and 'count'
+	 * should not exceed 4. To test with longer strings increase
+	 * the buffer size in strscpy_check().
+	 */
+
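+	/*
+	 * Reading the table below: e.g. strscpy_check(test, "ab", 4, 2, 2, 1, 1)
+	 * expects a return value of 2, the two characters "ab", a NUL, one
+	 * byte of zero padding, and untouched poison in the remaining bytes.
+	 */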
+ /* strscpy_check(test, src, count, expected, chars, terminator, pad) */
+ strscpy_check(test, "a", 0, -E2BIG, 0, 0, 0);
+ strscpy_check(test, "", 0, -E2BIG, 0, 0, 0);
+
+ strscpy_check(test, "a", 1, -E2BIG, 0, 1, 0);
+ strscpy_check(test, "", 1, 0, 0, 1, 0);
+
+ strscpy_check(test, "ab", 2, -E2BIG, 1, 1, 0);
+ strscpy_check(test, "a", 2, 1, 1, 1, 0);
+ strscpy_check(test, "", 2, 0, 0, 1, 1);
+
+ strscpy_check(test, "abc", 3, -E2BIG, 2, 1, 0);
+ strscpy_check(test, "ab", 3, 2, 2, 1, 0);
+ strscpy_check(test, "a", 3, 1, 1, 1, 1);
+ strscpy_check(test, "", 3, 0, 0, 1, 2);
+
+ strscpy_check(test, "abcd", 4, -E2BIG, 3, 1, 0);
+ strscpy_check(test, "abc", 4, 3, 3, 1, 0);
+ strscpy_check(test, "ab", 4, 2, 2, 1, 1);
+ strscpy_check(test, "a", 4, 1, 1, 1, 2);
+ strscpy_check(test, "", 4, 0, 0, 1, 3);
+
+ /* Compile-time-known source strings. */
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG);
+}
+
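+/*
+ * Adding 'unconst' keeps values from being compile-time constants, so
+ * the runtime (fortified) string paths are what actually get tested.
+ */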
+static volatile int unconst;
+
+static void string_test_strcat(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy does nothing. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* 4 characters copied in, stops at %NUL. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ /* 2 more characters copied in okay. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void string_test_strncat(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy of size 0 does nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Empty copy of size 1 does nothing too. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Copy of max 0 characters should do nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in, even if max is 8. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ KUNIT_EXPECT_EQ(test, dest[6], '\0');
+ /* 2 characters copied in okay, 2 ignored. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void string_test_strlcat(struct kunit *test)
+{
+ char dest[8] = "";
+ int len = sizeof(dest) + unconst;
+
+ /* Destination is terminated. */
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy is size 0. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Size 1 should keep buffer terminated, report size of source only. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ /* 2 characters copied in okay, gets to 6 total. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ /* 2 characters ignored if max size (7) reached. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+	/* Only 1 of 3 characters copied in, now at true max size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+ /* Everything else ignored, now at full size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+}
+
+static void string_test_strtomem(struct kunit *test)
+{
+ static const char input[sizeof(unsigned long)] = "hi";
+ static const char truncate[] = "this is too long";
+ struct {
+ unsigned long canary1;
+ unsigned char output[sizeof(unsigned long)] __nonstring;
+ unsigned long canary2;
+ } wrap;
+
+ memset(&wrap, 0xFF, sizeof(wrap));
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
+ "bad initial canary value");
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
+ "bad initial canary value");
+
+ /* Check unpadded copy leaves surroundings untouched. */
+ strtomem(wrap.output, input);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated copy leaves surroundings untouched. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check padded copy leaves only string padded. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem_pad(wrap.output, input, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated padded copy has no padding. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+}
+
+static void string_test_memtostr(struct kunit *test)
+{
+ char nonstring[7] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' };
+ char nonstring_small[3] = { 'a', 'b', 'c' };
+ char dest[sizeof(nonstring) + 1];
+
+ /* Copy in a non-NUL-terminated string into exactly right-sized dest. */
+ KUNIT_EXPECT_EQ(test, sizeof(dest), sizeof(nonstring) + 1);
+ memset(dest, 'X', sizeof(dest));
+ memtostr(dest, nonstring);
+ KUNIT_EXPECT_STREQ(test, dest, "abcdefg");
+ memset(dest, 'X', sizeof(dest));
+ memtostr(dest, nonstring_small);
+ KUNIT_EXPECT_STREQ(test, dest, "abc");
+ KUNIT_EXPECT_EQ(test, dest[7], 'X');
+
+ memset(dest, 'X', sizeof(dest));
+ memtostr_pad(dest, nonstring);
+ KUNIT_EXPECT_STREQ(test, dest, "abcdefg");
+ memset(dest, 'X', sizeof(dest));
+ memtostr_pad(dest, nonstring_small);
+ KUNIT_EXPECT_STREQ(test, dest, "abc");
+ KUNIT_EXPECT_EQ(test, dest[7], '\0');
+}
+
+static struct kunit_case string_test_cases[] = {
+ KUNIT_CASE(string_test_memset16),
+ KUNIT_CASE(string_test_memset32),
+ KUNIT_CASE(string_test_memset64),
+ KUNIT_CASE(string_test_strchr),
+ KUNIT_CASE(string_test_strnchr),
+ KUNIT_CASE(string_test_strspn),
+ KUNIT_CASE(string_test_strcmp),
+ KUNIT_CASE(string_test_strcmp_long_strings),
+ KUNIT_CASE(string_test_strncmp),
+ KUNIT_CASE(string_test_strncmp_long_strings),
+ KUNIT_CASE(string_test_strcasecmp),
+ KUNIT_CASE(string_test_strcasecmp_long_strings),
+ KUNIT_CASE(string_test_strncasecmp),
+ KUNIT_CASE(string_test_strncasecmp_long_strings),
+ KUNIT_CASE(string_test_strscpy),
+ KUNIT_CASE(string_test_strcat),
+ KUNIT_CASE(string_test_strncat),
+ KUNIT_CASE(string_test_strlcat),
+ KUNIT_CASE(string_test_strtomem),
+ KUNIT_CASE(string_test_memtostr),
+ {}
+};
+
+static struct kunit_suite string_test_suite = {
+ .name = "string",
+ .test_cases = string_test_cases,
+};
+
+kunit_test_suites(&string_test_suite);
+
+MODULE_DESCRIPTION("Test cases for string functions");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c
deleted file mode 100644
index a6b6344354..0000000000
--- a/lib/strscpy_kunit.c
+++ /dev/null
@@ -1,142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Kernel module for testing 'strscpy' family of functions.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <kunit/test.h>
-#include <linux/string.h>
-
-/*
- * tc() - Run a specific test case.
- * @src: Source string, argument to strscpy_pad()
- * @count: Size of destination buffer, argument to strscpy_pad()
- * @expected: Expected return value from call to strscpy_pad()
- * @terminator: 1 if there should be a terminating null byte 0 otherwise.
- * @chars: Number of characters from the src string expected to be
- * written to the dst buffer.
- * @pad: Number of pad characters expected (in the tail of dst buffer).
- * (@pad does not include the null terminator byte.)
- *
- * Calls strscpy_pad() and verifies the return value and state of the
- * destination buffer after the call returns.
- */
-static void tc(struct kunit *test, char *src, int count, int expected,
- int chars, int terminator, int pad)
-{
- int nr_bytes_poison;
- int max_expected;
- int max_count;
- int written;
- char buf[6];
- int index, i;
- const char POISON = 'z';
-
- KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
- "null source string not supported");
-
- memset(buf, POISON, sizeof(buf));
- /* Future proofing test suite, validate args */
- max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
- max_expected = count - 1; /* Space for the null */
-
- KUNIT_ASSERT_LE_MSG(test, count, max_count,
- "count (%d) is too big (%d) ... aborting", count, max_count);
- KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
- "expected (%d) is bigger than can possibly be returned (%d)",
- expected, max_expected);
-
- written = strscpy_pad(buf, src, count);
- KUNIT_ASSERT_EQ(test, written, expected);
-
- if (count && written == -E2BIG) {
- KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
- "buffer state invalid for -E2BIG");
- KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
- "too big string is not null terminated correctly");
- }
-
- for (i = 0; i < chars; i++)
- KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
- "buf[i]==%c != src[i]==%c", buf[i], src[i]);
-
- if (terminator)
- KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
- "string is not null terminated correctly");
-
- for (i = 0; i < pad; i++) {
- index = chars + terminator + i;
- KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
- "padding missing at index: %d", i);
- }
-
- nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
- for (i = 0; i < nr_bytes_poison; i++) {
- index = sizeof(buf) - 1 - i; /* Check from the end back */
- KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
- "poison value missing at index: %d", i);
- }
-}
-
-static void strscpy_test(struct kunit *test)
-{
- char dest[8];
-
- /*
- * tc() uses a destination buffer of size 6 and needs at
- * least 2 characters spare (one for null and one to check for
- * overflow). This means we should only call tc() with
- * strings up to a maximum of 4 characters long and 'count'
- * should not exceed 4. To test with longer strings increase
- * the buffer size in tc().
- */
-
- /* tc(test, src, count, expected, chars, terminator, pad) */
- tc(test, "a", 0, -E2BIG, 0, 0, 0);
- tc(test, "", 0, -E2BIG, 0, 0, 0);
-
- tc(test, "a", 1, -E2BIG, 0, 1, 0);
- tc(test, "", 1, 0, 0, 1, 0);
-
- tc(test, "ab", 2, -E2BIG, 1, 1, 0);
- tc(test, "a", 2, 1, 1, 1, 0);
- tc(test, "", 2, 0, 0, 1, 1);
-
- tc(test, "abc", 3, -E2BIG, 2, 1, 0);
- tc(test, "ab", 3, 2, 2, 1, 0);
- tc(test, "a", 3, 1, 1, 1, 1);
- tc(test, "", 3, 0, 0, 1, 2);
-
- tc(test, "abcd", 4, -E2BIG, 3, 1, 0);
- tc(test, "abc", 4, 3, 3, 1, 0);
- tc(test, "ab", 4, 2, 2, 1, 1);
- tc(test, "a", 4, 1, 1, 1, 2);
- tc(test, "", 4, 0, 0, 1, 3);
-
- /* Compile-time-known source strings. */
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG);
-}
-
-static struct kunit_case strscpy_test_cases[] = {
- KUNIT_CASE(strscpy_test),
- {}
-};
-
-static struct kunit_suite strscpy_test_suite = {
- .name = "strscpy",
- .test_cases = strscpy_test_cases,
-};
-
-kunit_test_suite(strscpy_test_suite);
-
-MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>");
-MODULE_LICENSE("GPL");
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 65f22c2578..6dfb8d46a4 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -60,18 +60,17 @@ static const unsigned long exp3_1_0[] __initconst = {
};
static bool __init
-__check_eq_uint(const char *srcfile, unsigned int line,
- const unsigned int exp_uint, unsigned int x)
+__check_eq_ulong(const char *srcfile, unsigned int line,
+ const unsigned long exp_ulong, unsigned long x)
{
- if (exp_uint != x) {
- pr_err("[%s:%u] expected %u, got %u\n",
- srcfile, line, exp_uint, x);
+ if (exp_ulong != x) {
+ pr_err("[%s:%u] expected %lu, got %lu\n",
+ srcfile, line, exp_ulong, x);
return false;
}
return true;
}
-
static bool __init
__check_eq_bitmap(const char *srcfile, unsigned int line,
const unsigned long *exp_bmap, const unsigned long *bmap,
@@ -185,7 +184,8 @@ __check_eq_str(const char *srcfile, unsigned int line,
result; \
})
-#define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__)
+#define expect_eq_ulong(...) __expect_eq(ulong, ##__VA_ARGS__)
+#define expect_eq_uint(x, y) expect_eq_ulong((unsigned int)(x), (unsigned int)(y))
#define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__)
#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__)
#define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__)
@@ -244,7 +244,7 @@ static void __init test_find_nth_bit(void)
expect_eq_uint(60, find_nth_bit(bmap, 64 * 3, 5));
expect_eq_uint(80, find_nth_bit(bmap, 64 * 3, 6));
expect_eq_uint(123, find_nth_bit(bmap, 64 * 3, 7));
- expect_eq_uint(64 * 3, find_nth_bit(bmap, 64 * 3, 8));
+ expect_eq_uint(0, !!(find_nth_bit(bmap, 64 * 3, 8) < 64 * 3));
expect_eq_uint(10, find_nth_bit(bmap, 64 * 3 - 1, 0));
expect_eq_uint(20, find_nth_bit(bmap, 64 * 3 - 1, 1));
@@ -254,7 +254,7 @@ static void __init test_find_nth_bit(void)
expect_eq_uint(60, find_nth_bit(bmap, 64 * 3 - 1, 5));
expect_eq_uint(80, find_nth_bit(bmap, 64 * 3 - 1, 6));
expect_eq_uint(123, find_nth_bit(bmap, 64 * 3 - 1, 7));
- expect_eq_uint(64 * 3 - 1, find_nth_bit(bmap, 64 * 3 - 1, 8));
+ expect_eq_uint(0, !!(find_nth_bit(bmap, 64 * 3 - 1, 8) < 64 * 3 - 1));
for_each_set_bit(bit, exp1, EXP1_IN_BITS) {
b = find_nth_bit(exp1, EXP1_IN_BITS, cnt++);
@@ -380,6 +380,47 @@ static void __init test_replace(void)
expect_eq_bitmap(bmap, exp3_1_0, nbits);
}
+static const unsigned long sg_mask[] __initconst = {
+ BITMAP_FROM_U64(0x000000000000035aULL),
+};
+
+static const unsigned long sg_src[] __initconst = {
+ BITMAP_FROM_U64(0x0000000000000667ULL),
+};
+
+static const unsigned long sg_gather_exp[] __initconst = {
+ BITMAP_FROM_U64(0x0000000000000029ULL),
+};
+
+static const unsigned long sg_scatter_exp[] __initconst = {
+ BITMAP_FROM_U64(0x000000000000021aULL),
+};
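+
+/*
+ * Worked example (bit 0 is the LSB): sg_mask 0x35a has bits 1,3,4,6,8,9
+ * set and sg_src 0x667 has bits 0,1,2,5,6,9,10 set. bitmap_gather()
+ * copies the src bits found at the mask positions (1,0,0,1,0,1) into
+ * consecutive dst bits, giving 0x29; bitmap_scatter() spreads
+ * consecutive src bits (1,1,1,0,0,1) onto the mask positions, giving
+ * 0x21a.
+ */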
+
+static void __init test_bitmap_sg(void)
+{
+ unsigned int nbits = 64;
+ DECLARE_BITMAP(bmap_gather, 100);
+ DECLARE_BITMAP(bmap_scatter, 100);
+ DECLARE_BITMAP(bmap_tmp, 100);
+ DECLARE_BITMAP(bmap_res, 100);
+
+ /* Simple gather call */
+ bitmap_zero(bmap_gather, 100);
+ bitmap_gather(bmap_gather, sg_src, sg_mask, nbits);
+ expect_eq_bitmap(sg_gather_exp, bmap_gather, nbits);
+
+ /* Simple scatter call */
+ bitmap_zero(bmap_scatter, 100);
+ bitmap_scatter(bmap_scatter, sg_src, sg_mask, nbits);
+ expect_eq_bitmap(sg_scatter_exp, bmap_scatter, nbits);
+
+ /* Scatter/gather relationship */
+ bitmap_zero(bmap_tmp, 100);
+ bitmap_gather(bmap_tmp, bmap_scatter, sg_mask, nbits);
+ bitmap_scatter(bmap_res, bmap_tmp, sg_mask, nbits);
+ expect_eq_bitmap(bmap_scatter, bmap_res, nbits);
+}
+
#define PARSE_TIME 0x1
#define NO_LEN 0x2
@@ -507,7 +548,7 @@ static void __init test_bitmap_parselist(void)
}
if (ptest.flags & PARSE_TIME)
- pr_err("parselist: %d: input is '%s' OK, Time: %llu\n",
+ pr_info("parselist: %d: input is '%s' OK, Time: %llu\n",
i, ptest.in, time);
#undef ptest
@@ -546,7 +587,7 @@ static void __init test_bitmap_printlist(void)
goto out;
}
- pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
+ pr_info("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
out:
kfree(buf);
kfree(bmap);
@@ -624,7 +665,7 @@ static void __init test_bitmap_parse(void)
}
if (test.flags & PARSE_TIME)
- pr_err("parse: %d: input is '%s' OK, Time: %llu\n",
+ pr_info("parse: %d: input is '%s' OK, Time: %llu\n",
i, test.in, time);
}
}
@@ -1204,14 +1245,7 @@ static void __init test_bitmap_const_eval(void)
* in runtime.
*/
- /*
- * Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`.
- * Clang on s390 optimizes bitops at compile-time as intended, but at
- * the same time stops treating @bitmap and @bitopvar as compile-time
- * constants after regular test_bit() is executed, thus triggering the
- * build bugs below. So, call const_test_bit() there directly until
- * the compiler is fixed.
- */
+ /* Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }` */
bitmap_clear(bitmap, 0, BITS_PER_LONG);
if (!test_bit(7, bitmap))
bitmap_set(bitmap, 5, 2);
@@ -1243,8 +1277,179 @@ static void __init test_bitmap_const_eval(void)
/* ~BIT(25) */
BUILD_BUG_ON(!__builtin_constant_p(~var));
BUILD_BUG_ON(~var != ~BIT(25));
+
+ /* ~BIT(25) | BIT(25) == ~0UL */
+ bitmap_complement(&var, &var, BITS_PER_LONG);
+ __assign_bit(25, &var, true);
+
+ /* !(~(~0UL)) == 1 */
+ res = bitmap_full(&var, BITS_PER_LONG);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(!res);
+}
+
+/*
+ * Test bitmap should be big enough to include the cases when start is not in
+ * the first word, and start+nbits lands in the following word.
+ */
+#define TEST_BIT_LEN (1000)
+
+/*
+ * Helper function to test bitmap_write() overwriting the chosen byte pattern.
+ */
+static void __init test_bitmap_write_helper(const char *pattern)
+{
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ DECLARE_BITMAP(exp_bitmap, TEST_BIT_LEN);
+ DECLARE_BITMAP(pat_bitmap, TEST_BIT_LEN);
+ unsigned long w, r, bit;
+ int i, n, nbits;
+
+ /*
+ * Only parse the pattern once and store the result in the intermediate
+ * bitmap.
+ */
+ bitmap_parselist(pattern, pat_bitmap, TEST_BIT_LEN);
+
+ /*
+ * Check that writing a single bit does not accidentally touch the
+ * adjacent bits.
+ */
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (bit = 0; bit <= 1; bit++) {
+ bitmap_write(bitmap, bit, i, 1);
+ __assign_bit(i, exp_bitmap, bit);
+ expect_eq_bitmap(exp_bitmap, bitmap,
+ TEST_BIT_LEN);
+ }
+ }
+
+ /* Ensure writing 0 bits does not change anything. */
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ bitmap_write(bitmap, ~0UL, i, 0);
+ expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
+ }
+
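+	/*
+	 * For each width, take the @nbits most significant bits of a fixed
+	 * pattern, write them at every possible offset, and check both the
+	 * expected bitmap contents and that bitmap_read() returns exactly
+	 * what was written.
+	 */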
+ for (nbits = BITS_PER_LONG; nbits >= 1; nbits--) {
+ w = IS_ENABLED(CONFIG_64BIT) ? 0xdeadbeefdeadbeefUL
+ : 0xdeadbeefUL;
+ w >>= (BITS_PER_LONG - nbits);
+ for (i = 0; i <= TEST_BIT_LEN - nbits; i++) {
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (n = 0; n < nbits; n++)
+ __assign_bit(i + n, exp_bitmap, w & BIT(n));
+ bitmap_write(bitmap, w, i, nbits);
+ expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
+ r = bitmap_read(bitmap, i, nbits);
+ expect_eq_ulong(r, w);
+ }
+ }
+}
+
+static void __init test_bitmap_read_write(void)
+{
+	const char *pattern[3] = {"", "all:1/2", "all"};
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ unsigned long zero_bits = 0, bits_per_long = BITS_PER_LONG;
+ unsigned long val;
+ int i, pi;
+
+ /*
+ * Reading/writing zero bits should not crash the kernel.
+ * READ_ONCE() prevents constant folding.
+ */
+ bitmap_write(NULL, 0, 0, READ_ONCE(zero_bits));
+ /* Return value of bitmap_read() is undefined here. */
+ bitmap_read(NULL, 0, READ_ONCE(zero_bits));
+
+ /*
+ * Reading/writing more than BITS_PER_LONG bits should not crash the
+ * kernel. READ_ONCE() prevents constant folding.
+ */
+ bitmap_write(NULL, 0, 0, READ_ONCE(bits_per_long) + 1);
+ /* Return value of bitmap_read() is undefined here. */
+ bitmap_read(NULL, 0, READ_ONCE(bits_per_long) + 1);
+
+ /*
+ * Ensure that bitmap_read() reads the same value that was previously
+	 * written, and two consecutive values are correctly merged.
+ * The resulting bit pattern is asymmetric to rule out possible issues
+ * with bit numeration order.
+ */
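+	/*
+	 * E.g. writing 0b10101 at bit i and 0b101 at bit i + 5 must read
+	 * back over 8 bits as (0b101 << 5) | 0b10101 == 0b10110101.
+	 */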
+ for (i = 0; i < TEST_BIT_LEN - 7; i++) {
+ bitmap_zero(bitmap, TEST_BIT_LEN);
+
+ bitmap_write(bitmap, 0b10101UL, i, 5);
+ val = bitmap_read(bitmap, i, 5);
+ expect_eq_ulong(0b10101UL, val);
+
+ bitmap_write(bitmap, 0b101UL, i + 5, 3);
+ val = bitmap_read(bitmap, i + 5, 3);
+ expect_eq_ulong(0b101UL, val);
+
+ val = bitmap_read(bitmap, i, 8);
+ expect_eq_ulong(0b10110101UL, val);
+ }
+
+ for (pi = 0; pi < ARRAY_SIZE(pattern); pi++)
+ test_bitmap_write_helper(pattern[pi]);
}
+static void __init test_bitmap_read_perf(void)
+{
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ unsigned int cnt, nbits, i;
+ unsigned long val;
+ ktime_t time;
+
+ bitmap_fill(bitmap, TEST_BIT_LEN);
+ time = ktime_get();
+ for (cnt = 0; cnt < 5; cnt++) {
+ for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) {
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ if (i + nbits > TEST_BIT_LEN)
+ break;
+ /*
+ * Prevent the compiler from optimizing away the
+ * bitmap_read() by using its value.
+ */
+ WRITE_ONCE(val, bitmap_read(bitmap, i, nbits));
+ }
+ }
+ }
+ time = ktime_get() - time;
+ pr_info("Time spent in %s:\t%llu\n", __func__, time);
+}
+
+static void __init test_bitmap_write_perf(void)
+{
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ unsigned int cnt, nbits, i;
+ unsigned long val = 0xfeedface;
+ ktime_t time;
+
+ bitmap_zero(bitmap, TEST_BIT_LEN);
+ time = ktime_get();
+ for (cnt = 0; cnt < 5; cnt++) {
+ for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) {
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ if (i + nbits > TEST_BIT_LEN)
+ break;
+ bitmap_write(bitmap, val, i, nbits);
+ }
+ }
+ }
+ time = ktime_get() - time;
+ pr_info("Time spent in %s:\t%llu\n", __func__, time);
+}
+
+#undef TEST_BIT_LEN
+
static void __init selftest(void)
{
test_zero_clear();
@@ -1252,6 +1457,7 @@ static void __init selftest(void)
test_copy();
test_bitmap_region();
test_replace();
+ test_bitmap_sg();
test_bitmap_arr32();
test_bitmap_arr64();
test_bitmap_parse();
@@ -1261,6 +1467,9 @@ static void __init selftest(void)
test_bitmap_cut();
test_bitmap_print_buf();
test_bitmap_const_eval();
+ test_bitmap_read_write();
+ test_bitmap_read_perf();
+ test_bitmap_write_perf();
test_find_nth_bit();
test_for_each_set_bit();
diff --git a/lib/test_bitops.c b/lib/test_bitops.c
index 3b7bcbee84..55669624bb 100644
--- a/lib/test_bitops.c
+++ b/lib/test_bitops.c
@@ -5,9 +5,11 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
+#include <linux/slab.h>
/* a tiny module only meant to test
*
@@ -50,6 +52,30 @@ static unsigned long order_comb_long[][2] = {
};
#endif
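+
+/*
+ * fns(word, n) returns the position of the n'th (0-based) set bit in
+ * @word, e.g. fns(0b1010, 0) == 1 and fns(0b1010, 1) == 3. The loop
+ * below only measures its throughput over random words.
+ */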
+static int __init test_fns(void)
+{
+ static volatile __always_used unsigned long tmp __initdata;
+ unsigned long *buf __free(kfree) = NULL;
+ unsigned int i, n;
+ ktime_t time;
+
+ buf = kmalloc_array(10000, sizeof(unsigned long), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ get_random_bytes(buf, 10000 * sizeof(unsigned long));
+ time = ktime_get();
+
+ for (n = 0; n < BITS_PER_LONG; n++)
+ for (i = 0; i < 10000; i++)
+ tmp = fns(buf[i], n);
+
+ time = ktime_get() - time;
+ pr_err("fns: %18llu ns\n", time);
+
+ return 0;
+}
+
static int __init test_bitops_startup(void)
{
int i, bit_set;
@@ -94,6 +120,8 @@ static int __init test_bitops_startup(void)
if (bit_set != BITOPS_LAST)
pr_err("ERROR: FOUND SET BIT %d\n", bit_set);
+ test_fns();
+
pr_info("Completed bitops test\n");
return 0;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 569e6d2dc5..207ff87194 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -13431,7 +13431,7 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
- /* 64-bit atomic magnitudes */
+ /* 32-bit atomic magnitudes */
{
"ATOMIC_W_ADD: all operand magnitudes",
{ },
diff --git a/lib/test_fpu.h b/lib/test_fpu.h
new file mode 100644
index 0000000000..4459807084
--- /dev/null
+++ b/lib/test_fpu.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _LIB_TEST_FPU_H
+#define _LIB_TEST_FPU_H
+
+int test_fpu(void);
+
+#endif
diff --git a/lib/test_fpu.c b/lib/test_fpu_glue.c
index e82db19fed..eef282a271 100644
--- a/lib/test_fpu.c
+++ b/lib/test_fpu_glue.c
@@ -17,39 +17,9 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
-#include <asm/fpu/api.h>
+#include <linux/fpu.h>
-static int test_fpu(void)
-{
- /*
- * This sequence of operations tests that rounding mode is
- * to nearest and that denormal numbers are supported.
- * Volatile variables are used to avoid compiler optimizing
- * the calculations away.
- */
- volatile double a, b, c, d, e, f, g;
-
- a = 4.0;
- b = 1e-15;
- c = 1e-310;
-
- /* Sets precision flag */
- d = a + b;
-
- /* Result depends on rounding mode */
- e = a + b / 2;
-
- /* Denormal and very large values */
- f = b / c;
-
- /* Depends on denormal support */
- g = a + c * f;
-
- if (d > a && e > a && g > a)
- return 0;
- else
- return -EINVAL;
-}
+#include "test_fpu.h"
static int test_fpu_get(void *data, u64 *val)
{
@@ -68,6 +38,9 @@ static struct dentry *selftest_dir;
static int __init test_fpu_init(void)
{
+ if (!kernel_fpu_available())
+ return -EINVAL;
+
selftest_dir = debugfs_create_dir("selftest_helpers", NULL);
if (!selftest_dir)
return -ENOMEM;
diff --git a/lib/test_fpu_impl.c b/lib/test_fpu_impl.c
new file mode 100644
index 0000000000..777894dbbe
--- /dev/null
+++ b/lib/test_fpu_impl.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/errno.h>
+
+#include "test_fpu.h"
+
+int test_fpu(void)
+{
+ /*
+ * This sequence of operations tests that rounding mode is
+ * to nearest and that denormal numbers are supported.
+ * Volatile variables are used to avoid compiler optimizing
+ * the calculations away.
+ */
+ volatile double a, b, c, d, e, f, g;
+
+ a = 4.0;
+ b = 1e-15;
+ c = 1e-310;
+
+ /* Sets precision flag */
+ d = a + b;
+
+ /* Result depends on rounding mode */
+ e = a + b / 2;
+
+ /* Denormal and very large values */
+ f = b / c;
+
+ /* Depends on denormal support */
+ g = a + c * f;
+
+ if (d > a && e > a && g > a)
+ return 0;
+ else
+ return -EINVAL;
+}
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index b916801f23..fe2682bb21 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -113,7 +113,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
*p++ = ' ';
} while (p < test + rs * 2 + rs / gs + 1);
- strncpy(p, data_a, l);
+ memcpy(p, data_a, l);
p += l;
}
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 43d9dfd57a..1eec3b7ac6 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -58,11 +58,14 @@ static int num_test_devs;
* @need_mod_put for your tests case.
*/
enum kmod_test_case {
+ /* private: */
__TEST_KMOD_INVALID = 0,
+ /* public: */
TEST_KMOD_DRIVER,
TEST_KMOD_FS_TYPE,
+ /* private: */
__TEST_KMOD_MAX,
};
@@ -82,6 +85,7 @@ struct kmod_test_device;
* @ret_sync: return value if request_module() is used, sync request for
* @TEST_KMOD_DRIVER
* @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
+ * @task_sync: kthread's task_struct or %NULL if not running
* @thread_idx: thread ID
* @test_dev: test device test is being performed under
* @need_mod_put: Some tests (get_fs_type() is one) requires putting the module
@@ -108,7 +112,7 @@ struct kmod_test_device_info {
* @dev: pointer to misc_dev's own struct device
* @config_mutex: protects configuration of test
* @trigger_mutex: the test trigger can only be fired once at a time
- * @thread_lock: protects @done count, and the @info per each thread
+ * @thread_mutex: protects @done count, and the @info of each thread
* @done: number of threads which have completed or failed
* @test_is_oom: when we run out of memory, use this to halt moving forward
* @kthreads_done: completion used to signal when all work is done
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 29185ac5c7..399380db44 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -3599,6 +3599,45 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
mas_unlock(&mas);
}
+static noinline void __init alloc_cyclic_testing(struct maple_tree *mt)
+{
+ unsigned long location;
+ unsigned long next;
+ int ret = 0;
+ MA_STATE(mas, mt, 0, 0);
+
+ next = 0;
+ mtree_lock(mt);
+ for (int i = 0; i < 100; i++) {
+ mas_alloc_cyclic(&mas, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MAS_BUG_ON(&mas, i != location - 2);
+ MAS_BUG_ON(&mas, mas.index != location);
+ MAS_BUG_ON(&mas, mas.last != location);
+ MAS_BUG_ON(&mas, i != next - 3);
+ }
+
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+ next = 0;
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (int i = 0; i < 100; i++) {
+ mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, i != location - 2);
+ MT_BUG_ON(mt, i != next - 3);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ }
+
+ mtree_destroy(mt);
+	/* Overflow test: the third allocation wraps past ULONG_MAX back to the range start and returns 1 */
+ next = ULONG_MAX - 1;
+ ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, ret != 0);
+ ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, ret != 0);
+ ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, ret != 1);
+}
+
static DEFINE_MTREE(tree);
static int __init maple_tree_seed(void)
{
@@ -3880,6 +3919,11 @@ static int __init maple_tree_seed(void)
check_state_handling(&tree);
mtree_destroy(&tree);
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ alloc_cyclic_testing(&tree);
+ mtree_destroy(&tree);
+
#if defined(BENCH)
skip:
#endif
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 42b5852082..c63db03ebb 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -811,4 +811,5 @@ static void __exit test_rht_exit(void)
module_init(test_rht_init);
module_exit(test_rht_exit);
+MODULE_DESCRIPTION("Resizable, Scalable, Concurrent Hash Table test module");
MODULE_LICENSE("GPL v2");
diff --git a/lib/test_string.c b/lib/test_string.c
deleted file mode 100644
index c5cb92fb71..0000000000
--- a/lib/test_string.c
+++ /dev/null
@@ -1,257 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
-#include <linux/printk.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-static __init int memset16_selftest(void)
-{
- unsigned i, j, k;
- u16 v, *p;
-
- p = kmalloc(256 * 2 * 2, GFP_KERNEL);
- if (!p)
- return -1;
-
- for (i = 0; i < 256; i++) {
- for (j = 0; j < 256; j++) {
- memset(p, 0xa1, 256 * 2 * sizeof(v));
- memset16(p + i, 0xb1b2, j);
- for (k = 0; k < 512; k++) {
- v = p[k];
- if (k < i) {
- if (v != 0xa1a1)
- goto fail;
- } else if (k < i + j) {
- if (v != 0xb1b2)
- goto fail;
- } else {
- if (v != 0xa1a1)
- goto fail;
- }
- }
- }
- }
-
-fail:
- kfree(p);
- if (i < 256)
- return (i << 24) | (j << 16) | k | 0x8000;
- return 0;
-}
-
-static __init int memset32_selftest(void)
-{
- unsigned i, j, k;
- u32 v, *p;
-
- p = kmalloc(256 * 2 * 4, GFP_KERNEL);
- if (!p)
- return -1;
-
- for (i = 0; i < 256; i++) {
- for (j = 0; j < 256; j++) {
- memset(p, 0xa1, 256 * 2 * sizeof(v));
- memset32(p + i, 0xb1b2b3b4, j);
- for (k = 0; k < 512; k++) {
- v = p[k];
- if (k < i) {
- if (v != 0xa1a1a1a1)
- goto fail;
- } else if (k < i + j) {
- if (v != 0xb1b2b3b4)
- goto fail;
- } else {
- if (v != 0xa1a1a1a1)
- goto fail;
- }
- }
- }
- }
-
-fail:
- kfree(p);
- if (i < 256)
- return (i << 24) | (j << 16) | k | 0x8000;
- return 0;
-}
-
-static __init int memset64_selftest(void)
-{
- unsigned i, j, k;
- u64 v, *p;
-
- p = kmalloc(256 * 2 * 8, GFP_KERNEL);
- if (!p)
- return -1;
-
- for (i = 0; i < 256; i++) {
- for (j = 0; j < 256; j++) {
- memset(p, 0xa1, 256 * 2 * sizeof(v));
- memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
- for (k = 0; k < 512; k++) {
- v = p[k];
- if (k < i) {
- if (v != 0xa1a1a1a1a1a1a1a1ULL)
- goto fail;
- } else if (k < i + j) {
- if (v != 0xb1b2b3b4b5b6b7b8ULL)
- goto fail;
- } else {
- if (v != 0xa1a1a1a1a1a1a1a1ULL)
- goto fail;
- }
- }
- }
- }
-
-fail:
- kfree(p);
- if (i < 256)
- return (i << 24) | (j << 16) | k | 0x8000;
- return 0;
-}
-
-static __init int strchr_selftest(void)
-{
- const char *test_string = "abcdefghijkl";
- const char *empty_string = "";
- char *result;
- int i;
-
- for (i = 0; i < strlen(test_string) + 1; i++) {
- result = strchr(test_string, test_string[i]);
- if (result - test_string != i)
- return i + 'a';
- }
-
- result = strchr(empty_string, '\0');
- if (result != empty_string)
- return 0x101;
-
- result = strchr(empty_string, 'a');
- if (result)
- return 0x102;
-
- result = strchr(test_string, 'z');
- if (result)
- return 0x103;
-
- return 0;
-}
-
-static __init int strnchr_selftest(void)
-{
- const char *test_string = "abcdefghijkl";
- const char *empty_string = "";
- char *result;
- int i, j;
-
- for (i = 0; i < strlen(test_string) + 1; i++) {
- for (j = 0; j < strlen(test_string) + 2; j++) {
- result = strnchr(test_string, j, test_string[i]);
- if (j <= i) {
- if (!result)
- continue;
- return ((i + 'a') << 8) | j;
- }
- if (result - test_string != i)
- return ((i + 'a') << 8) | j;
- }
- }
-
- result = strnchr(empty_string, 0, '\0');
- if (result)
- return 0x10001;
-
- result = strnchr(empty_string, 1, '\0');
- if (result != empty_string)
- return 0x10002;
-
- result = strnchr(empty_string, 1, 'a');
- if (result)
- return 0x10003;
-
- result = strnchr(NULL, 0, '\0');
- if (result)
- return 0x10004;
-
- return 0;
-}
-
-static __init int strspn_selftest(void)
-{
- static const struct strspn_test {
- const char str[16];
- const char accept[16];
- const char reject[16];
- unsigned a;
- unsigned r;
- } tests[] __initconst = {
- { "foobar", "", "", 0, 6 },
- { "abba", "abc", "ABBA", 4, 4 },
- { "abba", "a", "b", 1, 1 },
- { "", "abc", "abc", 0, 0},
- };
- const struct strspn_test *s = tests;
- size_t i, res;
-
- for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
- res = strspn(s->str, s->accept);
- if (res != s->a)
- return 0x100 + 2*i;
- res = strcspn(s->str, s->reject);
- if (res != s->r)
- return 0x100 + 2*i + 1;
- }
- return 0;
-}
-
-static __exit void string_selftest_remove(void)
-{
-}
-
-static __init int string_selftest_init(void)
-{
- int test, subtest;
-
- test = 1;
- subtest = memset16_selftest();
- if (subtest)
- goto fail;
-
- test = 2;
- subtest = memset32_selftest();
- if (subtest)
- goto fail;
-
- test = 3;
- subtest = memset64_selftest();
- if (subtest)
- goto fail;
-
- test = 4;
- subtest = strchr_selftest();
- if (subtest)
- goto fail;
-
- test = 5;
- subtest = strnchr_selftest();
- if (subtest)
- goto fail;
-
- test = 6;
- subtest = strspn_selftest();
- if (subtest)
- goto fail;
-
- pr_info("String selftests succeeded\n");
- return 0;
-fail:
- pr_crit("String selftest failure %d.%08x\n", test, subtest);
- return 0;
-}
-
-module_init(string_selftest_init);
-module_exit(string_selftest_remove);
-MODULE_LICENSE("GPL v2");
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 2062be1f2e..c288df9372 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -11,6 +11,39 @@ typedef void(*test_ubsan_fp)(void);
#config, IS_ENABLED(config) ? "y" : "n"); \
} while (0)
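+
+/*
+ * The arithmetic below is performed on volatile operands so the
+ * compiler cannot fold the overflow away at build time; the UBSAN
+ * instrumentation has to see the operation happen at runtime.
+ */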
+static void test_ubsan_add_overflow(void)
+{
+ volatile int val = INT_MAX;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val += 2;
+}
+
+static void test_ubsan_sub_overflow(void)
+{
+ volatile int val = INT_MIN;
+ volatile int val2 = 2;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val -= val2;
+}
+
+static void test_ubsan_mul_overflow(void)
+{
+ volatile int val = INT_MAX / 2;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val *= 3;
+}
+
+static void test_ubsan_negate_overflow(void)
+{
+ volatile int val = INT_MIN;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val = -val;
+}
+
static void test_ubsan_divrem_overflow(void)
{
volatile int val = 16;
@@ -23,8 +56,8 @@ static void test_ubsan_divrem_overflow(void)
static void test_ubsan_shift_out_of_bounds(void)
{
volatile int neg = -1, wrap = 4;
- int val1 = 10;
- int val2 = INT_MAX;
+ volatile int val1 = 10;
+ volatile int val2 = INT_MAX;
UBSAN_TEST(CONFIG_UBSAN_SHIFT, "negative exponent");
val1 <<= neg;
@@ -90,6 +123,10 @@ static void test_ubsan_misaligned_access(void)
}
static const test_ubsan_fp test_ubsan_array[] = {
+ test_ubsan_add_overflow,
+ test_ubsan_sub_overflow,
+ test_ubsan_mul_overflow,
+ test_ubsan_negate_overflow,
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
@@ -97,7 +134,7 @@ static const test_ubsan_fp test_ubsan_array[] = {
};
/* Excluded because they Oops the module. */
-static const test_ubsan_fp skip_ubsan_array[] = {
+static __used const test_ubsan_fp skip_ubsan_array[] = {
test_ubsan_divrem_overflow,
};
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 3718d98864..4ddf769861 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -117,7 +117,7 @@ static int align_shift_alloc_test(void)
int i;
for (i = 0; i < BITS_PER_LONG; i++) {
- align = ((unsigned long) 1) << i;
+ align = 1UL << i;
ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0,
__builtin_return_address(0));
@@ -501,7 +501,7 @@ static int test_func(void *private)
}
static int
-init_test_configurtion(void)
+init_test_configuration(void)
{
/*
* A maximum number of workers is defined as hard-coded
@@ -531,7 +531,7 @@ static void do_concurrent_test(void)
/*
* Set some basic configurations plus sanity check.
*/
- ret = init_test_configurtion();
+ ret = init_test_configuration();
if (ret < 0)
return;
@@ -600,12 +600,7 @@ static int vmalloc_test_init(void)
return -EAGAIN; /* Fail will directly unload the module */
}
-static void vmalloc_test_exit(void)
-{
-}
-
module_init(vmalloc_test_init)
-module_exit(vmalloc_test_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Uladzislau Rezki");
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index e77d485644..ab9cc42a0d 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -423,6 +423,59 @@ static noinline void check_cmpxchg(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_cmpxchg_order(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ void *FIVE = xa_mk_value(5);
+ unsigned int i, order = 3;
+
+ XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));
+
+	/* Check entry FIVE has the order saved (xa_to_value(FIVE) == 5 falls inside the order-3 block) */
+ XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);
+
+ /* Check all the tied indexes have the same entry and order */
+ for (i = 0; i < (1 << order); i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ /* Ensure that nothing is stored at index '1 << order' */
+ XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);
+
+ /*
+ * Additionally, keep the node information and the order at
+ * '1 << order'
+ */
+ XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
+ for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ /* Conditionally replace FIVE entry at index '0' with NULL */
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);
+
+	/* Verify the order is lost at the old FIVE entries */
+ XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0);
+
+ /* Verify the order and entries are lost in all the tied indexes */
+ for (i = 0; i < (1 << order); i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != NULL);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
+ }
+
+ /* Verify node and order are kept at '1 << order' */
+ for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
static noinline void check_reserve(struct xarray *xa)
{
void *entry;
@@ -674,6 +727,186 @@ static noinline void check_multi_store(struct xarray *xa)
#endif
}
+#ifdef CONFIG_XARRAY_MULTI
+/* mimics page cache __filemap_add_folio() */
+static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
+ unsigned long index,
+ unsigned int order,
+ void *p)
+{
+ XA_STATE(xas, xa, index);
+ unsigned int nrpages = 1UL << order;
+
+	/* users are responsible for index alignment to the order when adding */
+ XA_BUG_ON(xa, index & (nrpages - 1));
+
+ xas_set_order(&xas, index, order);
+
+ do {
+ xas_lock_irq(&xas);
+ xas_store(&xas, p);
+ xas_unlock_irq(&xas);
+		/*
+		 * In our selftest case the only failure we can expect is
+		 * running out of memory, as we're not mimicking the entire
+		 * page cache, so verify that's the only error we can run
+		 * into here. The xas_nomem() call which follows will fix
+		 * that condition for us so the loop can carry on.
+		 */
+ XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
+ XA_BUG_ON(xa, xa_load(xa, index) != p);
+}
+
+/* mimics page_cache_delete() */
+static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa,
+ unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+
+ xas_set_order(&xas, index, order);
+ xas_store(&xas, NULL);
+ xas_init_marks(&xas);
+}
+
+static noinline void check_xa_multi_store_adv_delete(struct xarray *xa,
+ unsigned long index,
+ unsigned int order)
+{
+ xa_lock_irq(xa);
+ check_xa_multi_store_adv_del_entry(xa, index, order);
+ xa_unlock_irq(xa);
+}
+
+/* mimics page cache filemap_get_entry() */
+static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *p;
+ static unsigned int loops = 0;
+
+ rcu_read_lock();
+repeat:
+ xas_reset(&xas);
+ p = xas_load(&xas);
+ if (xas_retry(&xas, p))
+ goto repeat;
+ rcu_read_unlock();
+
+	/*
+	 * This is not part of the page cache; this selftest is deliberately
+	 * aggressive and does not want to trust the xarray API but rather
+	 * test it, and for order 20 (a 4 GiB block size) we can loop over
+	 * a million entries, which can cause a soft lockup. Proper page
+	 * cache APIs iterate at the right order and skip shared entries,
+	 * so they do not run into this.
+	 */
+ if (++loops % XA_CHECK_SCHED == 0)
+ schedule();
+
+ return p;
+}
+
+static unsigned long some_val = 0xdeadbeef;
+static unsigned long some_val_2 = 0xdeaddead;
+
+/* mimics the page cache usage */
+static noinline void check_xa_multi_store_adv(struct xarray *xa,
+ unsigned long pos,
+ unsigned int order)
+{
+ unsigned int nrpages = 1UL << order;
+ unsigned long index, base, next_index, next_next_index;
+ unsigned int i;
+
+ index = pos >> PAGE_SHIFT;
+ base = round_down(index, nrpages);
+ next_index = round_down(base + nrpages, nrpages);
+ next_next_index = round_down(next_index + nrpages, nrpages);
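+	/*
+	 * E.g. with order 2 (nrpages == 4) and index 7: base is 4,
+	 * next_index is 8 and next_next_index is 12, i.e. three naturally
+	 * aligned, consecutive order-2 slots.
+	 */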
+
+ check_xa_multi_store_adv_add(xa, base, order, &some_val);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);
+
+ XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);
+
+ /* Use order 0 for the next item */
+ check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2);
+ XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);
+
+ /* Remove the next item */
+ check_xa_multi_store_adv_delete(xa, next_index, 0);
+
+ /* Now use order for a new pointer */
+ check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
+
+ check_xa_multi_store_adv_delete(xa, next_index, order);
+ check_xa_multi_store_adv_delete(xa, base, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* starting fresh again */
+
+ /* let's test some holes now */
+
+ /* hole at base and next_next */
+ check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);
+
+ check_xa_multi_store_adv_delete(xa, next_index, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* hole at base and next */
+
+ check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);
+
+ check_xa_multi_store_adv_delete(xa, next_next_index, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+#endif
+
+static noinline void check_multi_store_advanced(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned long end = ULONG_MAX/2;
+ unsigned long pos, i;
+
+ /*
+ * About 117 million tests below.
+ */
+ for (pos = 7; pos < end; pos = (pos * pos) + 564) {
+ for (i = 0; i < max_order; i++) {
+ check_xa_multi_store_adv(xa, pos, i);
+ check_xa_multi_store_adv(xa, pos + 157, i);
+ }
+ }
+#endif
+}
+
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
int i;
@@ -1555,9 +1788,11 @@ static void check_split_1(struct xarray *xa, unsigned long index,
unsigned int order, unsigned int new_order)
{
XA_STATE_ORDER(xas, xa, index, new_order);
- unsigned int i;
+ unsigned int i, found;
+ void *entry;
xa_store_order(xa, index, order, xa, GFP_KERNEL);
+ xa_set_mark(xa, index, XA_MARK_1);
xas_split_alloc(&xas, xa, order, GFP_KERNEL);
xas_lock(&xas);
@@ -1574,6 +1809,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
xa_set_mark(xa, index, XA_MARK_0);
XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
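+
+	/*
+	 * XA_MARK_1 was set on the single multi-order entry before the
+	 * split; each of the 1 << (order - new_order) resulting entries
+	 * must still carry it.
+	 */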
+ xas_set_order(&xas, index, 0);
+ found = 0;
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
+ found++;
+ XA_BUG_ON(xa, xa_is_internal(entry));
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, found != 1 << (order - new_order));
+
xa_destroy(xa);
}
@@ -1756,6 +2001,97 @@ static noinline void check_get_order(struct xarray *xa)
}
}
+static noinline void check_xas_get_order(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned int order;
+ unsigned long i, j;
+
+ for (order = 0; order < max_order; order++) {
+ for (i = 0; i < 10; i++) {
+ xas_set_order(&xas, i << order, order);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(i));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ for (j = i << order; j < (i + 1) << order; j++) {
+ xas_set_order(&xas, j, 0);
+ rcu_read_lock();
+ xas_load(&xas);
+ XA_BUG_ON(xa, xas_get_order(&xas) != order);
+ rcu_read_unlock();
+ }
+
+ xas_lock(&xas);
+ xas_set_order(&xas, i << order, order);
+ xas_store(&xas, NULL);
+ xas_unlock(&xas);
+ }
+ }
+}
+
+static noinline void check_xas_conflict_get_order(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ void *entry;
+ int only_once;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned int order;
+ unsigned long i, j, k;
+
+ for (order = 0; order < max_order; order++) {
+ for (i = 0; i < 10; i++) {
+ xas_set_order(&xas, i << order, order);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(i));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ /*
+ * Ensure xas_get_order works with xas_for_each_conflict.
+ */
+ j = i << order;
+ for (k = 0; k < order; k++) {
+ only_once = 0;
+ xas_set_order(&xas, j + (1 << k), k);
+ xas_lock(&xas);
+ xas_for_each_conflict(&xas, entry) {
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, xas_get_order(&xas) != order);
+ only_once++;
+ }
+ XA_BUG_ON(xa, only_once != 1);
+ xas_unlock(&xas);
+ }
+
+ if (order < max_order - 1) {
+ only_once = 0;
+ xas_set_order(&xas, (i & ~1UL) << order, order + 1);
+ xas_lock(&xas);
+ xas_for_each_conflict(&xas, entry) {
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, xas_get_order(&xas) != order);
+ only_once++;
+ }
+ XA_BUG_ON(xa, only_once != 1);
+ xas_unlock(&xas);
+ }
+
+ xas_set_order(&xas, i << order, order);
+ xas_lock(&xas);
+ xas_store(&xas, NULL);
+ xas_unlock(&xas);
+ }
+ }
+}
+
static noinline void check_destroy(struct xarray *xa)
{
unsigned long index;
@@ -1801,10 +2137,14 @@ static int xarray_checks(void)
check_xas_erase(&array);
check_insert(&array);
check_cmpxchg(&array);
+ check_cmpxchg_order(&array);
check_reserve(&array);
check_reserve(&xa0);
check_multi_store(&array);
+ check_multi_store_advanced(&array);
check_get_order(&array);
+ check_xas_get_order(&array);
+ check_xas_conflict_get_order(&array);
check_xa_alloc();
check_find(&array);
check_find_entry(&array);
diff --git a/lib/ubsan.c b/lib/ubsan.c
index df4f8d1354..a1c983d148 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -44,9 +44,10 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_shift_out_of_bounds:
return "UBSAN: shift out of bounds";
#endif
-#ifdef CONFIG_UBSAN_DIV_ZERO
+#if defined(CONFIG_UBSAN_DIV_ZERO) || defined(CONFIG_UBSAN_SIGNED_WRAP)
/*
- * SanitizerKind::IntegerDivideByZero emits
+ * SanitizerKind::IntegerDivideByZero and
+ * SanitizerKind::SignedIntegerOverflow emit
* SanitizerHandler::DivremOverflow.
*/
case ubsan_divrem_overflow:
@@ -78,6 +79,19 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_type_mismatch:
return "UBSAN: type mismatch";
#endif
+#ifdef CONFIG_UBSAN_SIGNED_WRAP
+ /*
+ * SanitizerKind::SignedIntegerOverflow emits
+ * SanitizerHandler::AddOverflow, SanitizerHandler::SubOverflow,
+ * or SanitizerHandler::MulOverflow.
+ */
+ case ubsan_add_overflow:
+ return "UBSAN: integer addition overflow";
+ case ubsan_sub_overflow:
+ return "UBSAN: integer subtraction overflow";
+ case ubsan_mul_overflow:
+ return "UBSAN: integer multiplication overflow";
+#endif
default:
return "UBSAN: unrecognized failure code";
}
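[For context, an illustrative sketch that is not part of the patch: with CONFIG_UBSAN_SIGNED_WRAP enabled, the compiler lowers signed arithmetic to checked operations, so code along these lines ends up in the handlers added below:

	/* Hedged example: a signed addition that cannot be represented.
	 * Instead of silently wrapping, the instrumented add calls
	 * __ubsan_handle_add_overflow() with pointers to both operands,
	 * and handle_overflow() formats the report.
	 */
	int overflow_demo(void)
	{
		int a = INT_MAX;	/* from linux/limits.h */
		int b = 1;

		/* reported as "2147483647 + 1 cannot be represented in type 'int'" */
		return a + b;
	}
]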
@@ -222,6 +236,74 @@ static void ubsan_epilogue(void)
check_panic_on_warn("UBSAN");
}
+static void handle_overflow(struct overflow_data *data, void *lhs,
+ void *rhs, char op)
+{
+ struct type_descriptor *type = data->type;
+ char lhs_val_str[VALUE_LENGTH];
+ char rhs_val_str[VALUE_LENGTH];
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, type_is_signed(type) ?
+ "signed-integer-overflow" :
+ "unsigned-integer-overflow");
+
+ val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
+ val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
+ pr_err("%s %c %s cannot be represented in type %s\n",
+ lhs_val_str,
+ op,
+ rhs_val_str,
+ type->type_name);
+
+ ubsan_epilogue();
+}
+
+void __ubsan_handle_add_overflow(void *data,
+ void *lhs, void *rhs)
+{
+ handle_overflow(data, lhs, rhs, '+');
+}
+EXPORT_SYMBOL(__ubsan_handle_add_overflow);
+
+void __ubsan_handle_sub_overflow(void *data,
+ void *lhs, void *rhs)
+{
+ handle_overflow(data, lhs, rhs, '-');
+}
+EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
+
+void __ubsan_handle_mul_overflow(void *data,
+ void *lhs, void *rhs)
+{
+ handle_overflow(data, lhs, rhs, '*');
+}
+EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
+
+void __ubsan_handle_negate_overflow(void *_data, void *old_val)
+{
+ struct overflow_data *data = _data;
+ char old_val_str[VALUE_LENGTH];
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, "negation-overflow");
+
+ val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
+
+ pr_err("negation of %s cannot be represented in type %s:\n",
+ old_val_str, data->type->type_name);
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
+
void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
{
struct overflow_data *data = _data;
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 5d99ab8191..07e37d4429 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -43,7 +43,7 @@ enum {
struct type_descriptor {
u16 type_kind;
u16 type_info;
- char type_name[1];
+ char type_name[];
};
struct source_location {
@@ -124,15 +124,32 @@ typedef s64 s_max;
typedef u64 u_max;
#endif
-void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
-void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
-void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
-void __ubsan_handle_out_of_bounds(void *_data, void *index);
-void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
-void __ubsan_handle_builtin_unreachable(void *_data);
-void __ubsan_handle_load_invalid_value(void *_data, void *val);
-void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
- unsigned long align,
- unsigned long offset);
+/*
+ * When generating Runtime Calls, Clang doesn't respect the -mregparm=3
+ * option used on i386: https://github.com/llvm/llvm-project/issues/89670
+ * Fix this for earlier Clang versions by forcing the calling convention
+ * to use non-register arguments.
+ */
+#if defined(CONFIG_X86_32) && \
+ defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 190000
+# define ubsan_linkage asmlinkage
+#else
+# define ubsan_linkage
+#endif
+
+void ubsan_linkage __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_negate_overflow(void *_data, void *old_val);
+void ubsan_linkage __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
+void ubsan_linkage __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
+void ubsan_linkage __ubsan_handle_out_of_bounds(void *_data, void *index);
+void ubsan_linkage __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_builtin_unreachable(void *_data);
+void ubsan_linkage __ubsan_handle_load_invalid_value(void *_data, void *val);
+void ubsan_linkage __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset);
#endif
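[A note on the ubsan_linkage workaround above; the expansion is paraphrased, not part of the patch. On x86-32 the kernel passes the first three arguments in registers (-mregparm=3), while the affected Clang versions emit UBSAN runtime calls using the default stack-based convention. asmlinkage pins the C prototypes to that convention; on x86 it boils down to roughly:

	/* approximate x86-32 expansion, see arch/x86/include/asm/linkage.h */
	#define asmlinkage __attribute__((regparm(0)))

so the handlers read their arguments from where Clang actually put them.]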
diff --git a/lib/usercopy.c b/lib/usercopy.c
index d29fe29c68..499a7a7d54 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,9 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
-#include <linux/uaccess.h>
+#include <linux/kernel.h>
#include <linux/nospec.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/wordpart.h>
/* out-of-line parts */
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
index d883ac2995..c46c230051 100644
--- a/lib/vdso/Kconfig
+++ b/lib/vdso/Kconfig
@@ -30,4 +30,11 @@ config GENERIC_VDSO_TIME_NS
Selected by architectures which support time namespaces in the
VDSO
+config GENERIC_VDSO_OVERFLOW_PROTECT
+ bool
+ help
+ Select to add multiplication overflow protection to the VDSO
+ time getter functions for the price of an extra conditional
+ in the hotpath.
+
endif
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index ce2f695520..899850bd6f 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -5,15 +5,23 @@
#include <vdso/datapage.h>
#include <vdso/helpers.h>
-#ifndef vdso_calc_delta
-/*
- * Default implementation which works for all sane clocksources. That
- * obviously excludes x86/TSC.
- */
-static __always_inline
-u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+#ifndef vdso_calc_ns
+
+#ifdef VDSO_DELTA_NOMASK
+# define VDSO_DELTA_MASK(vd) ULLONG_MAX
+#else
+# define VDSO_DELTA_MASK(vd) (vd->mask)
+#endif
+
+#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
+static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
+{
+ return delta < vd->max_cycles;
+}
+#else
+static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
{
- return ((cycles - last) & mask) * mult;
+ return true;
}
#endif
@@ -24,6 +32,21 @@ static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
}
#endif
+/*
+ * Default implementation which works for all sane clocksources. That
+ * obviously excludes x86/TSC.
+ */
+static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
+{
+ u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);
+
+ if (likely(vdso_delta_ok(vd, delta)))
+ return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
+
+ return mul_u64_u32_add_u64_shr(delta, vd->mult, base, vd->shift);
+}
+#endif /* vdso_calc_ns */
+
#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
@@ -49,10 +72,10 @@ static inline bool vdso_cycles_ok(u64 cycles)
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
- const struct vdso_data *vd;
const struct timens_offset *offs = &vdns->offset[clk];
const struct vdso_timestamp *vdso_ts;
- u64 cycles, last, ns;
+ const struct vdso_data *vd;
+ u64 cycles, ns;
u32 seq;
s64 sec;
@@ -73,10 +96,7 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_ts->nsec;
- last = vd->cycle_last;
- ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns = vdso_shift_ns(ns, vd->shift);
+ ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
@@ -111,7 +131,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
- u64 cycles, last, sec, ns;
+ u64 cycles, sec, ns;
u32 seq;
/* Allows to compile the high resolution parts out */
@@ -144,10 +164,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_ts->nsec;
- last = vd->cycle_last;
- ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns = vdso_shift_ns(ns, vd->shift);
+ ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
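[A worked sketch of the two vdso_calc_ns() paths, with illustrative values not taken from the patch: with a typical clocksource mult around 2^24, the 64-bit product delta * mult overflows once delta exceeds roughly 2^40 cycles, e.g. after a long suspend on a non-stopping counter. max_cycles bounds the cheap path; past it, the same value is computed with a 128-bit intermediate:

	/* Hedged sketch; mult/shift/max_cycles stand in for the vdso_data
	 * fields (u64/u32 from linux/types.h, helper from linux/math64.h).
	 * Both branches evaluate ns = (delta * mult + base) >> shift.
	 */
	static u64 calc_ns_sketch(u64 delta, u32 mult, u32 shift,
				  u64 base, u64 max_cycles)
	{
		if (delta < max_cycles)		/* product fits in 64 bits */
			return ((delta * mult) + base) >> shift;

		/* 128-bit intermediate avoids the overflow */
		return mul_u64_u32_add_u64_shr(delta, mult, base, shift);
	}

This conditional is the hotpath cost that GENERIC_VDSO_OVERFLOW_PROTECT trades for correctness.]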
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 552738f142..cdd4e2314b 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -966,13 +966,13 @@ char *bdev_name(char *buf, char *end, struct block_device *bdev,
hd = bdev->bd_disk;
buf = string(buf, end, hd->disk_name, spec);
- if (bdev->bd_partno) {
+ if (bdev_is_partition(bdev)) {
if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
if (buf < end)
*buf = 'p';
buf++;
}
- buf = number(buf, end, bdev->bd_partno, spec);
+ buf = number(buf, end, bdev_partno(bdev), spec);
}
return buf;
}
diff --git a/lib/xarray.c b/lib/xarray.c
index 39f07bfc4d..32d4bac8c9 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -200,7 +200,8 @@ static void *xas_start(struct xa_state *xas)
return entry;
}
-static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+static __always_inline void *xas_descend(struct xa_state *xas,
+ struct xa_node *node)
{
unsigned int offset = get_offset(xas->xa_index, node);
void *entry = xa_entry(xas->xa, node, offset);
@@ -969,8 +970,22 @@ static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
return marks;
}
+static inline void node_mark_slots(struct xa_node *node, unsigned int sibs,
+ xa_mark_t mark)
+{
+ int i;
+
+ if (sibs == 0)
+ node_mark_all(node, mark);
+ else {
+ for (i = 0; i < XA_CHUNK_SIZE; i += sibs + 1)
+ node_set_mark(node, i, mark);
+ }
+}
+
static void node_set_marks(struct xa_node *node, unsigned int offset,
- struct xa_node *child, unsigned int marks)
+ struct xa_node *child, unsigned int sibs,
+ unsigned int marks)
{
xa_mark_t mark = XA_MARK_0;
@@ -978,7 +993,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
if (marks & (1 << (__force unsigned int)mark)) {
node_set_mark(node, offset, mark);
if (child)
- node_mark_all(child, mark);
+ node_mark_slots(child, sibs, mark);
}
if (mark == XA_MARK_MAX)
break;
@@ -1077,7 +1092,8 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
child->nr_values = xa_is_value(entry) ?
XA_CHUNK_SIZE : 0;
RCU_INIT_POINTER(child->parent, node);
- node_set_marks(node, offset, child, marks);
+ node_set_marks(node, offset, child, xas->xa_sibs,
+ marks);
rcu_assign_pointer(node->slots[offset],
xa_mk_node(child));
if (xa_is_value(curr))
@@ -1086,7 +1102,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
} else {
unsigned int canon = offset - xas->xa_sibs;
- node_set_marks(node, canon, NULL, marks);
+ node_set_marks(node, canon, NULL, 0, marks);
rcu_assign_pointer(node->slots[canon], entry);
while (offset > canon)
rcu_assign_pointer(node->slots[offset--],
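[To make node_mark_slots() concrete, with numbers assuming the usual XA_CHUNK_SIZE of 64 (not spelled out in the patch): when a marked order-9 entry is split into order-3 entries, each child node holds eight 8-slot entries, so sibs == 7 and the loop marks only the head slot of each entry:

	/* sibs == 7: marks land on slots 0, 8, 16, ..., 56, one per entry */
	for (i = 0; i < XA_CHUNK_SIZE; i += sibs + 1)
		node_set_mark(node, i, mark);

The previous node_mark_all() also marked the sibling slots, which is what the updated check_split_1() test above catches by insisting on exactly 1 << (order - new_order) marked entries.]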
@@ -1750,39 +1766,52 @@ unlock:
EXPORT_SYMBOL(xa_store_range);
/**
- * xa_get_order() - Get the order of an entry.
- * @xa: XArray.
- * @index: Index of the entry.
+ * xas_get_order() - Get the order of an entry.
+ * @xas: XArray operation state.
+ *
+ * Called after xas_load(); the xas should not be in an error state.
*
* Return: A number between 0 and 63 indicating the order of the entry.
*/
-int xa_get_order(struct xarray *xa, unsigned long index)
+int xas_get_order(struct xa_state *xas)
{
- XA_STATE(xas, xa, index);
- void *entry;
int order = 0;
- rcu_read_lock();
- entry = xas_load(&xas);
-
- if (!entry)
- goto unlock;
-
- if (!xas.xa_node)
- goto unlock;
+ if (!xas->xa_node)
+ return 0;
for (;;) {
- unsigned int slot = xas.xa_offset + (1 << order);
+ unsigned int slot = xas->xa_offset + (1 << order);
if (slot >= XA_CHUNK_SIZE)
break;
- if (!xa_is_sibling(xas.xa_node->slots[slot]))
+ if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot)))
break;
order++;
}
- order += xas.xa_node->shift;
-unlock:
+ order += xas->xa_node->shift;
+ return order;
+}
+EXPORT_SYMBOL_GPL(xas_get_order);
+
+/**
+ * xa_get_order() - Get the order of an entry.
+ * @xa: XArray.
+ * @index: Index of the entry.
+ *
+ * Return: A number between 0 and 63 indicating the order of the entry.
+ */
+int xa_get_order(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ int order = 0;
+ void *entry;
+
+ rcu_read_lock();
+ entry = xas_load(&xas);
+ if (entry)
+ order = xas_get_order(&xas);
rcu_read_unlock();
return order;
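[A hedged usage sketch of the new helper; the names are illustrative, and the reworked xa_get_order() above is the canonical caller. The state is only valid while protected, so the load and the order query belong in one RCU read section:

	XA_STATE(xas, xa, index);
	void *entry;
	int order = 0;

	rcu_read_lock();
	entry = xas_load(&xas);
	if (entry)
		order = xas_get_order(&xas);
	rcu_read_unlock();

	/* entry spans 1UL << order indices, starting at index rounded
	 * down to that alignment */
]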