Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                             |    3
-rw-r--r--  lib/Kconfig.debug                       |   58
-rw-r--r--  lib/Kconfig.kasan                       |    2
-rw-r--r--  lib/Kconfig.kgdb                        |    2
-rw-r--r--  lib/Kconfig.ubsan                       |   31
-rw-r--r--  lib/Makefile                            |   10
-rw-r--r--  lib/assoc_array.c                       |    2
-rw-r--r--  lib/bitmap.c                            |    7
-rw-r--r--  lib/bootconfig.c                        |    3
-rw-r--r--  lib/buildid.c                           |    6
-rw-r--r--  lib/devres.c                            |  208
-rw-r--r--  lib/dhry_1.c                            |    2
-rw-r--r--  lib/dhry_run.c                          |    1
-rw-r--r--  lib/dynamic_debug.c                     |    7
-rw-r--r--  lib/dynamic_queue_limits.c              |   74
-rw-r--r--  lib/flex_proportions.c                  |   77
-rw-r--r--  lib/fonts/Kconfig                       |    3
-rw-r--r--  lib/fonts/fonts.c                       |   15
-rw-r--r--  lib/fortify_kunit.c                     |  662
-rw-r--r--  lib/fw_table.c                          |   15
-rw-r--r--  lib/generic-radix-tree.c                |   35
-rw-r--r--  lib/iov_iter.c                          |   60
-rw-r--r--  lib/kobject_uevent.c                    |   24
-rw-r--r--  lib/kunit/device.c                      |    2
-rw-r--r--  lib/kunit/executor.c                    |    6
-rw-r--r--  lib/livepatch/Makefile                  |   14
-rw-r--r--  lib/livepatch/test_klp_atomic_replace.c |   57
-rw-r--r--  lib/livepatch/test_klp_callbacks_busy.c |   70
-rw-r--r--  lib/livepatch/test_klp_callbacks_demo.c |  121
-rw-r--r--  lib/livepatch/test_klp_callbacks_demo2.c |   93
-rw-r--r--  lib/livepatch/test_klp_callbacks_mod.c  |   24
-rw-r--r--  lib/livepatch/test_klp_livepatch.c      |   51
-rw-r--r--  lib/livepatch/test_klp_shadow_vars.c    |  301
-rw-r--r--  lib/livepatch/test_klp_state.c          |  162
-rw-r--r--  lib/livepatch/test_klp_state2.c         |  191
-rw-r--r--  lib/livepatch/test_klp_state3.c         |    5
-rw-r--r--  lib/maple_tree.c                        |    6
-rw-r--r--  lib/math/div64.c                        |   15
-rw-r--r--  lib/memcpy_kunit.c                      |    3
-rw-r--r--  lib/overflow_kunit.c                    |   67
-rw-r--r--  lib/pci_iomap.c                         |  180
-rw-r--r--  lib/raid6/Makefile                      |    2
-rw-r--r--  lib/raid6/s390vx.uc                     |   62
-rw-r--r--  lib/sort.c                              |   20
-rw-r--r--  lib/stackdepot.c                        |    8
-rw-r--r--  lib/stackinit_kunit.c                   |   21
-rw-r--r--  lib/strcat_kunit.c                      |   12
-rw-r--r--  lib/string.c                            |   23
-rw-r--r--  lib/string_helpers.c                    |   89
-rw-r--r--  lib/string_helpers_kunit.c (renamed from lib/test-string_helpers.c) |  255
-rw-r--r--  lib/string_kunit.c                      |  354
-rw-r--r--  lib/strscpy_kunit.c                     |   51
-rw-r--r--  lib/test_bitmap.c                       |   42
-rw-r--r--  lib/test_kmod.c                         |    6
-rw-r--r--  lib/test_maple_tree.c                   |   44
-rw-r--r--  lib/test_string.c                       |  257
-rw-r--r--  lib/test_ubsan.c                        |   43
-rw-r--r--  lib/test_vmalloc.c                      |   11
-rw-r--r--  lib/test_xarray.c                       |  249
-rw-r--r--  lib/ubsan.c                             |   86
-rw-r--r--  lib/ubsan.h                             |   37
-rw-r--r--  lib/xarray.c                            |   23
62 files changed, 2089 insertions(+), 2281 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 5ddda7c2ed..4557bb8a52 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -70,9 +70,6 @@ source "lib/math/Kconfig"
config NO_GENERIC_PCI_IOPORT_MAP
bool
-config GENERIC_PCI_IOMAP
- bool
-
config GENERIC_IOMAP
bool
select GENERIC_PCI_IOMAP
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 6352d59c57..291185f54e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1304,7 +1304,7 @@ config PROVE_LOCKING
select DEBUG_SPINLOCK
select DEBUG_MUTEXES if !PREEMPT_RT
select DEBUG_RT_MUTEXES if RT_MUTEXES
- select DEBUG_RWSEMS
+ select DEBUG_RWSEMS if !PREEMPT_RT
select DEBUG_WW_MUTEX_SLOWPATH
select DEBUG_LOCK_ALLOC
select PREEMPT_COUNT if !ARCH_NO_PREEMPT
@@ -1427,7 +1427,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
config DEBUG_RWSEMS
bool "RW Semaphore debugging: basic checks"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !PREEMPT_RT
help
This debugging feature allows mismatched rw semaphore locks
and unlocks to be detected and reported.
@@ -2086,7 +2086,7 @@ config KCOV
depends on ARCH_HAS_KCOV
depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
depends on !ARCH_WANTS_NO_INSTR || HAVE_NOINSTR_HACK || \
- GCC_VERSION >= 120000 || CLANG_VERSION >= 130000
+ GCC_VERSION >= 120000 || CC_IS_CLANG
select DEBUG_FS
select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
select OBJTOOL if HAVE_NOINSTR_HACK
@@ -2128,7 +2128,7 @@ config KCOV_IRQ_AREA_SIZE
menuconfig RUNTIME_TESTING_MENU
bool "Runtime Testing"
- def_bool y
+ default y
if RUNTIME_TESTING_MENU
@@ -2143,7 +2143,7 @@ config TEST_DHRY
To run the benchmark, it needs to be enabled explicitly, either from
the kernel command line (when built-in), or from userspace (when
- built-in or modular.
+ built-in or modular).
Run once during kernel boot:
@@ -2354,11 +2354,15 @@ config ASYNC_RAID6_TEST
config TEST_HEXDUMP
tristate "Test functions located in the hexdump module at runtime"
-config STRING_SELFTEST
- tristate "Test string functions at runtime"
+config STRING_KUNIT_TEST
+ tristate "KUnit test string functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
-config TEST_STRING_HELPERS
- tristate "Test functions located in the string_helpers module at runtime"
+config STRING_HELPERS_KUNIT_TEST
+ tristate "KUnit test string helpers at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
@@ -2700,18 +2704,6 @@ config MEMCPY_KUNIT_TEST
If unsure, say N.
-config MEMCPY_SLOW_KUNIT_TEST
- bool "Include exhaustive memcpy tests"
- depends on MEMCPY_KUNIT_TEST
- default y
- help
- Some memcpy tests are quite exhaustive in checking for overlaps
- and bit ranges. These can be very slow, so they are split out
- as a separate config, in case they need to be disabled.
-
- Note this config option will be replaced by the use of KUnit test
- attributes.
-
config IS_SIGNED_TYPE_KUNIT_TEST
tristate "Test is_signed_type() macro" if !KUNIT_ALL_TESTS
depends on KUNIT
@@ -2750,7 +2742,7 @@ config STACKINIT_KUNIT_TEST
config FORTIFY_KUNIT_TEST
tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS
- depends on KUNIT && FORTIFY_SOURCE
+ depends on KUNIT
default KUNIT_ALL_TESTS
help
Builds unit tests for checking internals of FORTIFY_SOURCE as used
@@ -2859,28 +2851,6 @@ config TEST_MEMCAT_P
If unsure, say N.
-config TEST_LIVEPATCH
- tristate "Test livepatching"
- default n
- depends on DYNAMIC_DEBUG
- depends on LIVEPATCH
- depends on m
- help
- Test kernel livepatching features for correctness. The tests will
- load test modules that will be livepatched in various scenarios.
-
- To run all the livepatching tests:
-
- make -C tools/testing/selftests TARGETS=livepatch run_tests
-
- Alternatively, individual tests may be invoked:
-
- tools/testing/selftests/livepatch/test-callbacks.sh
- tools/testing/selftests/livepatch/test-livepatch.sh
- tools/testing/selftests/livepatch/test-shadow-vars.sh
-
- If unsure, say N.
-
config TEST_OBJAGG
tristate "Perform selftest on object aggreration manager"
default n
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index e6eda054ab..98016e137b 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -158,7 +158,7 @@ config KASAN_STACK
out-of-bounds bugs in stack variables.
With Clang, stack instrumentation has a problem that causes excessive
- stack usage, see https://bugs.llvm.org/show_bug.cgi?id=38809. Thus,
+ stack usage, see https://llvm.org/pr38809. Thus,
with Clang, this option is deemed unsafe.
This option is always disabled when compile-testing with Clang to
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 3b9a440084..b5c0e65767 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -43,7 +43,7 @@ config KGDB_SERIAL_CONSOLE
tristate "KGDB: use kgdb over the serial console"
select CONSOLE_POLL
select MAGIC_SYSRQ
- depends on TTY && HW_CONSOLE
+ depends on TTY && VT
default y
help
Share a serial console with kgdb. Sysrq-g must be used
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 59e21bfec1..bdda600f8d 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
-config ARCH_HAS_UBSAN_SANITIZE_ALL
+config ARCH_HAS_UBSAN
bool
menuconfig UBSAN
bool "Undefined behaviour sanity checker"
+ depends on ARCH_HAS_UBSAN
help
This option enables the Undefined Behaviour sanity checker.
Compile-time instrumentation is used to detect various undefined
@@ -87,7 +88,6 @@ config UBSAN_LOCAL_BOUNDS
config UBSAN_SHIFT
bool "Perform checking for bit-shift overflows"
- default UBSAN
depends on $(cc-option,-fsanitize=shift)
help
This option enables -fsanitize=shift which checks for bit-shift
@@ -116,6 +116,22 @@ config UBSAN_UNREACHABLE
This option enables -fsanitize=unreachable which checks for control
flow reaching an expected-to-be-unreachable position.
+config UBSAN_SIGNED_WRAP
+ bool "Perform checking for signed arithmetic wrap-around"
+ default UBSAN
+ depends on !COMPILE_TEST
+ # The no_sanitize attribute was introduced in GCC with version 8.
+ depends on !CC_IS_GCC || GCC_VERSION >= 80000
+ depends on $(cc-option,-fsanitize=signed-integer-overflow)
+ help
+ This option enables -fsanitize=signed-integer-overflow which checks
+ for wrap-around of any arithmetic operations with signed integers.
+ This currently performs nearly no instrumentation due to the
+ kernel's use of -fno-strict-overflow which converts all would-be
+ arithmetic undefined behavior into wrap-around arithmetic. Future
+ sanitizer versions will allow for wrap-around checking (rather than
+ exclusively undefined behavior).
+
config UBSAN_BOOL
bool "Perform checking for non-boolean values used as boolean"
default UBSAN
@@ -142,17 +158,6 @@ config UBSAN_ALIGNMENT
Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.
-config UBSAN_SANITIZE_ALL
- bool "Enable instrumentation for the entire kernel"
- depends on ARCH_HAS_UBSAN_SANITIZE_ALL
- default y
- help
- This option activates instrumentation for the entire kernel.
- If you don't enable this option, you have to explicitly specify
- UBSAN_SANITIZE := y for the files/directories you want to check for UB.
- Enabling this option will get kernel image size increased
- significantly.
-
config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
depends on m
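
The new UBSAN_SIGNED_WRAP help text above is easiest to read against a concrete case. The fragment below is illustrative only (not part of the patch): it shows the kind of signed wrap-around that -fsanitize=signed-integer-overflow targets. Because the kernel builds with -fno-strict-overflow, this addition is defined to wrap, which is why the help text says the option currently produces almost no instrumentation until wrap-around-aware sanitizer modes arrive.

    #include <limits.h>

    /*
     * Illustrative only: with counter == INT_MAX the result wraps to
     * INT_MIN under -fno-strict-overflow; a wrap-around-aware sanitizer
     * build would report the operation instead of silently wrapping.
     */
    static int wrapping_increment(int counter)
    {
            return counter + 1;
    }

    /* e.g. wrapping_increment(INT_MAX) == INT_MIN */
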
diff --git a/lib/Makefile b/lib/Makefile
index 6b09731d8e..ffc6b2341b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -49,9 +49,9 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
generic-radix-tree.o bitmap-str.o
-obj-$(CONFIG_STRING_SELFTEST) += test_string.o
+obj-$(CONFIG_STRING_KUNIT_TEST) += string_kunit.o
obj-y += string_helpers.o
-obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
+obj-$(CONFIG_STRING_HELPERS_KUNIT_TEST) += string_helpers_kunit.o
obj-y += hexdump.o
obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
@@ -69,6 +69,7 @@ obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
+CFLAGS_test_ubsan.o += $(call cc-disable-warning, unused-but-set-variable)
UBSAN_SANITIZE_test_ubsan.o := y
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
@@ -134,8 +135,6 @@ endif
obj-$(CONFIG_TEST_FPU) += test_fpu.o
CFLAGS_test_fpu.o += $(FPU_CFLAGS)
-obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
-
# Some KUnit files (hooks.o) need to be built-in even when KUnit is a module,
# so we can't just use obj-$(CONFIG_KUNIT).
ifdef CONFIG_KUNIT
@@ -153,7 +152,6 @@ CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
obj-y += math/ crypto/
obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
-obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
@@ -401,6 +399,8 @@ obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index ca0b4f360c..388e656ac9 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -938,7 +938,7 @@ static bool assoc_array_insert_mid_shortcut(struct assoc_array_edit *edit,
edit->leaf_p = &new_n0->slots[0];
pr_devel("<--%s() = ok [split shortcut]\n", __func__);
- return edit;
+ return true;
}
/**
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 09522af227..b976928549 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -348,6 +348,13 @@ unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
}
EXPORT_SYMBOL(__bitmap_weight_and);
+unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ return BITMAP_WEIGHT(bitmap1[idx] & ~bitmap2[idx], bits);
+}
+EXPORT_SYMBOL(__bitmap_weight_andnot);
+
void __bitmap_set(unsigned long *map, unsigned int start, int len)
{
unsigned long *p = map + BIT_WORD(start);
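
For reference, a minimal hypothetical caller of the __bitmap_weight_andnot() helper added above (the function name and values below are an illustration, not from the patch); it returns the number of bits set in the first bitmap but clear in the second:

    #include <linux/bitmap.h>

    static unsigned int count_unreserved(void)
    {
            /* 0x0f & ~0x0a == 0x05, so two bits (0 and 2) remain set */
            DECLARE_BITMAP(used, 8)     = { 0x0f };
            DECLARE_BITMAP(reserved, 8) = { 0x0a };

            return __bitmap_weight_andnot(used, reserved, 8);   /* returns 2 */
    }
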
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 8841554432..97f8911ea3 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -901,7 +901,8 @@ static int __init xbc_parse_tree(void)
}
/**
- * xbc_exit() - Clean up all parsed bootconfig
+ * _xbc_exit() - Clean up all parsed bootconfig
+ * @early: Set true if this is called before the buddy system is initialized.
*
* This clears all data structures of parsed bootconfig on memory.
* If you need to reuse xbc_init() with new boot config, you can
diff --git a/lib/buildid.c b/lib/buildid.c
index e3a7acdeef..898301b49e 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -140,7 +140,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
return -EFAULT; /* page not mapped */
ret = -EINVAL;
- page_addr = kmap_atomic(page);
+ page_addr = kmap_local_page(page);
ehdr = (Elf32_Ehdr *)page_addr;
/* compare magic x7f "ELF" */
@@ -156,7 +156,7 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
ret = get_build_id_64(page_addr, build_id, size);
out:
- kunmap_atomic(page_addr);
+ kunmap_local(page_addr);
put_page(page);
return ret;
}
@@ -174,7 +174,7 @@ int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size)
return parse_build_id_buf(build_id, NULL, buf, buf_size);
}
-#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_CRASH_CORE)
+#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO)
unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
/**
diff --git a/lib/devres.c b/lib/devres.c
index c44f104b58..fe0c63caeb 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
#include <linux/err.h>
-#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
@@ -311,212 +311,6 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */
-#ifdef CONFIG_PCI
-/*
- * PCI iomap devres
- */
-#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
-
-struct pcim_iomap_devres {
- void __iomem *table[PCIM_IOMAP_MAX];
-};
-
-static void pcim_iomap_release(struct device *gendev, void *res)
-{
- struct pci_dev *dev = to_pci_dev(gendev);
- struct pcim_iomap_devres *this = res;
- int i;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (this->table[i])
- pci_iounmap(dev, this->table[i]);
-}
-
-/**
- * pcim_iomap_table - access iomap allocation table
- * @pdev: PCI device to access iomap table for
- *
- * Access iomap allocation table for @dev. If iomap table doesn't
- * exist and @pdev is managed, it will be allocated. All iomaps
- * recorded in the iomap table are automatically unmapped on driver
- * detach.
- *
- * This function might sleep when the table is first allocated but can
- * be safely called without context and guaranteed to succeed once
- * allocated.
- */
-void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
-{
- struct pcim_iomap_devres *dr, *new_dr;
-
- dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
- if (dr)
- return dr->table;
-
- new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
- dev_to_node(&pdev->dev));
- if (!new_dr)
- return NULL;
- dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
- return dr->table;
-}
-EXPORT_SYMBOL(pcim_iomap_table);
-
-/**
- * pcim_iomap - Managed pcim_iomap()
- * @pdev: PCI device to iomap for
- * @bar: BAR to iomap
- * @maxlen: Maximum length of iomap
- *
- * Managed pci_iomap(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
-{
- void __iomem **tbl;
-
- BUG_ON(bar >= PCIM_IOMAP_MAX);
-
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
- return NULL;
-
- tbl[bar] = pci_iomap(pdev, bar, maxlen);
- return tbl[bar];
-}
-EXPORT_SYMBOL(pcim_iomap);
-
-/**
- * pcim_iounmap - Managed pci_iounmap()
- * @pdev: PCI device to iounmap for
- * @addr: Address to unmap
- *
- * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
- */
-void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
-{
- void __iomem **tbl;
- int i;
-
- pci_iounmap(pdev, addr);
-
- tbl = (void __iomem **)pcim_iomap_table(pdev);
- BUG_ON(!tbl);
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++)
- if (tbl[i] == addr) {
- tbl[i] = NULL;
- return;
- }
- WARN_ON(1);
-}
-EXPORT_SYMBOL(pcim_iounmap);
-
-/**
- * pcim_iomap_regions - Request and iomap PCI BARs
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to request and iomap
- * @name: Name used when requesting regions
- *
- * Request and iomap regions specified by @mask.
- */
-int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
-{
- void __iomem * const *iomap;
- int i, rc;
-
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return -ENOMEM;
-
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- unsigned long len;
-
- if (!(mask & (1 << i)))
- continue;
-
- rc = -EINVAL;
- len = pci_resource_len(pdev, i);
- if (!len)
- goto err_inval;
-
- rc = pci_request_region(pdev, i, name);
- if (rc)
- goto err_inval;
-
- rc = -ENOMEM;
- if (!pcim_iomap(pdev, i, 0))
- goto err_region;
- }
-
- return 0;
-
- err_region:
- pci_release_region(pdev, i);
- err_inval:
- while (--i >= 0) {
- if (!(mask & (1 << i)))
- continue;
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
- }
-
- return rc;
-}
-EXPORT_SYMBOL(pcim_iomap_regions);
-
-/**
- * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to iomap
- * @name: Name used when requesting regions
- *
- * Request all PCI BARs and iomap regions specified by @mask.
- */
-int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
- const char *name)
-{
- int request_mask = ((1 << 6) - 1) & ~mask;
- int rc;
-
- rc = pci_request_selected_regions(pdev, request_mask, name);
- if (rc)
- return rc;
-
- rc = pcim_iomap_regions(pdev, mask, name);
- if (rc)
- pci_release_selected_regions(pdev, request_mask);
- return rc;
-}
-EXPORT_SYMBOL(pcim_iomap_regions_request_all);
-
-/**
- * pcim_iounmap_regions - Unmap and release PCI BARs
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to unmap and release
- *
- * Unmap and release regions specified by @mask.
- */
-void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
-{
- void __iomem * const *iomap;
- int i;
-
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
- return;
-
- for (i = 0; i < PCIM_IOMAP_MAX; i++) {
- if (!(mask & (1 << i)))
- continue;
-
- pcim_iounmap(pdev, iomap[i]);
- pci_release_region(pdev, i);
- }
-}
-EXPORT_SYMBOL(pcim_iounmap_regions);
-#endif /* CONFIG_PCI */
-
static void devm_arch_phys_ac_add_release(struct device *dev, void *res)
{
arch_phys_wc_del(*((int *)res));
diff --git a/lib/dhry_1.c b/lib/dhry_1.c
index 08edbbb19f..ca6c87232c 100644
--- a/lib/dhry_1.c
+++ b/lib/dhry_1.c
@@ -277,7 +277,7 @@ int dhry(int n)
dhry_assert_string_eq(Str_1_Loc, "DHRYSTONE PROGRAM, 1'ST STRING");
dhry_assert_string_eq(Str_2_Loc, "DHRYSTONE PROGRAM, 2'ND STRING");
- User_Time = ktime_to_ms(ktime_sub(End_Time, Begin_Time));
+ User_Time = ktime_ms_delta(End_Time, Begin_Time);
kfree(Ptr_Glob);
kfree(Next_Ptr_Glob);
diff --git a/lib/dhry_run.c b/lib/dhry_run.c
index f15ac666e9..e6a279dabf 100644
--- a/lib/dhry_run.c
+++ b/lib/dhry_run.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/mutex.h>
#include <linux/smp.h>
#define DHRY_VAX 1757
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index a5a687e1c9..f2c5e7910b 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -644,10 +644,9 @@ static int param_set_dyndbg_classnames(const char *instr, const struct kernel_pa
int cls_id, totct = 0;
bool wanted;
- cl_str = tmp = kstrdup(instr, GFP_KERNEL);
- p = strchr(cl_str, '\n');
- if (p)
- *p = '\0';
+ cl_str = tmp = kstrdup_and_replace(instr, '\n', '\0', GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
/* start with previously set state-bits, then modify */
curr_bits = old_bits = *dcp->bits;
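
The dynamic_debug change above swaps an open-coded kstrdup() plus strchr()-and-truncate sequence for kstrdup_and_replace(). A minimal sketch of the helper's semantics follows (the wrapper function name is hypothetical); note that it replaces every matching character, which here also serves to cut the string at a trailing newline:

    #include <linux/slab.h>
    #include <linux/string_helpers.h>

    static char *dup_classnames(const char *instr)
    {
            /* duplicate @instr, turning each '\n' into a '\0' terminator */
            return kstrdup_and_replace(instr, '\n', '\0', GFP_KERNEL);
    }
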
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index fde0aa2441..a1389db1c3 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -10,10 +10,77 @@
#include <linux/dynamic_queue_limits.h>
#include <linux/compiler.h>
#include <linux/export.h>
+#include <trace/events/napi.h>
#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
+static void dql_check_stall(struct dql *dql)
+{
+ unsigned short stall_thrs;
+ unsigned long now;
+
+ stall_thrs = READ_ONCE(dql->stall_thrs);
+ if (!stall_thrs)
+ return;
+
+ now = jiffies;
+ /* Check for a potential stall */
+ if (time_after_eq(now, dql->last_reap + stall_thrs)) {
+ unsigned long hist_head, t, start, end;
+
+ /* We are trying to detect a period of at least @stall_thrs
+ * jiffies without any Tx completions, but during first half
+ * of which some Tx was posted.
+ */
+dqs_again:
+ hist_head = READ_ONCE(dql->history_head);
+ /* pairs with smp_wmb() in dql_queued() */
+ smp_rmb();
+
+ /* Get the previous entry in the ring buffer, which is the
+ * oldest sample.
+ */
+ start = (hist_head - DQL_HIST_LEN + 1) * BITS_PER_LONG;
+
+ /* Advance start to continue from the last reap time */
+ if (time_before(start, dql->last_reap + 1))
+ start = dql->last_reap + 1;
+
+ /* Newest sample we should have already seen a completion for */
+ end = hist_head * BITS_PER_LONG + (BITS_PER_LONG - 1);
+
+ /* Shrink the search space to [start, (now - stall_thrs/2)] if
+ * `end` is beyond the stall zone
+ */
+ if (time_before(now, end + stall_thrs / 2))
+ end = now - stall_thrs / 2;
+
+ /* Search for the queued time in [t, end] */
+ for (t = start; time_before_eq(t, end); t++)
+ if (test_bit(t % (DQL_HIST_LEN * BITS_PER_LONG),
+ dql->history))
+ break;
+
+ /* Variable t contains the time of the queue */
+ if (!time_before_eq(t, end))
+ goto no_stall;
+
+ /* The ring buffer was modified in the meantime, retry */
+ if (hist_head != READ_ONCE(dql->history_head))
+ goto dqs_again;
+
+ dql->stall_cnt++;
+ dql->stall_max = max_t(unsigned short, dql->stall_max, now - t);
+
+ trace_dql_stall_detected(dql->stall_thrs, now - t,
+ dql->last_reap, dql->history_head,
+ now, dql->history);
+ }
+no_stall:
+ dql->last_reap = now;
+}
+
/* Records completed count and recalculates the queue limit */
void dql_completed(struct dql *dql, unsigned int count)
{
@@ -110,6 +177,8 @@ void dql_completed(struct dql *dql, unsigned int count)
dql->prev_last_obj_cnt = dql->last_obj_cnt;
dql->num_completed = completed;
dql->prev_num_queued = num_queued;
+
+ dql_check_stall(dql);
}
EXPORT_SYMBOL(dql_completed);
@@ -125,6 +194,10 @@ void dql_reset(struct dql *dql)
dql->prev_ovlimit = 0;
dql->lowest_slack = UINT_MAX;
dql->slack_start_time = jiffies;
+
+ dql->last_reap = jiffies;
+ dql->history_head = jiffies / BITS_PER_LONG;
+ memset(dql->history, 0, sizeof(dql->history));
}
EXPORT_SYMBOL(dql_reset);
@@ -133,6 +206,7 @@ void dql_init(struct dql *dql, unsigned int hold_time)
dql->max_limit = DQL_MAX_LIMIT;
dql->min_limit = 0;
dql->slack_hold_time = hold_time;
+ dql->stall_thrs = 0;
dql_reset(dql);
}
EXPORT_SYMBOL(dql_init);
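
As a rough aid to reading dql_check_stall() above, here is a simplified sketch (an assumed helper, not kernel code) of the detection window it implements: a stall is reported only when no completion has occurred for stall_thrs jiffies and something was queued between the last completion and the first half of that window. The real code derives the queue time from the per-jiffy history bitmap rather than taking it as a parameter.

    #include <linux/jiffies.h>
    #include <linux/types.h>

    static bool stall_window_hit(unsigned long now, unsigned long last_reap,
                                 unsigned long queued_at, unsigned short stall_thrs)
    {
            /* Completions are recent enough: no stall. */
            if (!time_after_eq(now, last_reap + stall_thrs))
                    return false;

            /* Queued after the last reap, but early enough to be overdue. */
            return time_after(queued_at, last_reap) &&
                   time_before_eq(queued_at, now - stall_thrs / 2);
    }
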
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 83332fefa6..84ecccddc7 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -84,83 +84,6 @@ bool fprop_new_period(struct fprop_global *p, int periods)
}
/*
- * ---- SINGLE ----
- */
-
-int fprop_local_init_single(struct fprop_local_single *pl)
-{
- pl->events = 0;
- pl->period = 0;
- raw_spin_lock_init(&pl->lock);
- return 0;
-}
-
-void fprop_local_destroy_single(struct fprop_local_single *pl)
-{
-}
-
-static void fprop_reflect_period_single(struct fprop_global *p,
- struct fprop_local_single *pl)
-{
- unsigned int period = p->period;
- unsigned long flags;
-
- /* Fast path - period didn't change */
- if (pl->period == period)
- return;
- raw_spin_lock_irqsave(&pl->lock, flags);
- /* Someone updated pl->period while we were spinning? */
- if (pl->period >= period) {
- raw_spin_unlock_irqrestore(&pl->lock, flags);
- return;
- }
- /* Aging zeroed our fraction? */
- if (period - pl->period < BITS_PER_LONG)
- pl->events >>= period - pl->period;
- else
- pl->events = 0;
- pl->period = period;
- raw_spin_unlock_irqrestore(&pl->lock, flags);
-}
-
-/* Event of type pl happened */
-void __fprop_inc_single(struct fprop_global *p, struct fprop_local_single *pl)
-{
- fprop_reflect_period_single(p, pl);
- pl->events++;
- percpu_counter_add(&p->events, 1);
-}
-
-/* Return fraction of events of type pl */
-void fprop_fraction_single(struct fprop_global *p,
- struct fprop_local_single *pl,
- unsigned long *numerator, unsigned long *denominator)
-{
- unsigned int seq;
- s64 num, den;
-
- do {
- seq = read_seqcount_begin(&p->sequence);
- fprop_reflect_period_single(p, pl);
- num = pl->events;
- den = percpu_counter_read_positive(&p->events);
- } while (read_seqcount_retry(&p->sequence, seq));
-
- /*
- * Make fraction <= 1 and denominator > 0 even in presence of percpu
- * counter errors
- */
- if (den <= num) {
- if (num)
- den = num;
- else
- den = 1;
- }
- *denominator = den;
- *numerator = num;
-}
-
-/*
* ---- PERCPU ----
*/
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 7ee468ef21..7e945fdcbf 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -98,7 +98,8 @@ config FONT_10x18
config FONT_SUN8x16
bool "Sparc console 8x16 font"
- depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || BOOTX_TEXT
+ depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || \
+ BOOTX_TEXT || EARLYFB
help
This is the high resolution console font for Sun machines. Say Y.
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 9738664386..47e34950b6 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -96,18 +96,21 @@ EXPORT_SYMBOL(find_font);
* get_default_font - get default font
* @xres: screen size of X
* @yres: screen size of Y
- * @font_w: bit array of supported widths (1 - 32)
- * @font_h: bit array of supported heights (1 - 32)
+ * @font_w: bit array of supported widths (1 - FB_MAX_BLIT_WIDTH)
+ * @font_h: bit array of supported heights (1 - FB_MAX_BLIT_HEIGHT)
*
* Get the default font for a specified screen size.
* Dimensions are in pixels.
*
+ * font_w or font_h being NULL means all values are supported.
+ *
* Returns %NULL if no font is found, or a pointer to the
* chosen font.
*
*/
-const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
- u32 font_h)
+const struct font_desc *get_default_font(int xres, int yres,
+ unsigned long *font_w,
+ unsigned long *font_h)
{
int i, c, cc, res;
const struct font_desc *f, *g;
@@ -135,8 +138,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
if (res > 20)
c += 20 - res;
- if ((font_w & (1U << (f->width - 1))) &&
- (font_h & (1U << (f->height - 1))))
+ if ((!font_w || test_bit(f->width - 1, font_w)) &&
+ (!font_h || test_bit(f->height - 1, font_h)))
c += 1000;
if (c > cc) {
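
With the fonts.c change above, get_default_font() takes bit arrays rather than 32-bit masks, so supported sizes above 32 pixels can be expressed. A minimal hypothetical caller might build the masks like this (the function name and the 64-bit bound are assumptions for illustration):

    #include <linux/bitmap.h>
    #include <linux/font.h>

    static const struct font_desc *pick_console_font(void)
    {
            DECLARE_BITMAP(font_w, 64) = { 0 };
            DECLARE_BITMAP(font_h, 64) = { 0 };

            __set_bit(8 - 1, font_w);       /* accept width 8 */
            __set_bit(8 - 1, font_h);       /* accept height 8 */
            __set_bit(16 - 1, font_h);      /* accept height 16 */

            return get_default_font(640, 480, font_w, font_h);
    }
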
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 7830a9e64e..fdba0eaf19 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Runtime test cases for CONFIG_FORTIFY_SOURCE that aren't expected to
- * Oops the kernel on success. (For those, see drivers/misc/lkdtm/fortify.c)
+ * Runtime test cases for CONFIG_FORTIFY_SOURCE. For testing memcpy(),
+ * see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
*
* For corner cases with UBSAN, try testing with:
*
@@ -15,17 +15,55 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/* Redefine fortify_panic() to track failures. */
+void fortify_add_kunit_error(int write);
+#define fortify_panic(func, write, avail, size, retfail) do { \
+ __fortify_report(FORTIFY_REASON(func, write), avail, size); \
+ fortify_add_kunit_error(write); \
+ return (retfail); \
+} while (0)
+
#include <kunit/device.h>
#include <kunit/test.h>
+#include <kunit/test-bug.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
+/* Handle being built without CONFIG_FORTIFY_SOURCE */
+#ifndef __compiletime_strlen
+# define __compiletime_strlen __builtin_strlen
+#endif
+
+static struct kunit_resource read_resource;
+static struct kunit_resource write_resource;
+static int fortify_read_overflows;
+static int fortify_write_overflows;
+
static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
static char array_unknown[] = "compiler thinks I might change";
+void fortify_add_kunit_error(int write)
+{
+ struct kunit_resource *resource;
+ struct kunit *current_test;
+
+ current_test = kunit_get_current_test();
+ if (!current_test)
+ return;
+
+ resource = kunit_find_named_resource(current_test,
+ write ? "fortify_write_overflows"
+ : "fortify_read_overflows");
+ if (!resource)
+ return;
+
+ (*(int *)resource->data)++;
+ kunit_put_resource(resource);
+}
+
static void known_sizes_test(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
@@ -308,6 +346,610 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
+/*
+ * We can't have an array at the end of a structure or else
+ * builds without -fstrict-flex-arrays=3 will report them as
+ * being an unknown length. Additionally, add bytes before
+ * and after the string to catch over/underflows if tests
+ * fail.
+ */
+struct fortify_padding {
+ unsigned long bytes_before;
+ char buf[32];
+ unsigned long bytes_after;
+};
+/* Force compiler into not being able to resolve size at compile-time. */
+static volatile int unconst;
+
+static void strlen_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ int i, end = sizeof(pad.buf) - 1;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(pad.buf) - 1; i++)
+ pad.buf[i] = i + '0';
+ /* Trailing bytes are still %NUL. */
+ KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* String is terminated, so strlen() is valid. */
+ KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Make string unterminated, and recount. */
+ pad.buf[end] = 'A';
+ end = sizeof(pad.buf);
+ KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+}
+
+static void strnlen_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ int i, end = sizeof(pad.buf) - 1;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(pad.buf) - 1; i++)
+ pad.buf[i] = i + '0';
+ /* Trailing bytes are still %NUL. */
+ KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* String is terminated, so strnlen() is valid. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* A truncated strnlen() will be safe, too. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
+ sizeof(pad.buf) / 2);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Make string unterminated, and recount. */
+ pad.buf[end] = 'A';
+ end = sizeof(pad.buf);
+ /* Reading beyond with strnlen() will be flagged. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+
+ /* Early-truncated is safe still, though. */
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+
+ end = sizeof(pad.buf) / 2;
+ KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void strcpy_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf) + 1] = { };
+ int i;
+
+ /* Fill 31 bytes with valid characters. */
+ for (i = 0; i < sizeof(src) - 2; i++)
+ src[i] = i + '0';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strcpy(): 1 byte less than max size. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Only last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ src[sizeof(src) - 2] = 'A';
+ /* But now we trip the overflow checking. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Trailing %NUL -- thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ src[sizeof(src) - 1] = 'A';
+ /* And for sure now, two bytes past. */
+ KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
+ == pad.buf);
+ /*
+ * Which trips both the strlen() on the unterminated src,
+ * and the resulting copy attempt.
+ */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Trailing %NUL -- thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void strncpy_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[] = "Copy me fully into a small buffer and I will overflow!";
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strncpy(): 1 byte less than max size. */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst - 1)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Only last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Legitimate (though unterminated) max-size strncpy. */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst)
+ == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* No trailing %NUL -- thanks strncpy API. */
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ /* But we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Now verify that FORTIFY is working... */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst + 1)
+ == pad.buf);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And further... */
+ KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
+ sizeof(pad.buf) + unconst + 2)
+ == pad.buf);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void strscpy_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[] = "Copy me fully into a small buffer and I will overflow!";
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strscpy(): 1 byte less than max size. */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(pad.buf) + unconst - 1),
+ -E2BIG);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Keeping space for %NUL, last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Legitimate max-size strscpy. */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(pad.buf) + unconst),
+ -E2BIG);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* A trailing %NUL will exist. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+
+ /* Now verify that FORTIFY is working... */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(pad.buf) + unconst + 1),
+ -E2BIG);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And much further... */
+ KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
+ sizeof(src) * 2 + unconst),
+ -E2BIG);
+ /* Should catch the overflow. */
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ /* And we will not have gone beyond. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void strcat_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf) / 2] = { };
+ char one[] = "A";
+ char two[] = "BC";
+ int i;
+
+ /* Fill 15 bytes with valid characters. */
+ for (i = 0; i < sizeof(src) - 1; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strcat() using less than half max size. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strcat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void strncat_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf)] = { };
+ int i, partial;
+
+ /* Fill 15 bytes with valid characters. */
+ partial = sizeof(src) / 2 - 1;
+ for (i = 0; i < partial; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strncat() using less than half max size. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strncat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated destination, and overflow. */
+ pad.buf[sizeof(pad.buf) - 1] = 'A';
+ KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
+ /* This will have tripped both strlen() and strcat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void strlcat_test(struct kunit *test)
+{
+ struct fortify_padding pad = { };
+ char src[sizeof(pad.buf)] = { };
+ int i, partial;
+ int len = sizeof(pad.buf) + unconst;
+
+ /* Fill 15 bytes with valid characters. */
+ partial = sizeof(src) / 2 - 1;
+ for (i = 0; i < partial; i++)
+ src[i] = i + 'A';
+
+ /* Destination is %NUL-filled to start with. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Legitimate strlcat() using less than half max size. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Legitimate strlcat() now 2 bytes shy of end. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last two bytes should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* Add one more character to the end. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
+ /* Last byte should be %NUL */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+
+ /* And this one char will overflow. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* And adding two will overflow more. */
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ /* Last byte should be %NUL thanks to FORTIFY. */
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated destination, and overflow. */
+ pad.buf[sizeof(pad.buf) - 1] = 'A';
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
+ /* This will have tripped both strlen() and strlcat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
+ KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+
+ /* Force an unterminated source, and overflow. */
+ memset(src, 'B', sizeof(src));
+ pad.buf[sizeof(pad.buf) - 1] = '\0';
+ KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src));
+ /* This will have tripped both strlen() and strlcat(). */
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
+ KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
+ /* But we should not go beyond the end. */
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
+}
+
+static void memscan_test(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + strlen("Where oh where is ");
+ char needle = 'm';
+ size_t len = sizeof(haystack) + unconst;
+
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void memchr_test(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + strlen("Where oh where is ");
+ char needle = 'm';
+ size_t len = sizeof(haystack) + unconst;
+
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void memchr_inv_test(struct kunit *test)
+{
+ char haystack[] = "Where oh where is my memory range?";
+ char *mem = haystack + 1;
+ char needle = 'W';
+ size_t len = sizeof(haystack) + unconst;
+
+ /* Normal search is okay. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
+ mem);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Catch too-large range. */
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
+ NULL);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void memcmp_test(struct kunit *test)
+{
+ char one[] = "My mind is going ...";
+ char two[] = "My mind is going ... I can feel it.";
+ size_t one_len = sizeof(one) + unconst - 1;
+ size_t two_len = sizeof(two) + unconst - 1;
+
+ /* We match the first string (ignoring the %NUL). */
+ KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ /* Still in bounds, but no longer matching. */
+ KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 1), -32);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+
+ /* Catch too-large ranges. */
+ KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+
+ KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+}
+
+static void kmemdup_test(struct kunit *test)
+{
+ char src[] = "I got Doom running on it!";
+ char *copy;
+ size_t len = sizeof(src) + unconst;
+
+ /* Copy is within bounds. */
+ copy = kmemdup(src, len, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Without %NUL. */
+ copy = kmemdup(src, len - 1, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Tiny bounds. */
+ copy = kmemdup(src, 1, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, copy);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
+ kfree(copy);
+
+ /* Out of bounds by 1 byte. */
+ copy = kmemdup(src, len + 1, GFP_KERNEL);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
+ kfree(copy);
+
+ /* Way out of bounds. */
+ copy = kmemdup(src, len * 2, GFP_KERNEL);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
+ kfree(copy);
+
+ /* Starting offset causing out of bounds. */
+ copy = kmemdup(src + 1, len, GFP_KERNEL);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
+ kfree(copy);
+}
+
+static int fortify_test_init(struct kunit *test)
+{
+ if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
+ kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");
+
+ fortify_read_overflows = 0;
+ kunit_add_named_resource(test, NULL, NULL, &read_resource,
+ "fortify_read_overflows",
+ &fortify_read_overflows);
+ fortify_write_overflows = 0;
+ kunit_add_named_resource(test, NULL, NULL, &write_resource,
+ "fortify_write_overflows",
+ &fortify_write_overflows);
+ return 0;
+}
+
static struct kunit_case fortify_test_cases[] = {
KUNIT_CASE(known_sizes_test),
KUNIT_CASE(control_flow_split_test),
@@ -319,11 +961,27 @@ static struct kunit_case fortify_test_cases[] = {
KUNIT_CASE(alloc_size_kvmalloc_dynamic_test),
KUNIT_CASE(alloc_size_devm_kmalloc_const_test),
KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test),
+ KUNIT_CASE(strlen_test),
+ KUNIT_CASE(strnlen_test),
+ KUNIT_CASE(strcpy_test),
+ KUNIT_CASE(strncpy_test),
+ KUNIT_CASE(strscpy_test),
+ KUNIT_CASE(strcat_test),
+ KUNIT_CASE(strncat_test),
+ KUNIT_CASE(strlcat_test),
+ /* skip memset: performs bounds checking on whole structs */
+ /* skip memcpy: still using warn-and-overwrite instead of hard-fail */
+ KUNIT_CASE(memscan_test),
+ KUNIT_CASE(memchr_test),
+ KUNIT_CASE(memchr_inv_test),
+ KUNIT_CASE(memcmp_test),
+ KUNIT_CASE(kmemdup_test),
{}
};
static struct kunit_suite fortify_test_suite = {
.name = "fortify",
+ .init = fortify_test_init,
.test_cases = fortify_test_cases,
};
diff --git a/lib/fw_table.c b/lib/fw_table.c
index c3569d2ba5..1629181445 100644
--- a/lib/fw_table.c
+++ b/lib/fw_table.c
@@ -127,6 +127,7 @@ static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
*
* @id: table id (for debugging purposes)
* @table_size: size of the root table
+ * @max_length: maximum size of the table (ignore if 0)
* @table_header: where does the table start?
* @proc: array of acpi_subtable_proc struct containing entry id
* and associated handler with it
@@ -148,18 +149,21 @@ static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
int __init_or_fwtbl_lib
acpi_parse_entries_array(char *id, unsigned long table_size,
union fw_table_header *table_header,
+ unsigned long max_length,
struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries)
{
- unsigned long table_end, subtable_len, entry_len;
+ unsigned long table_len, table_end, subtable_len, entry_len;
struct acpi_subtable_entry entry;
enum acpi_subtable_type type;
int count = 0;
int i;
type = acpi_get_subtable_type(id);
- table_end = (unsigned long)table_header +
- acpi_table_get_length(type, table_header);
+ table_len = acpi_table_get_length(type, table_header);
+ if (max_length && max_length < table_len)
+ table_len = max_length;
+ table_end = (unsigned long)table_header + table_len;
/* Parse all entries looking for a match. */
@@ -208,7 +212,8 @@ int __init_or_fwtbl_lib
cdat_table_parse(enum acpi_cdat_type type,
acpi_tbl_entry_handler_arg handler_arg,
void *arg,
- struct acpi_table_cdat *table_header)
+ struct acpi_table_cdat *table_header,
+ unsigned long length)
{
struct acpi_subtable_proc proc = {
.id = type,
@@ -222,6 +227,6 @@ cdat_table_parse(enum acpi_cdat_type type,
return acpi_parse_entries_array(ACPI_SIG_CDAT,
sizeof(struct acpi_table_cdat),
(union fw_table_header *)table_header,
- &proc, 1, 0);
+ length, &proc, 1, 0);
}
EXPORT_SYMBOL_FWTBL_LIB(cdat_table_parse);
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index 41f1bcdc44..aaefb9b678 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -5,7 +5,7 @@
#include <linux/gfp.h>
#include <linux/kmemleak.h>
-#define GENRADIX_ARY (PAGE_SIZE / sizeof(struct genradix_node *))
+#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *))
#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY)
struct genradix_node {
@@ -14,13 +14,13 @@ struct genradix_node {
struct genradix_node *children[GENRADIX_ARY];
/* Leaf: */
- u8 data[PAGE_SIZE];
+ u8 data[GENRADIX_NODE_SIZE];
};
};
static inline int genradix_depth_shift(unsigned depth)
{
- return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth;
+ return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth;
}
/*
@@ -33,7 +33,7 @@ static inline size_t genradix_depth_size(unsigned depth)
/* depth that's needed for a genradix that can address up to ULONG_MAX: */
#define GENRADIX_MAX_DEPTH \
- DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT)
+ DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT)
#define GENRADIX_DEPTH_MASK \
((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
@@ -79,23 +79,12 @@ EXPORT_SYMBOL(__genradix_ptr);
static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
{
- struct genradix_node *node;
-
- node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
-
- /*
- * We're using pages (not slab allocations) directly for kernel data
- * structures, so we need to explicitly inform kmemleak of them in order
- * to avoid false positive memory leak reports.
- */
- kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
- return node;
+ return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
}
static inline void genradix_free_node(struct genradix_node *node)
{
- kmemleak_free(node);
- free_page((unsigned long)node);
+ kfree(node);
}
/*
@@ -200,7 +189,7 @@ restart:
i++;
iter->offset = round_down(iter->offset + objs_per_ptr,
objs_per_ptr);
- iter->pos = (iter->offset >> PAGE_SHIFT) *
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) *
objs_per_page;
if (i == GENRADIX_ARY)
goto restart;
@@ -209,7 +198,7 @@ restart:
n = n->children[i];
}
- return &n->data[iter->offset & (PAGE_SIZE - 1)];
+ return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)];
}
EXPORT_SYMBOL(__genradix_iter_peek);
@@ -235,7 +224,7 @@ restart:
if (ilog2(iter->offset) >= genradix_depth_shift(level)) {
iter->offset = genradix_depth_size(level);
- iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page;
iter->offset -= obj_size_plus_page_remainder;
iter->pos--;
@@ -251,7 +240,7 @@ restart:
size_t objs_per_ptr = genradix_depth_size(level);
iter->offset = round_down(iter->offset, objs_per_ptr);
- iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+ iter->pos = (iter->offset >> GENRADIX_NODE_SHIFT) * objs_per_page;
if (!iter->offset)
return NULL;
@@ -267,7 +256,7 @@ restart:
n = n->children[i];
}
- return &n->data[iter->offset & (PAGE_SIZE - 1)];
+ return &n->data[iter->offset & (GENRADIX_NODE_SIZE - 1)];
}
EXPORT_SYMBOL(__genradix_iter_peek_prev);
@@ -289,7 +278,7 @@ int __genradix_prealloc(struct __genradix *radix, size_t size,
{
size_t offset;
- for (offset = 0; offset < size; offset += PAGE_SIZE)
+ for (offset = 0; offset < size; offset += GENRADIX_NODE_SIZE)
if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
return -ENOMEM;
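With nodes now sized by GENRADIX_NODE_SIZE and taken from the slab, kmemleak tracks them automatically, which is why the explicit kmemleak_alloc()/kmemleak_free() calls can go away. A rough userspace sketch of the fan-out math, assuming GENRADIX_NODE_SIZE is 4096 (the old PAGE_SIZE on most configurations):

#include <stdio.h>

/* Illustrative only: with a 4096-byte node and 8-byte pointers, each
 * interior node holds 512 children, so every extra level of depth covers
 * 9 more offset bits (GENRADIX_ARY_SHIFT). */
#define NODE_SIZE 4096UL
#define ARY (NODE_SIZE / sizeof(void *))

int main(void)
{
	printf("children per node: %lu\n", ARY);                 /* 512 on LP64 */
	printf("bits per level:    %d\n", __builtin_ctzl(ARY));  /* 9 */
	return 0;
}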
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index cf2eb2b2f9..4a6a9f419b 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -691,12 +691,11 @@ EXPORT_SYMBOL(iov_iter_discard);
static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
unsigned len_mask)
{
+ const struct iovec *iov = iter_iov(i);
size_t size = i->count;
size_t skip = i->iov_offset;
- unsigned k;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- const struct iovec *iov = iter_iov(i) + k;
+ do {
size_t len = iov->iov_len - skip;
if (len > size)
@@ -706,34 +705,36 @@ static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
if ((unsigned long)(iov->iov_base + skip) & addr_mask)
return false;
+ iov++;
size -= len;
- if (!size)
- break;
- }
+ skip = 0;
+ } while (size);
+
return true;
}
static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
unsigned len_mask)
{
- size_t size = i->count;
+ const struct bio_vec *bvec = i->bvec;
unsigned skip = i->iov_offset;
- unsigned k;
+ size_t size = i->count;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- size_t len = i->bvec[k].bv_len - skip;
+ do {
+ size_t len = bvec->bv_len;
if (len > size)
len = size;
if (len & len_mask)
return false;
- if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
+ if ((unsigned long)(bvec->bv_offset + skip) & addr_mask)
return false;
+ bvec++;
size -= len;
- if (!size)
- break;
- }
+ skip = 0;
+ } while (size);
+
return true;
}
@@ -777,13 +778,12 @@ EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
+ const struct iovec *iov = iter_iov(i);
unsigned long res = 0;
size_t size = i->count;
size_t skip = i->iov_offset;
- unsigned k;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- const struct iovec *iov = iter_iov(i) + k;
+ do {
size_t len = iov->iov_len - skip;
if (len) {
res |= (unsigned long)iov->iov_base + skip;
@@ -791,30 +791,31 @@ static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
len = size;
res |= len;
size -= len;
- if (!size)
- break;
}
- }
+ iov++;
+ skip = 0;
+ } while (size);
return res;
}
static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
+ const struct bio_vec *bvec = i->bvec;
unsigned res = 0;
size_t size = i->count;
unsigned skip = i->iov_offset;
- unsigned k;
- for (k = 0; k < i->nr_segs; k++, skip = 0) {
- size_t len = i->bvec[k].bv_len - skip;
- res |= (unsigned long)i->bvec[k].bv_offset + skip;
+ do {
+ size_t len = bvec->bv_len - skip;
+ res |= (unsigned long)bvec->bv_offset + skip;
if (len > size)
len = size;
res |= len;
+ bvec++;
size -= len;
- if (!size)
- break;
- }
+ skip = 0;
+ } while (size);
+
return res;
}
@@ -1143,11 +1144,12 @@ const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
EXPORT_SYMBOL(dup_iter);
static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
- const struct iovec __user *uvec, unsigned long nr_segs)
+ const struct iovec __user *uvec, u32 nr_segs)
{
const struct compat_iovec __user *uiov =
(const struct compat_iovec __user *)uvec;
- int ret = -EFAULT, i;
+ int ret = -EFAULT;
+ u32 i;
if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
return -EFAULT;
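The converted helpers all share one shape: walk segments by pointer, apply the initial iov_offset only to the first segment, and stop once the remaining byte budget hits zero (the callers guarantee it is non-zero on entry). A self-contained sketch of that loop, with illustrative names rather than the kernel types:

#include <stddef.h>
#include <stdio.h>

struct seg { const char *base; size_t len; };

/* Returns 1 if every segment honours the address/length masks, mirroring
 * the do/while structure used above; assumes size > 0 on entry. */
static int all_aligned(const struct seg *s, size_t size, size_t skip,
                       unsigned long addr_mask, unsigned long len_mask)
{
	do {
		size_t len = s->len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return 0;
		if ((unsigned long)(s->base + skip) & addr_mask)
			return 0;
		s++;
		size -= len;
		skip = 0;
	} while (size);
	return 1;
}

int main(void)
{
	char buf[64];
	struct seg v[2] = { { buf, 32 }, { buf + 32, 32 } };

	/* 1: both segments are 32 bytes, multiples of 4 (len_mask 3) */
	printf("%d\n", all_aligned(v, 64, 0, 0, 3));
	return 0;
}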
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index fb9a2f06dd..03b427e270 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -30,7 +30,7 @@
#include <net/net_namespace.h>
-u64 uevent_seqnum;
+atomic64_t uevent_seqnum;
#ifdef CONFIG_UEVENT_HELPER
char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
#endif
@@ -42,10 +42,9 @@ struct uevent_sock {
#ifdef CONFIG_NET
static LIST_HEAD(uevent_sock_list);
-#endif
-
-/* This lock protects uevent_seqnum and uevent_sock_list */
+/* This lock protects uevent_sock_list */
static DEFINE_MUTEX(uevent_sock_mutex);
+#endif
/* the strings here must match the enum in include/linux/kobject.h */
static const char *kobject_actions[] = {
@@ -315,6 +314,7 @@ static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
int retval = 0;
/* send netlink message */
+ mutex_lock(&uevent_sock_mutex);
list_for_each_entry(ue_sk, &uevent_sock_list, list) {
struct sock *uevent_sock = ue_sk->sk;
@@ -334,6 +334,7 @@ static int uevent_net_broadcast_untagged(struct kobj_uevent_env *env,
if (retval == -ENOBUFS || retval == -ESRCH)
retval = 0;
}
+ mutex_unlock(&uevent_sock_mutex);
consume_skb(skb);
return retval;
@@ -583,16 +584,14 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
break;
}
- mutex_lock(&uevent_sock_mutex);
/* we will send an event, so request a new sequence number */
- retval = add_uevent_var(env, "SEQNUM=%llu", ++uevent_seqnum);
- if (retval) {
- mutex_unlock(&uevent_sock_mutex);
+ retval = add_uevent_var(env, "SEQNUM=%llu",
+ atomic64_inc_return(&uevent_seqnum));
+ if (retval)
goto exit;
- }
+
retval = kobject_uevent_net_broadcast(kobj, env, action_string,
devpath);
- mutex_unlock(&uevent_sock_mutex);
#ifdef CONFIG_UEVENT_HELPER
/* call uevent_helper, usually only enabled during early boot */
@@ -688,7 +687,8 @@ static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb,
int ret;
/* bump and prepare sequence number */
- ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", ++uevent_seqnum);
+ ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu",
+ atomic64_inc_return(&uevent_seqnum));
if (ret < 0 || (size_t)ret >= sizeof(buf))
return -ENOMEM;
ret++;
@@ -742,9 +742,7 @@ static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EPERM;
}
- mutex_lock(&uevent_sock_mutex);
ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack);
- mutex_unlock(&uevent_sock_mutex);
return ret;
}
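Turning uevent_seqnum into an atomic64_t is what lets the mutex shrink to protecting only uevent_sock_list: each caller can claim a unique sequence number without serializing on the lock. A minimal userspace analog of that pattern using C11 atomics (illustrative, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long long seqnum;

/* Equivalent of atomic64_inc_return(): increment and return the new value,
 * unique per caller even with no lock held. */
static unsigned long long next_seqnum(void)
{
	return atomic_fetch_add(&seqnum, 1) + 1;
}

int main(void)
{
	printf("SEQNUM=%llu\n", next_seqnum()); /* 1 */
	printf("SEQNUM=%llu\n", next_seqnum()); /* 2 */
	return 0;
}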
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
index 3a31fe9ed6..25c81ed465 100644
--- a/lib/kunit/device.c
+++ b/lib/kunit/device.c
@@ -36,7 +36,7 @@ struct kunit_device {
#define to_kunit_device(d) container_of_const(d, struct kunit_device, dev)
-static struct bus_type kunit_bus_type = {
+static const struct bus_type kunit_bus_type = {
.name = "kunit",
};
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 689fff2b2b..70b9a43cd2 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -33,13 +33,13 @@ static char *filter_glob_param;
static char *filter_param;
static char *filter_action_param;
-module_param_named(filter_glob, filter_glob_param, charp, 0400);
+module_param_named(filter_glob, filter_glob_param, charp, 0600);
MODULE_PARM_DESC(filter_glob,
"Filter which KUnit test suites/tests run at boot-time, e.g. list* or list*.*del_test");
-module_param_named(filter, filter_param, charp, 0400);
+module_param_named(filter, filter_param, charp, 0600);
MODULE_PARM_DESC(filter,
"Filter which KUnit test suites/tests run at boot-time using attributes, e.g. speed>slow");
-module_param_named(filter_action, filter_action_param, charp, 0400);
+module_param_named(filter_action, filter_action_param, charp, 0600);
MODULE_PARM_DESC(filter_action,
"Changes behavior of filtered tests using attributes, valid values are:\n"
"<none>: do not run filtered tests as normal\n"
diff --git a/lib/livepatch/Makefile b/lib/livepatch/Makefile
deleted file mode 100644
index dcc912b347..0000000000
--- a/lib/livepatch/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for livepatch test code.
-
-obj-$(CONFIG_TEST_LIVEPATCH) += test_klp_atomic_replace.o \
- test_klp_callbacks_demo.o \
- test_klp_callbacks_demo2.o \
- test_klp_callbacks_busy.o \
- test_klp_callbacks_mod.o \
- test_klp_livepatch.o \
- test_klp_shadow_vars.o \
- test_klp_state.o \
- test_klp_state2.o \
- test_klp_state3.o
diff --git a/lib/livepatch/test_klp_atomic_replace.c b/lib/livepatch/test_klp_atomic_replace.c
deleted file mode 100644
index 5af7093ca0..0000000000
--- a/lib/livepatch/test_klp_atomic_replace.c
+++ /dev/null
@@ -1,57 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-static int replace;
-module_param(replace, int, 0644);
-MODULE_PARM_DESC(replace, "replace (default=0)");
-
-#include <linux/seq_file.h>
-static int livepatch_meminfo_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%s: %s\n", THIS_MODULE->name,
- "this has been live patched");
- return 0;
-}
-
-static struct klp_func funcs[] = {
- {
- .old_name = "meminfo_proc_show",
- .new_func = livepatch_meminfo_proc_show,
- }, {}
-};
-
-static struct klp_object objs[] = {
- {
- /* name being NULL means vmlinux */
- .funcs = funcs,
- }, {}
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- /* set .replace in the init function below for demo purposes */
-};
-
-static int test_klp_atomic_replace_init(void)
-{
- patch.replace = replace;
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_atomic_replace_exit(void)
-{
-}
-
-module_init(test_klp_atomic_replace_init);
-module_exit(test_klp_atomic_replace_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: atomic replace");
diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c
deleted file mode 100644
index 133929e0ce..0000000000
--- a/lib/livepatch/test_klp_callbacks_busy.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-#include <linux/delay.h>
-
-/* load/run-time control from sysfs writer */
-static bool block_transition;
-module_param(block_transition, bool, 0644);
-MODULE_PARM_DESC(block_transition, "block_transition (default=false)");
-
-static void busymod_work_func(struct work_struct *work);
-static DECLARE_WORK(work, busymod_work_func);
-static DECLARE_COMPLETION(busymod_work_started);
-
-static void busymod_work_func(struct work_struct *work)
-{
- pr_info("%s enter\n", __func__);
- complete(&busymod_work_started);
-
- while (READ_ONCE(block_transition)) {
- /*
- * Busy-wait until the sysfs writer has acknowledged a
- * blocked transition and clears the flag.
- */
- msleep(20);
- }
-
- pr_info("%s exit\n", __func__);
-}
-
-static int test_klp_callbacks_busy_init(void)
-{
- pr_info("%s\n", __func__);
- schedule_work(&work);
-
- /*
- * To synchronize kernel messages, hold the init function from
- * exiting until the work function's entry message has printed.
- */
- wait_for_completion(&busymod_work_started);
-
- if (!block_transition) {
- /*
- * Serialize output: print all messages from the work
- * function before returning from init().
- */
- flush_work(&work);
- }
-
- return 0;
-}
-
-static void test_klp_callbacks_busy_exit(void)
-{
- WRITE_ONCE(block_transition, false);
- flush_work(&work);
- pr_info("%s\n", __func__);
-}
-
-module_init(test_klp_callbacks_busy_init);
-module_exit(test_klp_callbacks_busy_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: busy target module");
diff --git a/lib/livepatch/test_klp_callbacks_demo.c b/lib/livepatch/test_klp_callbacks_demo.c
deleted file mode 100644
index 3fd8fe1cd1..0000000000
--- a/lib/livepatch/test_klp_callbacks_demo.c
+++ /dev/null
@@ -1,121 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-static int pre_patch_ret;
-module_param(pre_patch_ret, int, 0644);
-MODULE_PARM_DESC(pre_patch_ret, "pre_patch_ret (default=0)");
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return pre_patch_ret;
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-static void patched_work_func(struct work_struct *work)
-{
- pr_info("%s\n", __func__);
-}
-
-static struct klp_func no_funcs[] = {
- {}
-};
-
-static struct klp_func busymod_funcs[] = {
- {
- .old_name = "busymod_work_func",
- .new_func = patched_work_func,
- }, {}
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, {
- .name = "test_klp_callbacks_mod",
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, {
- .name = "test_klp_callbacks_busy",
- .funcs = busymod_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
-};
-
-static int test_klp_callbacks_demo_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo_init);
-module_exit(test_klp_callbacks_demo_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: livepatch demo");
diff --git a/lib/livepatch/test_klp_callbacks_demo2.c b/lib/livepatch/test_klp_callbacks_demo2.c
deleted file mode 100644
index 5417573e80..0000000000
--- a/lib/livepatch/test_klp_callbacks_demo2.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-static int replace;
-module_param(replace, int, 0644);
-MODULE_PARM_DESC(replace, "replace (default=0)");
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return 0;
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
-}
-
-static struct klp_func no_funcs[] = {
- { }
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- /* set .replace in the init function below for demo purposes */
-};
-
-static int test_klp_callbacks_demo2_init(void)
-{
- patch.replace = replace;
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo2_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo2_init);
-module_exit(test_klp_callbacks_demo2_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: livepatch demo2");
diff --git a/lib/livepatch/test_klp_callbacks_mod.c b/lib/livepatch/test_klp_callbacks_mod.c
deleted file mode 100644
index 8fbe645b1c..0000000000
--- a/lib/livepatch/test_klp_callbacks_mod.c
+++ /dev/null
@@ -1,24 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-
-static int test_klp_callbacks_mod_init(void)
-{
- pr_info("%s\n", __func__);
- return 0;
-}
-
-static void test_klp_callbacks_mod_exit(void)
-{
- pr_info("%s\n", __func__);
-}
-
-module_init(test_klp_callbacks_mod_init);
-module_exit(test_klp_callbacks_mod_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: target module");
diff --git a/lib/livepatch/test_klp_livepatch.c b/lib/livepatch/test_klp_livepatch.c
deleted file mode 100644
index aff08199de..0000000000
--- a/lib/livepatch/test_klp_livepatch.c
+++ /dev/null
@@ -1,51 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/livepatch.h>
-
-#include <linux/seq_file.h>
-static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%s: %s\n", THIS_MODULE->name,
- "this has been live patched");
- return 0;
-}
-
-static struct klp_func funcs[] = {
- {
- .old_name = "cmdline_proc_show",
- .new_func = livepatch_cmdline_proc_show,
- }, { }
-};
-
-static struct klp_object objs[] = {
- {
- /* name being NULL means vmlinux */
- .funcs = funcs,
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
-};
-
-static int test_klp_livepatch_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_livepatch_exit(void)
-{
-}
-
-module_init(test_klp_livepatch_init);
-module_exit(test_klp_livepatch_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Seth Jennings <sjenning@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: livepatch module");
diff --git a/lib/livepatch/test_klp_shadow_vars.c b/lib/livepatch/test_klp_shadow_vars.c
deleted file mode 100644
index b991164908..0000000000
--- a/lib/livepatch/test_klp_shadow_vars.c
+++ /dev/null
@@ -1,301 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Joe Lawrence <joe.lawrence@redhat.com>
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/livepatch.h>
-#include <linux/slab.h>
-
-/*
- * Keep a small list of pointers so that we can print address-agnostic
- * pointer values. Use a rolling integer count to differentiate the values.
- * Ironically we could have used the shadow variable API to do this, but
- * let's not lean too heavily on the very code we're testing.
- */
-static LIST_HEAD(ptr_list);
-struct shadow_ptr {
- void *ptr;
- int id;
- struct list_head list;
-};
-
-static void free_ptr_list(void)
-{
- struct shadow_ptr *sp, *tmp_sp;
-
- list_for_each_entry_safe(sp, tmp_sp, &ptr_list, list) {
- list_del(&sp->list);
- kfree(sp);
- }
-}
-
-static int ptr_id(void *ptr)
-{
- struct shadow_ptr *sp;
- static int count;
-
- list_for_each_entry(sp, &ptr_list, list) {
- if (sp->ptr == ptr)
- return sp->id;
- }
-
- sp = kmalloc(sizeof(*sp), GFP_ATOMIC);
- if (!sp)
- return -ENOMEM;
- sp->ptr = ptr;
- sp->id = count++;
-
- list_add(&sp->list, &ptr_list);
-
- return sp->id;
-}
-
-/*
- * Shadow variable wrapper functions that echo the function and arguments
- * to the kernel log for testing verification. Don't display raw pointers,
- * but use the ptr_id() value instead.
- */
-static void *shadow_get(void *obj, unsigned long id)
-{
- int **sv;
-
- sv = klp_shadow_get(obj, id);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx) = PTR%d\n",
- __func__, ptr_id(obj), id, ptr_id(sv));
-
- return sv;
-}
-
-static void *shadow_alloc(void *obj, unsigned long id, size_t size,
- gfp_t gfp_flags, klp_shadow_ctor_t ctor,
- void *ctor_data)
-{
- int **var = ctor_data;
- int **sv;
-
- sv = klp_shadow_alloc(obj, id, size, gfp_flags, ctor, var);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
- __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
- ptr_id(*var), ptr_id(sv));
-
- return sv;
-}
-
-static void *shadow_get_or_alloc(void *obj, unsigned long id, size_t size,
- gfp_t gfp_flags, klp_shadow_ctor_t ctor,
- void *ctor_data)
-{
- int **var = ctor_data;
- int **sv;
-
- sv = klp_shadow_get_or_alloc(obj, id, size, gfp_flags, ctor, var);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx, size=%zx, gfp_flags=%pGg), ctor=PTR%d, ctor_data=PTR%d = PTR%d\n",
- __func__, ptr_id(obj), id, size, &gfp_flags, ptr_id(ctor),
- ptr_id(*var), ptr_id(sv));
-
- return sv;
-}
-
-static void shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
-{
- klp_shadow_free(obj, id, dtor);
- pr_info("klp_%s(obj=PTR%d, id=0x%lx, dtor=PTR%d)\n",
- __func__, ptr_id(obj), id, ptr_id(dtor));
-}
-
-static void shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
-{
- klp_shadow_free_all(id, dtor);
- pr_info("klp_%s(id=0x%lx, dtor=PTR%d)\n", __func__, id, ptr_id(dtor));
-}
-
-
-/* Shadow variable constructor - remember simple pointer data */
-static int shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
-{
- int **sv = shadow_data;
- int **var = ctor_data;
-
- if (!var)
- return -EINVAL;
-
- *sv = *var;
- pr_info("%s: PTR%d -> PTR%d\n", __func__, ptr_id(sv), ptr_id(*var));
-
- return 0;
-}
-
-/*
- * With more than one item to free in the list, order is not determined and
- * shadow_dtor will not be passed to shadow_free_all() which would make the
- * test fail. (see pass 6)
- */
-static void shadow_dtor(void *obj, void *shadow_data)
-{
- int **sv = shadow_data;
-
- pr_info("%s(obj=PTR%d, shadow_data=PTR%d)\n",
- __func__, ptr_id(obj), ptr_id(sv));
-}
-
-/* number of objects we simulate that need shadow vars */
-#define NUM_OBJS 3
-
-/* dynamically created obj fields have the following shadow var id values */
-#define SV_ID1 0x1234
-#define SV_ID2 0x1235
-
-/*
- * The main test case adds/removes new fields (shadow var) to each of these
- * test structure instances. The last group of fields in the struct represent
- * the idea that shadow variables may be added and removed to and from the
- * struct during execution.
- */
-struct test_object {
- /* add anything here below and avoid to define an empty struct */
- struct shadow_ptr sp;
-
- /* these represent shadow vars added and removed with SV_ID{1,2} */
- /* char nfield1; */
- /* int nfield2; */
-};
-
-static int test_klp_shadow_vars_init(void)
-{
- struct test_object objs[NUM_OBJS];
- char nfields1[NUM_OBJS], *pnfields1[NUM_OBJS], **sv1[NUM_OBJS];
- char *pndup[NUM_OBJS];
- int nfields2[NUM_OBJS], *pnfields2[NUM_OBJS], **sv2[NUM_OBJS];
- void **sv;
- int ret;
- int i;
-
- ptr_id(NULL);
-
- /*
- * With an empty shadow variable hash table, expect not to find
- * any matches.
- */
- sv = shadow_get(&objs[0], SV_ID1);
- if (!sv)
- pr_info(" got expected NULL result\n");
-
- /* pass 1: init & alloc a char+int pair of svars for each objs */
- for (i = 0; i < NUM_OBJS; i++) {
- pnfields1[i] = &nfields1[i];
- ptr_id(pnfields1[i]);
-
- if (i % 2) {
- sv1[i] = shadow_alloc(&objs[i], SV_ID1,
- sizeof(pnfields1[i]), GFP_KERNEL,
- shadow_ctor, &pnfields1[i]);
- } else {
- sv1[i] = shadow_get_or_alloc(&objs[i], SV_ID1,
- sizeof(pnfields1[i]), GFP_KERNEL,
- shadow_ctor, &pnfields1[i]);
- }
- if (!sv1[i]) {
- ret = -ENOMEM;
- goto out;
- }
-
- pnfields2[i] = &nfields2[i];
- ptr_id(pnfields2[i]);
- sv2[i] = shadow_alloc(&objs[i], SV_ID2, sizeof(pnfields2[i]),
- GFP_KERNEL, shadow_ctor, &pnfields2[i]);
- if (!sv2[i]) {
- ret = -ENOMEM;
- goto out;
- }
- }
-
- /* pass 2: verify we find allocated svars and where they point to */
- for (i = 0; i < NUM_OBJS; i++) {
- /* check the "char" svar for all objects */
- sv = shadow_get(&objs[i], SV_ID1);
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv1[i]), ptr_id(*sv1[i]));
-
- /* check the "int" svar for all objects */
- sv = shadow_get(&objs[i], SV_ID2);
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv2[i]), ptr_id(*sv2[i]));
- }
-
- /* pass 3: verify that 'get_or_alloc' returns already allocated svars */
- for (i = 0; i < NUM_OBJS; i++) {
- pndup[i] = &nfields1[i];
- ptr_id(pndup[i]);
-
- sv = shadow_get_or_alloc(&objs[i], SV_ID1, sizeof(pndup[i]),
- GFP_KERNEL, shadow_ctor, &pndup[i]);
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((char **)sv == sv1[i] && *sv1[i] == pnfields1[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv1[i]), ptr_id(*sv1[i]));
- }
-
- /* pass 4: free <objs[*], SV_ID1> pairs of svars, verify removal */
- for (i = 0; i < NUM_OBJS; i++) {
- shadow_free(&objs[i], SV_ID1, shadow_dtor); /* 'char' pairs */
- sv = shadow_get(&objs[i], SV_ID1);
- if (!sv)
- pr_info(" got expected NULL result\n");
- }
-
- /* pass 5: check we still find <objs[*], SV_ID2> svar pairs */
- for (i = 0; i < NUM_OBJS; i++) {
- sv = shadow_get(&objs[i], SV_ID2); /* 'int' pairs */
- if (!sv) {
- ret = -EINVAL;
- goto out;
- }
- if ((int **)sv == sv2[i] && *sv2[i] == pnfields2[i])
- pr_info(" got expected PTR%d -> PTR%d result\n",
- ptr_id(sv2[i]), ptr_id(*sv2[i]));
- }
-
- /* pass 6: free all the <objs[*], SV_ID2> svar pairs too. */
- shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
- for (i = 0; i < NUM_OBJS; i++) {
- sv = shadow_get(&objs[i], SV_ID2);
- if (!sv)
- pr_info(" got expected NULL result\n");
- }
-
- free_ptr_list();
-
- return 0;
-out:
- shadow_free_all(SV_ID1, NULL); /* 'char' pairs */
- shadow_free_all(SV_ID2, NULL); /* 'int' pairs */
- free_ptr_list();
-
- return ret;
-}
-
-static void test_klp_shadow_vars_exit(void)
-{
-}
-
-module_init(test_klp_shadow_vars_init);
-module_exit(test_klp_shadow_vars_exit);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Joe Lawrence <joe.lawrence@redhat.com>");
-MODULE_DESCRIPTION("Livepatch test: shadow variables");
diff --git a/lib/livepatch/test_klp_state.c b/lib/livepatch/test_klp_state.c
deleted file mode 100644
index 57a4253acb..0000000000
--- a/lib/livepatch/test_klp_state.c
+++ /dev/null
@@ -1,162 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2019 SUSE
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/printk.h>
-#include <linux/livepatch.h>
-
-#define CONSOLE_LOGLEVEL_STATE 1
-/* Version 1 does not support migration. */
-#define CONSOLE_LOGLEVEL_STATE_VERSION 1
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-static struct klp_patch patch;
-
-static int allocate_loglevel_state(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return -EINVAL;
-
- loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
- if (!loglevel_state->data)
- return -ENOMEM;
-
- pr_info("%s: allocating space to store console_loglevel\n",
- __func__);
- return 0;
-}
-
-static void fix_console_loglevel(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: fixing console_loglevel\n", __func__);
- *(int *)loglevel_state->data = console_loglevel;
- console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
-}
-
-static void restore_console_loglevel(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: restoring console_loglevel\n", __func__);
- console_loglevel = *(int *)loglevel_state->data;
-}
-
-static void free_loglevel_state(void)
-{
- struct klp_state *loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: freeing space for the stored console_loglevel\n",
- __func__);
- kfree(loglevel_state->data);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return allocate_loglevel_state();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- fix_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- restore_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- free_loglevel_state();
-}
-
-static struct klp_func no_funcs[] = {
- {}
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_state states[] = {
- {
- .id = CONSOLE_LOGLEVEL_STATE,
- .version = CONSOLE_LOGLEVEL_STATE_VERSION,
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- .states = states,
- .replace = true,
-};
-
-static int test_klp_callbacks_demo_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo_init);
-module_exit(test_klp_callbacks_demo_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
-MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state2.c b/lib/livepatch/test_klp_state2.c
deleted file mode 100644
index c978ea4d5e..0000000000
--- a/lib/livepatch/test_klp_state2.c
+++ /dev/null
@@ -1,191 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2019 SUSE
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/printk.h>
-#include <linux/livepatch.h>
-
-#define CONSOLE_LOGLEVEL_STATE 1
-/* Version 2 supports migration. */
-#define CONSOLE_LOGLEVEL_STATE_VERSION 2
-
-static const char *const module_state[] = {
- [MODULE_STATE_LIVE] = "[MODULE_STATE_LIVE] Normal state",
- [MODULE_STATE_COMING] = "[MODULE_STATE_COMING] Full formed, running module_init",
- [MODULE_STATE_GOING] = "[MODULE_STATE_GOING] Going away",
- [MODULE_STATE_UNFORMED] = "[MODULE_STATE_UNFORMED] Still setting it up",
-};
-
-static void callback_info(const char *callback, struct klp_object *obj)
-{
- if (obj->mod)
- pr_info("%s: %s -> %s\n", callback, obj->mod->name,
- module_state[obj->mod->state]);
- else
- pr_info("%s: vmlinux\n", callback);
-}
-
-static struct klp_patch patch;
-
-static int allocate_loglevel_state(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: space to store console_loglevel already allocated\n",
- __func__);
- return 0;
- }
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return -EINVAL;
-
- loglevel_state->data = kzalloc(sizeof(console_loglevel), GFP_KERNEL);
- if (!loglevel_state->data)
- return -ENOMEM;
-
- pr_info("%s: allocating space to store console_loglevel\n",
- __func__);
- return 0;
-}
-
-static void fix_console_loglevel(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: taking over the console_loglevel change\n",
- __func__);
- loglevel_state->data = prev_loglevel_state->data;
- return;
- }
-
- pr_info("%s: fixing console_loglevel\n", __func__);
- *(int *)loglevel_state->data = console_loglevel;
- console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
-}
-
-static void restore_console_loglevel(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: passing the console_loglevel change back to the old livepatch\n",
- __func__);
- return;
- }
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: restoring console_loglevel\n", __func__);
- console_loglevel = *(int *)loglevel_state->data;
-}
-
-static void free_loglevel_state(void)
-{
- struct klp_state *loglevel_state, *prev_loglevel_state;
-
- prev_loglevel_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
- if (prev_loglevel_state) {
- pr_info("%s: keeping space to store console_loglevel\n",
- __func__);
- return;
- }
-
- loglevel_state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
- if (!loglevel_state)
- return;
-
- pr_info("%s: freeing space for the stored console_loglevel\n",
- __func__);
- kfree(loglevel_state->data);
-}
-
-/* Executed on object patching (ie, patch enablement) */
-static int pre_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- return allocate_loglevel_state();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_patch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- fix_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void pre_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- restore_console_loglevel();
-}
-
-/* Executed on object unpatching (ie, patch disablement) */
-static void post_unpatch_callback(struct klp_object *obj)
-{
- callback_info(__func__, obj);
- free_loglevel_state();
-}
-
-static struct klp_func no_funcs[] = {
- {}
-};
-
-static struct klp_object objs[] = {
- {
- .name = NULL, /* vmlinux */
- .funcs = no_funcs,
- .callbacks = {
- .pre_patch = pre_patch_callback,
- .post_patch = post_patch_callback,
- .pre_unpatch = pre_unpatch_callback,
- .post_unpatch = post_unpatch_callback,
- },
- }, { }
-};
-
-static struct klp_state states[] = {
- {
- .id = CONSOLE_LOGLEVEL_STATE,
- .version = CONSOLE_LOGLEVEL_STATE_VERSION,
- }, { }
-};
-
-static struct klp_patch patch = {
- .mod = THIS_MODULE,
- .objs = objs,
- .states = states,
- .replace = true,
-};
-
-static int test_klp_callbacks_demo_init(void)
-{
- return klp_enable_patch(&patch);
-}
-
-static void test_klp_callbacks_demo_exit(void)
-{
-}
-
-module_init(test_klp_callbacks_demo_init);
-module_exit(test_klp_callbacks_demo_exit);
-MODULE_LICENSE("GPL");
-MODULE_INFO(livepatch, "Y");
-MODULE_AUTHOR("Petr Mladek <pmladek@suse.com>");
-MODULE_DESCRIPTION("Livepatch test: system state modification");
diff --git a/lib/livepatch/test_klp_state3.c b/lib/livepatch/test_klp_state3.c
deleted file mode 100644
index 9226579d10..0000000000
--- a/lib/livepatch/test_klp_state3.c
+++ /dev/null
@@ -1,5 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2019 SUSE
-
-/* The console loglevel fix is the same in the next cumulative patch. */
-#include "test_klp_state2.c"
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index fe6092a1dc..2d7d27e6ae 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -1307,8 +1307,8 @@ static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
}
/*
- * mas_node_count() - Check if enough nodes are allocated and request more if
- * there is not enough nodes.
+ * mas_node_count_gfp() - Check if enough nodes are allocated and request more
+ * if there are not enough nodes.
* @mas: The maple state
* @count: The number of nodes needed
* @gfp: the gfp flags
@@ -2271,8 +2271,6 @@ bool mast_spanning_rebalance(struct maple_subtree_state *mast)
struct ma_state l_tmp = *mast->orig_l;
unsigned char depth = 0;
- r_tmp = *mast->orig_r;
- l_tmp = *mast->orig_l;
do {
mas_ascend(mast->orig_r);
mas_ascend(mast->orig_l);
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 55a81782e2..191761b1b6 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -22,6 +22,7 @@
#include <linux/export.h>
#include <linux/math.h>
#include <linux/math64.h>
+#include <linux/minmax.h>
#include <linux/log2.h>
/* Not needed on 64bit architectures */
@@ -191,6 +192,20 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
/* can a * b overflow ? */
if (ilog2(a) + ilog2(b) > 62) {
/*
+ * Note that the algorithm after the if block below might lose
+ * some precision, and the result is more accurate for b > a. So
+ * exchange a and b if a is bigger than b.
+ *
+ * For example with a = 43980465100800, b = 100000000, c = 1000000000
+ * the below calculation doesn't modify b at all because div == 0
+ * and then shift becomes 45 + 26 - 62 = 9 and so the result
+ * becomes 4398035251080. However with a and b swapped the exact
+ * result is calculated (i.e. 4398046510080).
+ */
+ if (a > b)
+ swap(a, b);
+
+ /*
* (b * a) / c is equal to
*
* (b / c) * a +
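The numbers quoted in the new comment are easy to verify with 128-bit arithmetic: 43980465100800 * 100000000 / 1000000000 is exactly 4398046510080, the value attributed to the swapped-operand ordering. A quick hypothetical userspace check (GCC/Clang __int128):

#include <stdio.h>

int main(void)
{
	unsigned __int128 a = 43980465100800ULL;
	unsigned __int128 b = 100000000ULL;
	unsigned long long c = 1000000000ULL;

	/* a * b fits comfortably in 128 bits; the exact quotient matches the
	 * figure the comment above gives for the swapped ordering. */
	printf("exact = %llu\n", (unsigned long long)(a * b / c)); /* 4398046510080 */
	return 0;
}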
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 30e00ef0bf..fd16e6ce53 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -309,9 +309,6 @@ static void set_random_nonzero(struct kunit *test, u8 *byte)
static void init_large(struct kunit *test)
{
- if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST))
- kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y");
-
/* Get many bit patterns. */
get_random_bytes(large_src, ARRAY_SIZE(large_src));
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index c85c8b121d..4ef31b0bb7 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -258,25 +258,84 @@ DEFINE_TEST_ARRAY(s64) = {
\
_of = check_ ## op ## _overflow(a, b, &_r); \
KUNIT_EXPECT_EQ_MSG(test, _of, of, \
- "expected "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
+ "expected check "fmt" "sym" "fmt" to%s overflow (type %s)\n", \
a, b, of ? "" : " not", #t); \
KUNIT_EXPECT_EQ_MSG(test, _r, r, \
- "expected "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
+ "expected check "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
a, b, r, _r, #t); \
/* Check for internal macro side-effects. */ \
_of = check_ ## op ## _overflow(_a_orig++, _b_orig++, &_r); \
- KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, "Unexpected " #op " macro side-effect!\n"); \
- KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, "Unexpected " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, \
+ "Unexpected check " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected check " #op " macro side-effect!\n"); \
+ \
+ _r = wrapping_ ## op(t, a, b); \
+ KUNIT_EXPECT_TRUE_MSG(test, _r == r, \
+ "expected wrap "fmt" "sym" "fmt" == "fmt", got "fmt" (type %s)\n", \
+ a, b, r, _r, #t); \
+ /* Check for internal macro side-effects. */ \
+ _a_orig = a; \
+ _b_orig = b; \
+ _r = wrapping_ ## op(t, _a_orig++, _b_orig++); \
+ KUNIT_EXPECT_EQ_MSG(test, _a_orig, _a_bump, \
+ "Unexpected wrap " #op " macro side-effect!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected wrap " #op " macro side-effect!\n"); \
+} while (0)
+
+static int global_counter;
+static void bump_counter(void)
+{
+ global_counter++;
+}
+
+static int get_index(void)
+{
+ volatile int index = 0;
+ bump_counter();
+ return index;
+}
+
+#define check_self_op(fmt, op, sym, a, b) do { \
+ typeof(a + 0) _a = a; \
+ typeof(b + 0) _b = b; \
+ typeof(a + 0) _a_sym = a; \
+ typeof(a + 0) _a_orig[1] = { a }; \
+ typeof(b + 0) _b_orig = b; \
+ typeof(b + 0) _b_bump = b + 1; \
+ typeof(a + 0) _r; \
+ \
+ _a_sym sym _b; \
+ _r = wrapping_ ## op(_a, _b); \
+ KUNIT_EXPECT_TRUE_MSG(test, _r == _a_sym, \
+ "expected "fmt" "#op" "fmt" == "fmt", got "fmt"\n", \
+ a, b, _a_sym, _r); \
+ KUNIT_EXPECT_TRUE_MSG(test, _a == _a_sym, \
+ "expected "fmt" "#op" "fmt" == "fmt", got "fmt"\n", \
+ a, b, _a_sym, _a); \
+ /* Check for internal macro side-effects. */ \
+ global_counter = 0; \
+ wrapping_ ## op(_a_orig[get_index()], _b_orig++); \
+ KUNIT_EXPECT_EQ_MSG(test, global_counter, 1, \
+ "Unexpected wrapping_" #op " macro side-effect on arg1!\n"); \
+ KUNIT_EXPECT_EQ_MSG(test, _b_orig, _b_bump, \
+ "Unexpected wrapping_" #op " macro side-effect on arg2!\n"); \
} while (0)
#define DEFINE_TEST_FUNC_TYPED(n, t, fmt) \
static void do_test_ ## n(struct kunit *test, const struct test_ ## n *p) \
{ \
+ /* check_{add,sub,mul}_overflow() and wrapping_{add,sub,mul} */ \
check_one_op(t, fmt, add, "+", p->a, p->b, p->sum, p->s_of); \
check_one_op(t, fmt, add, "+", p->b, p->a, p->sum, p->s_of); \
check_one_op(t, fmt, sub, "-", p->a, p->b, p->diff, p->d_of); \
check_one_op(t, fmt, mul, "*", p->a, p->b, p->prod, p->p_of); \
check_one_op(t, fmt, mul, "*", p->b, p->a, p->prod, p->p_of); \
+ /* wrapping_assign_{add,sub}() */ \
+ check_self_op(fmt, assign_add, +=, p->a, p->b); \
+ check_self_op(fmt, assign_add, +=, p->b, p->a); \
+ check_self_op(fmt, assign_sub, -=, p->a, p->b); \
} \
\
static void n ## _overflow_test(struct kunit *test) { \
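The new tests cover both the value-returning wrapping_{add,sub,mul}() forms and the in-place wrapping_assign_{add,sub}() forms, and check that neither evaluates its arguments twice. In userspace the same wrapping semantics can be modelled with the compiler overflow builtins, which store the wrapped result even when they report overflow; a small hypothetical analog:

#include <stdio.h>

/* Not the kernel macro: a userspace stand-in for wrapping_add() on a u8. */
static unsigned char wrap_add_u8(unsigned char a, unsigned char b)
{
	unsigned char r;

	__builtin_add_overflow(a, b, &r); /* r holds (a + b) mod 256 */
	return r;
}

int main(void)
{
	printf("%u\n", wrap_add_u8(200, 100)); /* 44, i.e. 300 % 256 */
	return 0;
}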
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
deleted file mode 100644
index 2829ddb0e3..0000000000
--- a/lib/pci_iomap.c
+++ /dev/null
@@ -1,180 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Implement the default iomap interfaces
- *
- * (C) Copyright 2004 Linus Torvalds
- */
-#include <linux/pci.h>
-#include <linux/io.h>
-
-#include <linux/export.h>
-
-#ifdef CONFIG_PCI
-/**
- * pci_iomap_range - create a virtual mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @offset: map memory at the given offset in BAR
- * @maxlen: max length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR from offset to the end, pass %0 here.
- * */
-void __iomem *pci_iomap_range(struct pci_dev *dev,
- int bar,
- unsigned long offset,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
- if (len <= offset || !start)
- return NULL;
- len -= offset;
- start += offset;
- if (maxlen && len > maxlen)
- len = maxlen;
- if (flags & IORESOURCE_IO)
- return __pci_ioport_map(dev, start, len);
- if (flags & IORESOURCE_MEM)
- return ioremap(start, len);
- /* What? */
- return NULL;
-}
-EXPORT_SYMBOL(pci_iomap_range);
-
-/**
- * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @offset: map memory at the given offset in BAR
- * @maxlen: max length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way. When possible write combining
- * is used.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR from offset to the end, pass %0 here.
- * */
-void __iomem *pci_iomap_wc_range(struct pci_dev *dev,
- int bar,
- unsigned long offset,
- unsigned long maxlen)
-{
- resource_size_t start = pci_resource_start(dev, bar);
- resource_size_t len = pci_resource_len(dev, bar);
- unsigned long flags = pci_resource_flags(dev, bar);
-
-
- if (flags & IORESOURCE_IO)
- return NULL;
-
- if (len <= offset || !start)
- return NULL;
-
- len -= offset;
- start += offset;
- if (maxlen && len > maxlen)
- len = maxlen;
-
- if (flags & IORESOURCE_MEM)
- return ioremap_wc(start, len);
-
- /* What? */
- return NULL;
-}
-EXPORT_SYMBOL_GPL(pci_iomap_wc_range);
-
-/**
- * pci_iomap - create a virtual mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @maxlen: length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR without checking for its length first, pass %0 here.
- * */
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
- return pci_iomap_range(dev, bar, 0, maxlen);
-}
-EXPORT_SYMBOL(pci_iomap);
-
-/**
- * pci_iomap_wc - create a virtual WC mapping cookie for a PCI BAR
- * @dev: PCI device that owns the BAR
- * @bar: BAR number
- * @maxlen: length of the memory to map
- *
- * Using this function you will get a __iomem address to your device BAR.
- * You can access it using ioread*() and iowrite*(). These functions hide
- * the details if this is a MMIO or PIO address space and will just do what
- * you expect from them in the correct way. When possible write combining
- * is used.
- *
- * @maxlen specifies the maximum length to map. If you want to get access to
- * the complete BAR without checking for its length first, pass %0 here.
- * */
-void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
- return pci_iomap_wc_range(dev, bar, 0, maxlen);
-}
-EXPORT_SYMBOL_GPL(pci_iomap_wc);
-
-/*
- * pci_iounmap() somewhat illogically comes from lib/iomap.c for the
- * CONFIG_GENERIC_IOMAP case, because that's the code that knows about
- * the different IOMAP ranges.
- *
- * But if the architecture does not use the generic iomap code, and if
- * it has _not_ defined it's own private pci_iounmap function, we define
- * it here.
- *
- * NOTE! This default implementation assumes that if the architecture
- * support ioport mapping (HAS_IOPORT_MAP), the ioport mapping will
- * be fixed to the range [ PCI_IOBASE, PCI_IOBASE+IO_SPACE_LIMIT [,
- * and does not need unmapping with 'ioport_unmap()'.
- *
- * If you have different rules for your architecture, you need to
- * implement your own pci_iounmap() that knows the rules for where
- * and how IO vs MEM get mapped.
- *
- * This code is odd, and the ARCH_HAS/ARCH_WANTS #define logic comes
- * from legacy <asm-generic/io.h> header file behavior. In particular,
- * it would seem to make sense to do the iounmap(p) for the non-IO-space
- * case here regardless, but that's not what the old header file code
- * did. Probably incorrectly, but this is meant to be bug-for-bug
- * compatible.
- */
-#if defined(ARCH_WANTS_GENERIC_PCI_IOUNMAP)
-
-void pci_iounmap(struct pci_dev *dev, void __iomem *p)
-{
-#ifdef ARCH_HAS_GENERIC_IOPORT_MAP
- uintptr_t start = (uintptr_t) PCI_IOBASE;
- uintptr_t addr = (uintptr_t) p;
-
- if (addr >= start && addr < start + IO_SPACE_LIMIT)
- return;
-#endif
- iounmap(p);
-}
-EXPORT_SYMBOL(pci_iounmap);
-
-#endif /* ARCH_WANTS_GENERIC_PCI_IOUNMAP */
-
-#endif /* CONFIG_PCI */
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 1c5420ff25..385a94aa0b 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -21,7 +21,7 @@ altivec_flags += -isystem $(shell $(CC) -print-file-name=include)
ifdef CONFIG_CC_IS_CLANG
# clang ppc port does not yet support -maltivec when -msoft-float is
# enabled. A future release of clang will resolve this
-# https://bugs.llvm.org/show_bug.cgi?id=31177
+# https://llvm.org/pr31177
CFLAGS_REMOVE_altivec1.o += -msoft-float
CFLAGS_REMOVE_altivec2.o += -msoft-float
CFLAGS_REMOVE_altivec4.o += -msoft-float
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index cd0b9e95f4..863e2d3209 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -12,15 +12,14 @@
*/
#include <linux/raid/pq.h>
-#include <asm/fpu/api.h>
-#include <asm/vx-insn.h>
+#include <asm/fpu.h>
#define NSIZE 16
-static inline void LOAD_CONST(void)
+static __always_inline void LOAD_CONST(void)
{
- asm volatile("VREPIB %v24,7");
- asm volatile("VREPIB %v25,0x1d");
+ fpu_vrepib(24, 0x07);
+ fpu_vrepib(25, 0x1d);
}
/*
@@ -28,10 +27,7 @@ static inline void LOAD_CONST(void)
* vector register y left by 1 bit and stores the result in
* vector register x.
*/
-static inline void SHLBYTE(int x, int y)
-{
- asm volatile ("VAB %0,%1,%1" : : "i" (x), "i" (y));
-}
+#define SHLBYTE(x, y) fpu_vab(x, y, y)
/*
* For each of the 16 bytes in the vector register y the MASK()
@@ -39,49 +35,17 @@ static inline void SHLBYTE(int x, int y)
* or 0x00 if the high bit is 0. The result is stored in vector
* register x.
*/
-static inline void MASK(int x, int y)
-{
- asm volatile ("VESRAVB %0,%1,24" : : "i" (x), "i" (y));
-}
-
-static inline void AND(int x, int y, int z)
-{
- asm volatile ("VN %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
-}
-
-static inline void XOR(int x, int y, int z)
-{
- asm volatile ("VX %0,%1,%2" : : "i" (x), "i" (y), "i" (z));
-}
+#define MASK(x, y) fpu_vesravb(x, y, 24)
-static inline void LOAD_DATA(int x, u8 *ptr)
-{
- typedef struct { u8 _[16 * $#]; } addrtype;
- register addrtype *__ptr asm("1") = (addrtype *) ptr;
-
- asm volatile ("VLM %2,%3,0,%1"
- : : "m" (*__ptr), "a" (__ptr), "i" (x),
- "i" (x + $# - 1));
-}
-
-static inline void STORE_DATA(int x, u8 *ptr)
-{
- typedef struct { u8 _[16 * $#]; } addrtype;
- register addrtype *__ptr asm("1") = (addrtype *) ptr;
-
- asm volatile ("VSTM %2,%3,0,1"
- : "=m" (*__ptr) : "a" (__ptr), "i" (x),
- "i" (x + $# - 1));
-}
-
-static inline void COPY_VEC(int x, int y)
-{
- asm volatile ("VLR %0,%1" : : "i" (x), "i" (y));
-}
+#define AND(x, y, z) fpu_vn(x, y, z)
+#define XOR(x, y, z) fpu_vx(x, y, z)
+#define LOAD_DATA(x, ptr) fpu_vlm(x, x + $# - 1, ptr)
+#define STORE_DATA(x, ptr) fpu_vstm(x, x + $# - 1, ptr)
+#define COPY_VEC(x, y) fpu_vlr(x, y)
static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
- struct kernel_fpu vxstate;
+ DECLARE_KERNEL_FPU_ONSTACK32(vxstate);
u8 **dptr, *p, *q;
int d, z, z0;
@@ -114,7 +78,7 @@ static void raid6_s390vx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
size_t bytes, void **ptrs)
{
- struct kernel_fpu vxstate;
+ DECLARE_KERNEL_FPU_ONSTACK32(vxstate);
u8 **dptr, *p, *q;
int d, z, z0;
diff --git a/lib/sort.c b/lib/sort.c
index b399bf10d6..a0509088f8 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -215,6 +215,7 @@ void sort_r(void *base, size_t num, size_t size,
/* pre-scale counters for performance */
size_t n = num * size, a = (num/2) * size;
const unsigned int lsbit = size & -size; /* Used to find parent */
+ size_t shift = 0;
if (!a) /* num < 2 || size == 0 */
return;
@@ -242,12 +243,21 @@ void sort_r(void *base, size_t num, size_t size,
for (;;) {
size_t b, c, d;
- if (a) /* Building heap: sift down --a */
- a -= size;
- else if (n -= size) /* Sorting: Extract root to --n */
+ if (a) /* Building heap: sift down a */
+ a -= size << shift;
+ else if (n > 3 * size) { /* Sorting: Extract two largest elements */
+ n -= size;
do_swap(base, base + n, size, swap_func, priv);
- else /* Sort complete */
+ shift = do_cmp(base + size, base + 2 * size, cmp_func, priv) <= 0;
+ a = size << shift;
+ n -= size;
+ do_swap(base + a, base + n, size, swap_func, priv);
+ } else if (n > size) { /* Sorting: Extract root */
+ n -= size;
+ do_swap(base, base + n, size, swap_func, priv);
+ } else { /* Sort complete */
break;
+ }
/*
* Sift element at "a" down into heap. This is the
@@ -262,7 +272,7 @@ void sort_r(void *base, size_t num, size_t size,
* average, 3/4 worst-case.)
*/
for (b = a; c = 2*b + size, (d = c + size) < n;)
- b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
+ b = do_cmp(base + c, base + d, cmp_func, priv) > 0 ? c : d;
if (d == n) /* Special case last leaf with no sibling */
b = c;
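
The sorting loop above now pulls two elements per pass: once the root (the largest remaining element) has been swapped to the tail, the second largest must be one of the root's two children, so it can be moved out in the same pass before sifting. A minimal standalone sketch of that idea on a plain int array — it uses two ordinary sift-downs rather than the kernel's single bottom-up sift over byte-addressed elements, and names such as heapsort_two_at_a_time() are illustrative only:

#include <stddef.h>
#include <stdio.h>

static void swap_int(int *a, int *b)
{
	int t = *a; *a = *b; *b = t;
}

/* Restore the max-heap property for the subtree rooted at 'root',
 * assuming both of its child subtrees are already valid heaps. */
static void sift_down(int *a, size_t root, size_t end)
{
	while (2 * root + 1 < end) {
		size_t child = 2 * root + 1;

		if (child + 1 < end && a[child] < a[child + 1])
			child++;
		if (a[root] >= a[child])
			return;
		swap_int(&a[root], &a[child]);
		root = child;
	}
}

static void heapsort_two_at_a_time(int *a, size_t n)
{
	size_t start, end, big;

	for (start = n / 2; start-- > 0; )	/* heapify */
		sift_down(a, start, n);

	for (end = n; end > 3; ) {
		/* Largest element sits at the root; move it to the tail. */
		swap_int(&a[0], &a[--end]);
		/* Second largest must be one of the root's two children. */
		big = (a[1] >= a[2]) ? 1 : 2;
		swap_int(&a[big], &a[--end]);
		/* Repair the subtree we took from, then the root. */
		sift_down(a, big, end);
		sift_down(a, 0, end);
	}
	while (end > 1) {			/* at most 3 elements left */
		swap_int(&a[0], &a[--end]);
		sift_down(a, 0, end);
	}
}

int main(void)
{
	int v[] = { 9, 1, 8, 4, 7, 2, 6, 3, 5, 0 };
	size_t i;

	heapsort_two_at_a_time(v, sizeof(v) / sizeof(v[0]));
	for (i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("%d ", v[i]);
	printf("\n");
	return 0;
}
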
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 91d2301fde..cd8f234552 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -687,6 +687,14 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
}
EXPORT_SYMBOL_GPL(stack_depot_save);
+struct stack_record *__stack_depot_get_stack_record(depot_stack_handle_t handle)
+{
+ if (!handle)
+ return NULL;
+
+ return depot_fetch_stack(handle);
+}
+
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c
index 05947a2feb..3bc14d1ee8 100644
--- a/lib/stackinit_kunit.c
+++ b/lib/stackinit_kunit.c
@@ -63,7 +63,16 @@ static bool stackinit_range_contains(char *haystack_start, size_t haystack_size,
#define FETCH_ARG_STRING(var) var
#define FETCH_ARG_STRUCT(var) &var
+/*
+ * On m68k, if the leaf function test variable is longer than 8 bytes,
+ * the start of the stack frame moves. 8 is sufficiently large to
+ * test m68k char arrays, but leave it at 16 for other architectures.
+ */
+#ifdef CONFIG_M68K
+#define FILL_SIZE_STRING 8
+#else
#define FILL_SIZE_STRING 16
+#endif
#define INIT_CLONE_SCALAR /**/
#define INIT_CLONE_STRING [FILL_SIZE_STRING]
@@ -165,19 +174,23 @@ static noinline void test_ ## name (struct kunit *test) \
/* Verify all bytes overwritten with 0xFF. */ \
for (sum = 0, i = 0; i < target_size; i++) \
sum += (check_buf[i] != 0xFF); \
- KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
- "leaf fill was not 0xFF!?\n"); \
/* Clear entire check buffer for later bit tests. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Extract stack-defined variable contents. */ \
ignored = leaf_ ##name((unsigned long)&ignored, 0, \
FETCH_ARG_ ## which(zero)); \
+ /* \
+ * Delay the sum test to here to do as little as \
+ * possible between the two leaf function calls. \
+ */ \
+ KUNIT_ASSERT_EQ_MSG(test, sum, 0, \
+ "leaf fill was not 0xFF!?\n"); \
\
/* Validate that compiler lined up fill and target. */ \
KUNIT_ASSERT_TRUE_MSG(test, \
stackinit_range_contains(fill_start, fill_size, \
target_start, target_size), \
- "stack fill missed target!? " \
+ "stackframe was not the same between calls!? " \
"(fill %zu wide, target offset by %d)\n", \
fill_size, \
(int)((ssize_t)(uintptr_t)fill_start - \
@@ -404,7 +417,7 @@ static noinline int leaf_switch_2_none(unsigned long sp, bool fill,
* These are expected to fail for most configurations because neither
* GCC nor Clang have a way to perform initialization of variables in
* non-code areas (i.e. in a switch statement before the first "case").
- * https://bugs.llvm.org/show_bug.cgi?id=44916
+ * https://llvm.org/pr44916
*/
DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, ALWAYS_FAIL);
DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, ALWAYS_FAIL);
diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c
index e21be95514..ca09f7f0e6 100644
--- a/lib/strcat_kunit.c
+++ b/lib/strcat_kunit.c
@@ -10,7 +10,7 @@
static volatile int unconst;
-static void strcat_test(struct kunit *test)
+static void test_strcat(struct kunit *test)
{
char dest[8];
@@ -29,7 +29,7 @@ static void strcat_test(struct kunit *test)
KUNIT_EXPECT_STREQ(test, dest, "fourAB");
}
-static void strncat_test(struct kunit *test)
+static void test_strncat(struct kunit *test)
{
char dest[8];
@@ -56,7 +56,7 @@ static void strncat_test(struct kunit *test)
KUNIT_EXPECT_STREQ(test, dest, "fourAB");
}
-static void strlcat_test(struct kunit *test)
+static void test_strlcat(struct kunit *test)
{
char dest[8] = "";
int len = sizeof(dest) + unconst;
@@ -88,9 +88,9 @@ static void strlcat_test(struct kunit *test)
}
static struct kunit_case strcat_test_cases[] = {
- KUNIT_CASE(strcat_test),
- KUNIT_CASE(strncat_test),
- KUNIT_CASE(strlcat_test),
+ KUNIT_CASE(test_strcat),
+ KUNIT_CASE(test_strncat),
+ KUNIT_CASE(test_strlcat),
{}
};
diff --git a/lib/string.c b/lib/string.c
index 6891d15ce9..966da44bfc 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -15,19 +15,20 @@
*/
#define __NO_FORTIFY
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
+#include <linux/bits.h>
#include <linux/bug.h>
+#include <linux/ctype.h>
#include <linux/errno.h>
-#include <linux/slab.h>
+#include <linux/limits.h>
+#include <linux/linkage.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/page.h>
+#include <asm/rwonce.h>
#include <asm/unaligned.h>
-#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
-#include <asm/page.h>
#ifndef __HAVE_ARCH_STRNCASECMP
/**
@@ -103,8 +104,7 @@ char *strncpy(char *dest, const char *src, size_t count)
EXPORT_SYMBOL(strncpy);
#endif
-#ifndef __HAVE_ARCH_STRSCPY
-ssize_t strscpy(char *dest, const char *src, size_t count)
+ssize_t sized_strscpy(char *dest, const char *src, size_t count)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
size_t max = count;
@@ -170,8 +170,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
return -E2BIG;
}
-EXPORT_SYMBOL(strscpy);
-#endif
+EXPORT_SYMBOL(sized_strscpy);
/**
* stpcpy - copy a string from src to dest returning a pointer to the new end
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 7713f73e66..69ba49b853 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -18,12 +18,14 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
/**
* string_get_size - get the size in the specified units
* @size: The size to be converted in blocks
* @blk_size: Size of the block (use 1 for size in bytes)
- * @units: units to use (powers of 1000 or 1024)
+ * @units: Units to use (powers of 1000 or 1024), plus optional flags to omit the space separator or the "B" suffix
* @buf: buffer to format to
* @len: length of buffer
*
@@ -37,11 +39,12 @@
int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
char *buf, int len)
{
+ enum string_size_units units_base = units & STRING_UNITS_MASK;
static const char *const units_10[] = {
- "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
+ "", "k", "M", "G", "T", "P", "E", "Z", "Y",
};
static const char *const units_2[] = {
- "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"
+ "", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi",
};
static const char *const *const units_str[] = {
[STRING_UNITS_10] = units_10,
@@ -66,7 +69,7 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
/* This is Napier's algorithm. Reduce the original block size to
*
- * coefficient * divisor[units]^i
+ * coefficient * divisor[units_base]^i
*
* we do the reduction so both coefficients are just under 32 bits so
* that multiplying them together won't overflow 64 bits and we keep
@@ -76,12 +79,12 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
* precision is in the coefficients.
*/
while (blk_size >> 32) {
- do_div(blk_size, divisor[units]);
+ do_div(blk_size, divisor[units_base]);
i++;
}
while (size >> 32) {
- do_div(size, divisor[units]);
+ do_div(size, divisor[units_base]);
i++;
}
@@ -90,8 +93,8 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
size *= blk_size;
/* and logarithmically reduce it until it's just under the divisor */
- while (size >= divisor[units]) {
- remainder = do_div(size, divisor[units]);
+ while (size >= divisor[units_base]) {
+ remainder = do_div(size, divisor[units_base]);
i++;
}
@@ -101,10 +104,10 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
for (j = 0; sf_cap*10 < 1000; j++)
sf_cap *= 10;
- if (units == STRING_UNITS_2) {
+ if (units_base == STRING_UNITS_2) {
/* express the remainder as a decimal. It's currently the
* numerator of a fraction whose denominator is
- * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
+ * divisor[units_base], which is 1 << 10 for STRING_UNITS_2 */
remainder *= 1000;
remainder >>= 10;
}
@@ -126,10 +129,12 @@ int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
if (i >= ARRAY_SIZE(units_2))
unit = "UNK";
else
- unit = units_str[units][i];
+ unit = units_str[units_base][i];
- return snprintf(buf, len, "%u%s %s", (u32)size,
- tmp, unit);
+ return snprintf(buf, len, "%u%s%s%s%s", (u32)size, tmp,
+ (units & STRING_UNITS_NO_SPACE) ? "" : " ",
+ unit,
+ (units & STRING_UNITS_NO_BYTES) ? "" : "B");
}
EXPORT_SYMBOL(string_get_size);
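
With the flag bits now folded into @units, callers can drop the separator space and/or the trailing "B". A usage sketch in kernel context (not standalone); the expected strings follow from the snprintf() above and the 4096 x U64_MAX vector exercised by the KUnit test further down:

	char buf[16];

	string_get_size(4096, U64_MAX, STRING_UNITS_10, buf, sizeof(buf));
	/* buf == "75.6 ZB" */

	string_get_size(4096, U64_MAX, STRING_UNITS_10 | STRING_UNITS_NO_SPACE,
			buf, sizeof(buf));
	/* buf == "75.6ZB" */

	string_get_size(4096, U64_MAX, STRING_UNITS_10 | STRING_UNITS_NO_BYTES,
			buf, sizeof(buf));
	/* buf == "75.6 Z" */

	string_get_size(4096, U64_MAX,
			STRING_UNITS_2 | STRING_UNITS_NO_SPACE | STRING_UNITS_NO_BYTES,
			buf, sizeof(buf));
	/* buf == "64.0Zi" */
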
@@ -826,40 +831,6 @@ char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n)
EXPORT_SYMBOL_GPL(devm_kasprintf_strarray);
/**
- * strscpy_pad() - Copy a C-string into a sized buffer
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @count: Size of destination buffer
- *
- * Copy the string, or as much of it as fits, into the dest buffer. The
- * behavior is undefined if the string buffers overlap. The destination
- * buffer is always %NUL terminated, unless it's zero-sized.
- *
- * If the source string is shorter than the destination buffer, zeros
- * the tail of the destination buffer.
- *
- * For full explanation of why you may want to consider using the
- * 'strscpy' functions please see the function docstring for strscpy().
- *
- * Returns:
- * * The number of characters copied (not including the trailing %NUL)
- * * -E2BIG if count is 0 or @src was truncated.
- */
-ssize_t strscpy_pad(char *dest, const char *src, size_t count)
-{
- ssize_t written;
-
- written = strscpy(dest, src, count);
- if (written < 0 || written == count - 1)
- return written;
-
- memset(dest + written + 1, 0, count - written - 1);
-
- return written;
-}
-EXPORT_SYMBOL(strscpy_pad);
-
-/**
* skip_spaces - Removes leading whitespace from @str.
* @str: The string to be stripped.
*
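
strscpy_pad() and its kernel-doc move out of this file; for reference, a minimal userspace sketch of the semantics the removed comment describes — strscpy_pad_demo() and DEMO_E2BIG are illustrative stand-ins, not kernel APIs:

#include <stdio.h>
#include <string.h>

#define DEMO_E2BIG 7	/* stand-in for the kernel's E2BIG */

/* Copy at most count-1 characters, always NUL-terminate when count > 0,
 * zero the remainder of the buffer, and return -E2BIG on truncation or
 * when count is 0 -- per the documentation removed above. */
static long strscpy_pad_demo(char *dest, const char *src, size_t count)
{
	size_t len;

	if (count == 0)
		return -DEMO_E2BIG;

	len = strnlen(src, count);
	if (len >= count) {			/* source was truncated */
		memcpy(dest, src, count - 1);
		dest[count - 1] = '\0';
		return -DEMO_E2BIG;
	}
	memcpy(dest, src, len);
	memset(dest + len, 0, count - len);	/* NUL plus zero padding */
	return (long)len;
}

int main(void)
{
	char buf[8];

	printf("%ld\n", strscpy_pad_demo(buf, "ab", sizeof(buf)));		/* 2  */
	printf("%ld\n", strscpy_pad_demo(buf, "longer than 8", sizeof(buf)));	/* -7 */
	return 0;
}
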
@@ -1042,10 +1013,28 @@ EXPORT_SYMBOL(__read_overflow2_field);
void __write_overflow_field(size_t avail, size_t wanted) { }
EXPORT_SYMBOL(__write_overflow_field);
-void fortify_panic(const char *name)
+static const char * const fortify_func_name[] = {
+#define MAKE_FORTIFY_FUNC_NAME(func) [MAKE_FORTIFY_FUNC(func)] = #func
+ EACH_FORTIFY_FUNC(MAKE_FORTIFY_FUNC_NAME)
+#undef MAKE_FORTIFY_FUNC_NAME
+};
+
+void __fortify_report(const u8 reason, const size_t avail, const size_t size)
+{
+ const u8 func = FORTIFY_REASON_FUNC(reason);
+ const bool write = FORTIFY_REASON_DIR(reason);
+ const char *name;
+
+ name = fortify_func_name[umin(func, FORTIFY_FUNC_UNKNOWN)];
+ WARN(1, "%s: detected buffer overflow: %zu byte %s of buffer size %zu\n",
+ name, size, str_read_write(!write), avail);
+}
+EXPORT_SYMBOL(__fortify_report);
+
+void __fortify_panic(const u8 reason, const size_t avail, const size_t size)
{
- pr_emerg("detected buffer overflow in %s\n", name);
+ __fortify_report(reason, avail, size);
BUG();
}
-EXPORT_SYMBOL(fortify_panic);
+EXPORT_SYMBOL(__fortify_panic);
#endif /* CONFIG_FORTIFY_SOURCE */
diff --git a/lib/test-string_helpers.c b/lib/string_helpers_kunit.c
index 9a68849a5d..f88e39fd68 100644
--- a/lib/test-string_helpers.c
+++ b/lib/string_helpers_kunit.c
@@ -1,34 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Test cases for lib/string_helpers.c module.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/init.h>
+#include <kunit/test.h>
+#include <linux/array_size.h>
#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/string_helpers.h>
-static __init bool test_string_check_buf(const char *name, unsigned int flags,
- char *in, size_t p,
- char *out_real, size_t q_real,
- char *out_test, size_t q_test)
+static void test_string_check_buf(struct kunit *test,
+ const char *name, unsigned int flags,
+ char *in, size_t p,
+ char *out_real, size_t q_real,
+ char *out_test, size_t q_test)
{
- if (q_real == q_test && !memcmp(out_test, out_real, q_test))
- return true;
-
- pr_warn("Test '%s' failed: flags = %#x\n", name, flags);
-
- print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1,
- in, p, true);
- print_hex_dump(KERN_WARNING, "Expected: ", DUMP_PREFIX_NONE, 16, 1,
- out_test, q_test, true);
- print_hex_dump(KERN_WARNING, "Got: ", DUMP_PREFIX_NONE, 16, 1,
- out_real, q_real, true);
-
- return false;
+ KUNIT_ASSERT_EQ_MSG(test, q_real, q_test, "name:%s", name);
+ KUNIT_EXPECT_MEMEQ_MSG(test, out_test, out_real, q_test,
+ "name:%s", name);
}
struct test_string {
@@ -37,7 +28,7 @@ struct test_string {
unsigned int flags;
};
-static const struct test_string strings[] __initconst = {
+static const struct test_string strings[] = {
{
.in = "\\f\\ \\n\\r\\t\\v",
.out = "\f\\ \n\r\t\v",
@@ -60,17 +51,19 @@ static const struct test_string strings[] __initconst = {
},
};
-static void __init test_string_unescape(const char *name, unsigned int flags,
- bool inplace)
+static void test_string_unescape(struct kunit *test,
+ const char *name, unsigned int flags,
+ bool inplace)
{
int q_real = 256;
- char *in = kmalloc(q_real, GFP_KERNEL);
- char *out_test = kmalloc(q_real, GFP_KERNEL);
- char *out_real = kmalloc(q_real, GFP_KERNEL);
+ char *in = kunit_kzalloc(test, q_real, GFP_KERNEL);
+ char *out_test = kunit_kzalloc(test, q_real, GFP_KERNEL);
+ char *out_real = kunit_kzalloc(test, q_real, GFP_KERNEL);
int i, p = 0, q_test = 0;
- if (!in || !out_test || !out_real)
- goto out;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, in);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_real);
for (i = 0; i < ARRAY_SIZE(strings); i++) {
const char *s = strings[i].in;
@@ -103,12 +96,8 @@ static void __init test_string_unescape(const char *name, unsigned int flags,
q_real = string_unescape(in, out_real, q_real, flags);
}
- test_string_check_buf(name, flags, in, p - 1, out_real, q_real,
+ test_string_check_buf(test, name, flags, in, p - 1, out_real, q_real,
out_test, q_test);
-out:
- kfree(out_real);
- kfree(out_test);
- kfree(in);
}
struct test_string_1 {
@@ -123,7 +112,7 @@ struct test_string_2 {
};
#define TEST_STRING_2_DICT_0 NULL
-static const struct test_string_2 escape0[] __initconst = {{
+static const struct test_string_2 escape0[] = {{
.in = "\f\\ \n\r\t\v",
.s1 = {{
.out = "\\f\\ \\n\\r\\t\\v",
@@ -221,7 +210,7 @@ static const struct test_string_2 escape0[] __initconst = {{
}};
#define TEST_STRING_2_DICT_1 "b\\ \t\r\xCF"
-static const struct test_string_2 escape1[] __initconst = {{
+static const struct test_string_2 escape1[] = {{
.in = "\f\\ \n\r\t\v",
.s1 = {{
.out = "\f\\134\\040\n\\015\\011\v",
@@ -358,7 +347,7 @@ static const struct test_string_2 escape1[] __initconst = {{
/* terminator */
}};
-static const struct test_string strings_upper[] __initconst = {
+static const struct test_string strings_upper[] = {
{
.in = "abcdefgh1234567890test",
.out = "ABCDEFGH1234567890TEST",
@@ -369,7 +358,7 @@ static const struct test_string strings_upper[] __initconst = {
},
};
-static const struct test_string strings_lower[] __initconst = {
+static const struct test_string strings_lower[] = {
{
.in = "ABCDEFGH1234567890TEST",
.out = "abcdefgh1234567890test",
@@ -380,8 +369,8 @@ static const struct test_string strings_lower[] __initconst = {
},
};
-static __init const char *test_string_find_match(const struct test_string_2 *s2,
- unsigned int flags)
+static const char *test_string_find_match(const struct test_string_2 *s2,
+ unsigned int flags)
{
const struct test_string_1 *s1 = s2->s1;
unsigned int i;
@@ -402,31 +391,31 @@ static __init const char *test_string_find_match(const struct test_string_2 *s2,
return NULL;
}
-static __init void
-test_string_escape_overflow(const char *in, int p, unsigned int flags, const char *esc,
+static void
+test_string_escape_overflow(struct kunit *test,
+ const char *in, int p, unsigned int flags, const char *esc,
int q_test, const char *name)
{
int q_real;
q_real = string_escape_mem(in, p, NULL, 0, flags, esc);
- if (q_real != q_test)
- pr_warn("Test '%s' failed: flags = %#x, osz = 0, expected %d, got %d\n",
- name, flags, q_test, q_real);
+ KUNIT_EXPECT_EQ_MSG(test, q_real, q_test, "name:%s: flags:%#x", name, flags);
}
-static __init void test_string_escape(const char *name,
- const struct test_string_2 *s2,
- unsigned int flags, const char *esc)
+static void test_string_escape(struct kunit *test, const char *name,
+ const struct test_string_2 *s2,
+ unsigned int flags, const char *esc)
{
size_t out_size = 512;
- char *out_test = kmalloc(out_size, GFP_KERNEL);
- char *out_real = kmalloc(out_size, GFP_KERNEL);
- char *in = kmalloc(256, GFP_KERNEL);
+ char *out_test = kunit_kzalloc(test, out_size, GFP_KERNEL);
+ char *out_real = kunit_kzalloc(test, out_size, GFP_KERNEL);
+ char *in = kunit_kzalloc(test, 256, GFP_KERNEL);
int p = 0, q_test = 0;
int q_real;
- if (!out_test || !out_real || !in)
- goto out;
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, out_real);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, in);
for (; s2->in; s2++) {
const char *out;
@@ -462,62 +451,99 @@ static __init void test_string_escape(const char *name,
q_real = string_escape_mem(in, p, out_real, out_size, flags, esc);
- test_string_check_buf(name, flags, in, p, out_real, q_real, out_test,
+ test_string_check_buf(test, name, flags, in, p, out_real, q_real, out_test,
q_test);
- test_string_escape_overflow(in, p, flags, esc, q_test, name);
-
-out:
- kfree(in);
- kfree(out_real);
- kfree(out_test);
+ test_string_escape_overflow(test, in, p, flags, esc, q_test, name);
}
#define string_get_size_maxbuf 16
-#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \
- do { \
- BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \
- BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \
- __test_string_get_size((size), (blk_size), (exp_result10), \
- (exp_result2)); \
+#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \
+ do { \
+ BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \
+ BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \
+ __test_string_get_size(test, (size), (blk_size), (exp_result10), \
+ (exp_result2)); \
} while (0)
-static __init void test_string_get_size_check(const char *units,
- const char *exp,
- char *res,
- const u64 size,
- const u64 blk_size)
+static void test_string_get_size_check(struct kunit *test,
+ const char *units,
+ const char *exp,
+ char *res,
+ const u64 size,
+ const u64 blk_size)
{
- if (!memcmp(res, exp, strlen(exp) + 1))
- return;
-
- res[string_get_size_maxbuf - 1] = '\0';
-
- pr_warn("Test 'test_string_get_size' failed!\n");
- pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n",
+ KUNIT_EXPECT_MEMEQ_MSG(test, res, exp, strlen(exp) + 1,
+ "string_get_size(size = %llu, blk_size = %llu, units = %s)",
size, blk_size, units);
- pr_warn("expected: '%s', got '%s'\n", exp, res);
}
-static __init void __test_string_get_size(const u64 size, const u64 blk_size,
- const char *exp_result10,
- const char *exp_result2)
+static void __strchrcut(char *dst, const char *src, const char *cut)
+{
+ const char *from = src;
+ size_t len;
+
+ do {
+ len = strcspn(from, cut);
+ memcpy(dst, from, len);
+ dst += len;
+ from += len;
+ } while (*from++);
+ *dst = '\0';
+}
+
+static void __test_string_get_size_one(struct kunit *test,
+ const u64 size, const u64 blk_size,
+ const char *exp_result10,
+ const char *exp_result2,
+ enum string_size_units units,
+ const char *cut)
{
char buf10[string_get_size_maxbuf];
char buf2[string_get_size_maxbuf];
+ char exp10[string_get_size_maxbuf];
+ char exp2[string_get_size_maxbuf];
+ char prefix10[64];
+ char prefix2[64];
+
+ sprintf(prefix10, "STRING_UNITS_10 [%s]", cut);
+ sprintf(prefix2, "STRING_UNITS_2 [%s]", cut);
+
+ __strchrcut(exp10, exp_result10, cut);
+ __strchrcut(exp2, exp_result2, cut);
- string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10));
- string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2));
+ string_get_size(size, blk_size, STRING_UNITS_10 | units, buf10, sizeof(buf10));
+ string_get_size(size, blk_size, STRING_UNITS_2 | units, buf2, sizeof(buf2));
- test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10,
- size, blk_size);
+ test_string_get_size_check(test, prefix10, exp10, buf10, size, blk_size);
+ test_string_get_size_check(test, prefix2, exp2, buf2, size, blk_size);
+}
+
+static void __test_string_get_size(struct kunit *test,
+ const u64 size, const u64 blk_size,
+ const char *exp_result10,
+ const char *exp_result2)
+{
+ struct {
+ enum string_size_units units;
+ const char *cut;
+ } get_size_test_cases[] = {
+ { 0, "" },
+ { STRING_UNITS_NO_SPACE, " " },
+ { STRING_UNITS_NO_SPACE | STRING_UNITS_NO_BYTES, " B" },
+ { STRING_UNITS_NO_BYTES, "B" },
+ };
+ int i;
- test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2,
- size, blk_size);
+ for (i = 0; i < ARRAY_SIZE(get_size_test_cases); i++)
+ __test_string_get_size_one(test, size, blk_size,
+ exp_result10, exp_result2,
+ get_size_test_cases[i].units,
+ get_size_test_cases[i].cut);
}
-static __init void test_string_get_size(void)
+static void test_get_size(struct kunit *test)
{
/* small values */
test_string_get_size_one(0, 512, "0 B", "0 B");
@@ -537,7 +563,7 @@ static __init void test_string_get_size(void)
test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
}
-static void __init test_string_upper_lower(void)
+static void test_upper_lower(struct kunit *test)
{
char *dst;
int i;
@@ -547,16 +573,10 @@ static void __init test_string_upper_lower(void)
int len = strlen(strings_upper[i].in) + 1;
dst = kmalloc(len, GFP_KERNEL);
- if (!dst)
- return;
+ KUNIT_ASSERT_NOT_NULL(test, dst);
string_upper(dst, s);
- if (memcmp(dst, strings_upper[i].out, len)) {
- pr_warn("Test 'string_upper' failed : expected %s, got %s!\n",
- strings_upper[i].out, dst);
- kfree(dst);
- return;
- }
+ KUNIT_EXPECT_STREQ(test, dst, strings_upper[i].out);
kfree(dst);
}
@@ -565,45 +585,44 @@ static void __init test_string_upper_lower(void)
int len = strlen(strings_lower[i].in) + 1;
dst = kmalloc(len, GFP_KERNEL);
- if (!dst)
- return;
+ KUNIT_ASSERT_NOT_NULL(test, dst);
string_lower(dst, s);
- if (memcmp(dst, strings_lower[i].out, len)) {
- pr_warn("Test 'string_lower failed : : expected %s, got %s!\n",
- strings_lower[i].out, dst);
- kfree(dst);
- return;
- }
+ KUNIT_EXPECT_STREQ(test, dst, strings_lower[i].out);
kfree(dst);
}
}
-static int __init test_string_helpers_init(void)
+static void test_unescape(struct kunit *test)
{
unsigned int i;
- pr_info("Running tests...\n");
for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
- test_string_unescape("unescape", i, false);
- test_string_unescape("unescape inplace",
+ test_string_unescape(test, "unescape", i, false);
+ test_string_unescape(test, "unescape inplace",
get_random_u32_below(UNESCAPE_ALL_MASK + 1), true);
/* Without dictionary */
for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
- test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0);
+ test_string_escape(test, "escape 0", escape0, i, TEST_STRING_2_DICT_0);
/* With dictionary */
for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
- test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1);
+ test_string_escape(test, "escape 1", escape1, i, TEST_STRING_2_DICT_1);
+}
- /* Test string_get_size() */
- test_string_get_size();
+static struct kunit_case string_helpers_test_cases[] = {
+ KUNIT_CASE(test_get_size),
+ KUNIT_CASE(test_upper_lower),
+ KUNIT_CASE(test_unescape),
+ {}
+};
- /* Test string upper(), string_lower() */
- test_string_upper_lower();
+static struct kunit_suite string_helpers_test_suite = {
+ .name = "string_helpers",
+ .test_cases = string_helpers_test_cases,
+};
+
+kunit_test_suites(&string_helpers_test_suite);
- return -EINVAL;
-}
-module_init(test_string_helpers_init);
MODULE_LICENSE("Dual BSD/GPL");
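
The __strchrcut() helper above derives each expected string by deleting every character of @cut from the reference output, which is how a single expectation table covers all four flag combinations. A standalone illustration using only strcspn() from <string.h>; strchrcut_demo() is just a local copy for demonstration:

#include <stdio.h>
#include <string.h>

/* Same logic as __strchrcut() above: copy src to dst, dropping every
 * character that appears in the cut set. */
static void strchrcut_demo(char *dst, const char *src, const char *cut)
{
	const char *from = src;
	size_t len;

	do {
		len = strcspn(from, cut);
		memcpy(dst, from, len);
		dst += len;
		from += len;
	} while (*from++);
	*dst = '\0';
}

int main(void)
{
	char buf[16];

	strchrcut_demo(buf, "75.6 ZB", " ");	/* STRING_UNITS_NO_SPACE */
	printf("%s\n", buf);			/* "75.6ZB" */
	strchrcut_demo(buf, "75.6 ZB", " B");	/* NO_SPACE | NO_BYTES   */
	printf("%s\n", buf);			/* "75.6Z"  */
	return 0;
}
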
diff --git a/lib/string_kunit.c b/lib/string_kunit.c
new file mode 100644
index 0000000000..dd19bd7748
--- /dev/null
+++ b/lib/string_kunit.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for string functions.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#define STRCMP_LARGE_BUF_LEN 2048
+#define STRCMP_CHANGE_POINT 1337
+#define STRCMP_TEST_EXPECT_EQUAL(test, fn, ...) KUNIT_EXPECT_EQ(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_LOWER(test, fn, ...) KUNIT_EXPECT_LT(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_GREATER(test, fn, ...) KUNIT_EXPECT_GT(test, fn(__VA_ARGS__), 0)
+
+static void test_memset16(struct kunit *test)
+{
+ unsigned i, j, k;
+ u16 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 2, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset16(p + i, 0xb1b2, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void test_memset32(struct kunit *test)
+{
+ unsigned i, j, k;
+ u32 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 4, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset32(p + i, 0xb1b2b3b4, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2b3b4,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void test_memset64(struct kunit *test)
+{
+ unsigned i, j, k;
+ u64 v, *p;
+
+ p = kunit_kzalloc(test, 256 * 2 * 8, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
+
+ for (i = 0; i < 256; i++) {
+ for (j = 0; j < 256; j++) {
+ memset(p, 0xa1, 256 * 2 * sizeof(v));
+ memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
+ for (k = 0; k < 512; k++) {
+ v = p[k];
+ if (k < i) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1a1a1a1a1ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ } else if (k < i + j) {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xb1b2b3b4b5b6b7b8ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, v, 0xa1a1a1a1a1a1a1a1ULL,
+ "i:%d j:%d k:%d", i, j, k);
+ }
+ }
+ }
+ }
+}
+
+static void test_strchr(struct kunit *test)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ result = strchr(test_string, test_string[i]);
+ KUNIT_ASSERT_EQ_MSG(test, result - test_string, i,
+ "char:%c", 'a' + i);
+ }
+
+ result = strchr(empty_string, '\0');
+ KUNIT_ASSERT_PTR_EQ(test, result, empty_string);
+
+ result = strchr(empty_string, 'a');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strchr(test_string, 'z');
+ KUNIT_ASSERT_NULL(test, result);
+}
+
+static void test_strnchr(struct kunit *test)
+{
+ const char *test_string = "abcdefghijkl";
+ const char *empty_string = "";
+ char *result;
+ int i, j;
+
+ for (i = 0; i < strlen(test_string) + 1; i++) {
+ for (j = 0; j < strlen(test_string) + 2; j++) {
+ result = strnchr(test_string, j, test_string[i]);
+ if (j <= i) {
+ KUNIT_ASSERT_NULL_MSG(test, result,
+ "char:%c i:%d j:%d", 'a' + i, i, j);
+ } else {
+ KUNIT_ASSERT_EQ_MSG(test, result - test_string, i,
+ "char:%c i:%d j:%d", 'a' + i, i, j);
+ }
+ }
+ }
+
+ result = strnchr(empty_string, 0, '\0');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strnchr(empty_string, 1, '\0');
+ KUNIT_ASSERT_PTR_EQ(test, result, empty_string);
+
+ result = strnchr(empty_string, 1, 'a');
+ KUNIT_ASSERT_NULL(test, result);
+
+ result = strnchr(NULL, 0, '\0');
+ KUNIT_ASSERT_NULL(test, result);
+}
+
+static void test_strspn(struct kunit *test)
+{
+ static const struct strspn_test {
+ const char str[16];
+ const char accept[16];
+ const char reject[16];
+ unsigned a;
+ unsigned r;
+ } tests[] = {
+ { "foobar", "", "", 0, 6 },
+ { "abba", "abc", "ABBA", 4, 4 },
+ { "abba", "a", "b", 1, 1 },
+ { "", "abc", "abc", 0, 0},
+ };
+ const struct strspn_test *s = tests;
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
+ KUNIT_ASSERT_EQ_MSG(test, s->a, strspn(s->str, s->accept),
+ "i:%zu", i);
+ KUNIT_ASSERT_EQ_MSG(test, s->r, strcspn(s->str, s->reject),
+ "i:%zu", i);
+ }
+}
+
+static char strcmp_buffer1[STRCMP_LARGE_BUF_LEN];
+static char strcmp_buffer2[STRCMP_LARGE_BUF_LEN];
+
+static void strcmp_fill_buffers(char fill1, char fill2)
+{
+ memset(strcmp_buffer1, fill1, STRCMP_LARGE_BUF_LEN);
+ memset(strcmp_buffer2, fill2, STRCMP_LARGE_BUF_LEN);
+ strcmp_buffer1[STRCMP_LARGE_BUF_LEN - 1] = 0;
+ strcmp_buffer2[STRCMP_LARGE_BUF_LEN - 1] = 0;
+}
+
+static void test_strcmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "Hello, Kernel!", "Hello, Kernel!");
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Hello, KUnit!", "Hello, Kernel!");
+ /* First string is lexicographically larger than the second */
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, "Hello, Kernel!", "Hello, KUnit!");
+ /* Empty string is always lexicographically less than any non-empty string */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "", "Non-empty string");
+ /* Two empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "", "");
+ /* Compare two strings which have only one char difference */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Abacaba", "Abadaba");
+	/* Compare two strings which have the same prefix */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Just a string", "Just a string and something else");
+}
+
+static void test_strcmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void test_strncmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, KUnit!", "Hello, KUnit!", 13);
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Hello, KUnit!", "Hello, Kernel!", 13);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, Kernel!", "Hello, KUnit!", 0);
+ /* Strings with common prefix are equal if count = length of prefix */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Abacaba", "Abadaba", 3);
+ /* Strings with common prefix are not equal when count = length of prefix + 1 */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Abacaba", "Abadaba", 4);
+ /* If one string is a prefix of another, the shorter string is lexicographically smaller */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string and something else"));
+ /*
+ * If one string is a prefix of another, and we check first length
+ * of prefix chars, the result is 'equal'
+ */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string"));
+}
+
+static void test_strncmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+ /* the strings are equal up to STRCMP_CHANGE_POINT */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+static void test_strcasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "Hello, Kernel!", "HeLLO, KErNeL!");
+ /* Empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "", "");
+	/* Although the ASCII code for 'a' is larger than that for 'B', 'a' < 'B' here */
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, "a", "B");
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, "B", "a");
+ /* Special symbols and numbers should be processed correctly */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "-+**.1230ghTTT~^", "-+**.1230Ghttt~^");
+}
+
+static void test_strcasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void test_strncasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbAcAbA", "Abacaba", strlen("Abacaba"));
+ /* strncasecmp should check 'count' chars only */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbaCaBa", "abaCaDa", 5);
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, "a", "B", 1);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, "B", "a", 1);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "Abacaba", "Not abacaba", 0);
+}
+
+static void test_strncasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+static struct kunit_case string_test_cases[] = {
+ KUNIT_CASE(test_memset16),
+ KUNIT_CASE(test_memset32),
+ KUNIT_CASE(test_memset64),
+ KUNIT_CASE(test_strchr),
+ KUNIT_CASE(test_strnchr),
+ KUNIT_CASE(test_strspn),
+ KUNIT_CASE(test_strcmp),
+ KUNIT_CASE(test_strcmp_long_strings),
+ KUNIT_CASE(test_strncmp),
+ KUNIT_CASE(test_strncmp_long_strings),
+ KUNIT_CASE(test_strcasecmp),
+ KUNIT_CASE(test_strcasecmp_long_strings),
+ KUNIT_CASE(test_strncasecmp),
+ KUNIT_CASE(test_strncasecmp_long_strings),
+ {}
+};
+
+static struct kunit_suite string_test_suite = {
+ .name = "string",
+ .test_cases = string_test_cases,
+};
+
+kunit_test_suites(&string_test_suite);
+
+MODULE_LICENSE("GPL v2");
diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c
index a6b6344354..b6d1d93a88 100644
--- a/lib/strscpy_kunit.c
+++ b/lib/strscpy_kunit.c
@@ -8,22 +8,23 @@
#include <kunit/test.h>
#include <linux/string.h>
-/*
- * tc() - Run a specific test case.
+/**
+ * strscpy_check() - Run a specific test case.
+ * @test: KUnit test context pointer
* @src: Source string, argument to strscpy_pad()
* @count: Size of destination buffer, argument to strscpy_pad()
* @expected: Expected return value from call to strscpy_pad()
- * @terminator: 1 if there should be a terminating null byte 0 otherwise.
* @chars: Number of characters from the src string expected to be
* written to the dst buffer.
+ * @terminator: 1 if there should be a terminating null byte 0 otherwise.
* @pad: Number of pad characters expected (in the tail of dst buffer).
* (@pad does not include the null terminator byte.)
*
* Calls strscpy_pad() and verifies the return value and state of the
* destination buffer after the call returns.
*/
-static void tc(struct kunit *test, char *src, int count, int expected,
- int chars, int terminator, int pad)
+static void strscpy_check(struct kunit *test, char *src, int count,
+ int expected, int chars, int terminator, int pad)
{
int nr_bytes_poison;
int max_expected;
@@ -79,12 +80,12 @@ static void tc(struct kunit *test, char *src, int count, int expected,
}
}
-static void strscpy_test(struct kunit *test)
+static void test_strscpy(struct kunit *test)
{
char dest[8];
/*
- * tc() uses a destination buffer of size 6 and needs at
+ * strscpy_check() uses a destination buffer of size 6 and needs at
* least 2 characters spare (one for null and one to check for
* overflow). This means we should only call tc() with
* strings up to a maximum of 4 characters long and 'count'
@@ -92,27 +93,27 @@ static void strscpy_test(struct kunit *test)
* the buffer size in tc().
*/
- /* tc(test, src, count, expected, chars, terminator, pad) */
- tc(test, "a", 0, -E2BIG, 0, 0, 0);
- tc(test, "", 0, -E2BIG, 0, 0, 0);
+ /* strscpy_check(test, src, count, expected, chars, terminator, pad) */
+ strscpy_check(test, "a", 0, -E2BIG, 0, 0, 0);
+ strscpy_check(test, "", 0, -E2BIG, 0, 0, 0);
- tc(test, "a", 1, -E2BIG, 0, 1, 0);
- tc(test, "", 1, 0, 0, 1, 0);
+ strscpy_check(test, "a", 1, -E2BIG, 0, 1, 0);
+ strscpy_check(test, "", 1, 0, 0, 1, 0);
- tc(test, "ab", 2, -E2BIG, 1, 1, 0);
- tc(test, "a", 2, 1, 1, 1, 0);
- tc(test, "", 2, 0, 0, 1, 1);
+ strscpy_check(test, "ab", 2, -E2BIG, 1, 1, 0);
+ strscpy_check(test, "a", 2, 1, 1, 1, 0);
+ strscpy_check(test, "", 2, 0, 0, 1, 1);
- tc(test, "abc", 3, -E2BIG, 2, 1, 0);
- tc(test, "ab", 3, 2, 2, 1, 0);
- tc(test, "a", 3, 1, 1, 1, 1);
- tc(test, "", 3, 0, 0, 1, 2);
+ strscpy_check(test, "abc", 3, -E2BIG, 2, 1, 0);
+ strscpy_check(test, "ab", 3, 2, 2, 1, 0);
+ strscpy_check(test, "a", 3, 1, 1, 1, 1);
+ strscpy_check(test, "", 3, 0, 0, 1, 2);
- tc(test, "abcd", 4, -E2BIG, 3, 1, 0);
- tc(test, "abc", 4, 3, 3, 1, 0);
- tc(test, "ab", 4, 2, 2, 1, 1);
- tc(test, "a", 4, 1, 1, 1, 2);
- tc(test, "", 4, 0, 0, 1, 3);
+ strscpy_check(test, "abcd", 4, -E2BIG, 3, 1, 0);
+ strscpy_check(test, "abc", 4, 3, 3, 1, 0);
+ strscpy_check(test, "ab", 4, 2, 2, 1, 1);
+ strscpy_check(test, "a", 4, 1, 1, 1, 2);
+ strscpy_check(test, "", 4, 0, 0, 1, 3);
/* Compile-time-known source strings. */
KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
@@ -127,7 +128,7 @@ static void strscpy_test(struct kunit *test)
}
static struct kunit_case strscpy_test_cases[] = {
- KUNIT_CASE(strscpy_test),
+ KUNIT_CASE(test_strscpy),
{}
};
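
For readers decoding the positional arguments above, one row of test_strscpy() spelled out; the poison value that strscpy_check() pre-fills past @count is defined elsewhere in this file and is not shown in the hunk:

/*
 * strscpy_check(test, "ab", 4, 2, 2, 1, 1) expects strscpy_pad(dest, "ab", 4)
 * to return 2 and to leave the first four bytes of dest as
 *
 *   dest[0] = 'a'   dest[1] = 'b'    <- 2 copied characters
 *   dest[2] = '\0'                   <- 1 NUL terminator
 *   dest[3] = '\0'                   <- 1 pad byte
 *
 * while bytes beyond @count keep the poison pattern written beforehand,
 * which is how the helper catches writes past the given buffer size.
 */
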
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 65f22c2578..6b2b33579f 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -380,6 +380,47 @@ static void __init test_replace(void)
expect_eq_bitmap(bmap, exp3_1_0, nbits);
}
+static const unsigned long sg_mask[] __initconst = {
+ BITMAP_FROM_U64(0x000000000000035aULL),
+};
+
+static const unsigned long sg_src[] __initconst = {
+ BITMAP_FROM_U64(0x0000000000000667ULL),
+};
+
+static const unsigned long sg_gather_exp[] __initconst = {
+ BITMAP_FROM_U64(0x0000000000000029ULL),
+};
+
+static const unsigned long sg_scatter_exp[] __initconst = {
+ BITMAP_FROM_U64(0x000000000000021aULL),
+};
+
+static void __init test_bitmap_sg(void)
+{
+ unsigned int nbits = 64;
+ DECLARE_BITMAP(bmap_gather, 100);
+ DECLARE_BITMAP(bmap_scatter, 100);
+ DECLARE_BITMAP(bmap_tmp, 100);
+ DECLARE_BITMAP(bmap_res, 100);
+
+ /* Simple gather call */
+ bitmap_zero(bmap_gather, 100);
+ bitmap_gather(bmap_gather, sg_src, sg_mask, nbits);
+ expect_eq_bitmap(sg_gather_exp, bmap_gather, nbits);
+
+ /* Simple scatter call */
+ bitmap_zero(bmap_scatter, 100);
+ bitmap_scatter(bmap_scatter, sg_src, sg_mask, nbits);
+ expect_eq_bitmap(sg_scatter_exp, bmap_scatter, nbits);
+
+ /* Scatter/gather relationship */
+ bitmap_zero(bmap_tmp, 100);
+ bitmap_gather(bmap_tmp, bmap_scatter, sg_mask, nbits);
+ bitmap_scatter(bmap_res, bmap_tmp, sg_mask, nbits);
+ expect_eq_bitmap(bmap_scatter, bmap_res, nbits);
+}
+
#define PARSE_TIME 0x1
#define NO_LEN 0x2
@@ -1252,6 +1293,7 @@ static void __init selftest(void)
test_copy();
test_bitmap_region();
test_replace();
+ test_bitmap_sg();
test_bitmap_arr32();
test_bitmap_arr64();
test_bitmap_parse();
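
The sg_* constants above encode the scatter/gather relationship the new test verifies. A standalone single-word sketch reproduces the expected values; gather64()/scatter64() are illustrative helpers, not the kernel's bitmap_gather()/bitmap_scatter(), which operate on arbitrary-length bitmaps:

#include <stdint.h>
#include <stdio.h>

/* Gather: pack the src bits selected by mask into consecutive low bits. */
static uint64_t gather64(uint64_t src, uint64_t mask)
{
	uint64_t dst = 0;
	unsigned int bit, out = 0;

	for (bit = 0; bit < 64; bit++)
		if (mask & (1ULL << bit))
			dst |= ((src >> bit) & 1ULL) << out++;
	return dst;
}

/* Scatter: spread the low bits of src onto the positions set in mask. */
static uint64_t scatter64(uint64_t src, uint64_t mask)
{
	uint64_t dst = 0;
	unsigned int bit, in = 0;

	for (bit = 0; bit < 64; bit++)
		if (mask & (1ULL << bit))
			dst |= ((src >> in++) & 1ULL) << bit;
	return dst;
}

int main(void)
{
	uint64_t mask = 0x35a, src = 0x667, sc;

	printf("gather  = %#llx\n", (unsigned long long)gather64(src, mask));
	/* 0x29, matching sg_gather_exp */
	sc = scatter64(src, mask);
	printf("scatter = %#llx\n", (unsigned long long)sc);
	/* 0x21a, matching sg_scatter_exp */
	printf("round trip = %#llx\n",
	       (unsigned long long)scatter64(gather64(sc, mask), mask));
	/* 0x21a again: scatter(gather(x)) preserves the bits selected by
	 * mask, which is the scatter/gather relationship checked above. */
	return 0;
}
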
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 43d9dfd57a..1eec3b7ac6 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -58,11 +58,14 @@ static int num_test_devs;
* @need_mod_put for your tests case.
*/
enum kmod_test_case {
+ /* private: */
__TEST_KMOD_INVALID = 0,
+ /* public: */
TEST_KMOD_DRIVER,
TEST_KMOD_FS_TYPE,
+ /* private: */
__TEST_KMOD_MAX,
};
@@ -82,6 +85,7 @@ struct kmod_test_device;
* @ret_sync: return value if request_module() is used, sync request for
* @TEST_KMOD_DRIVER
* @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
+ * @task_sync: kthread's task_struct or %NULL if not running
* @thread_idx: thread ID
* @test_dev: test device test is being performed under
* @need_mod_put: Some tests (get_fs_type() is one) requires putting the module
@@ -108,7 +112,7 @@ struct kmod_test_device_info {
* @dev: pointer to misc_dev's own struct device
* @config_mutex: protects configuration of test
* @trigger_mutex: the test trigger can only be fired once at a time
- * @thread_lock: protects @done count, and the @info per each thread
+ * @thread_mutex: protects @done count, and the @info per each thread
* @done: number of threads which have completed or failed
* @test_is_oom: when we run out of memory, use this to halt moving forward
* @kthreads_done: completion used to signal when all work is done
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 29185ac5c7..399380db44 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -3599,6 +3599,45 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
mas_unlock(&mas);
}
+static noinline void __init alloc_cyclic_testing(struct maple_tree *mt)
+{
+ unsigned long location;
+ unsigned long next;
+ int ret = 0;
+ MA_STATE(mas, mt, 0, 0);
+
+ next = 0;
+ mtree_lock(mt);
+ for (int i = 0; i < 100; i++) {
+ mas_alloc_cyclic(&mas, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MAS_BUG_ON(&mas, i != location - 2);
+ MAS_BUG_ON(&mas, mas.index != location);
+ MAS_BUG_ON(&mas, mas.last != location);
+ MAS_BUG_ON(&mas, i != next - 3);
+ }
+
+ mtree_unlock(mt);
+ mtree_destroy(mt);
+ next = 0;
+ mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE);
+ for (int i = 0; i < 100; i++) {
+ mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, i != location - 2);
+ MT_BUG_ON(mt, i != next - 3);
+ MT_BUG_ON(mt, mtree_load(mt, location) != mt);
+ }
+
+ mtree_destroy(mt);
+ /* Overflow test */
+ next = ULONG_MAX - 1;
+ ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, ret != 0);
+ ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, ret != 0);
+ ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL);
+ MT_BUG_ON(mt, ret != 1);
+}
+
static DEFINE_MTREE(tree);
static int __init maple_tree_seed(void)
{
@@ -3880,6 +3919,11 @@ static int __init maple_tree_seed(void)
check_state_handling(&tree);
mtree_destroy(&tree);
+ mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+ alloc_cyclic_testing(&tree);
+ mtree_destroy(&tree);
+
+
#if defined(BENCH)
skip:
#endif
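
A hedged sketch of the allocation pattern alloc_cyclic_testing() exercises; the argument order is taken from the calls above, and, judging from the overflow checks, a return of 0 is a plain success while 1 appears to indicate that the cyclic search wrapped around. demo_alloc_id() is illustrative only:

static int demo_alloc_id(struct maple_tree *tree, void *data,
			 unsigned long *next, unsigned long *id)
{
	int ret;

	ret = mtree_alloc_cyclic(tree, id, data, 2, ULONG_MAX, next,
				 GFP_KERNEL);
	if (ret < 0)
		return ret;		/* e.g. -ENOMEM or range exhausted */
	if (ret == 1)
		pr_debug("cyclic id space wrapped around\n");
	/* '*id' now holds the index storing 'data'; '*next' is the updated
	 * hint, which is what the MT_BUG_ON() checks above verify. */
	return 0;
}
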
diff --git a/lib/test_string.c b/lib/test_string.c
deleted file mode 100644
index c5cb92fb71..0000000000
--- a/lib/test_string.c
+++ /dev/null
@@ -1,257 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
-#include <linux/printk.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-static __init int memset16_selftest(void)
-{
- unsigned i, j, k;
- u16 v, *p;
-
- p = kmalloc(256 * 2 * 2, GFP_KERNEL);
- if (!p)
- return -1;
-
- for (i = 0; i < 256; i++) {
- for (j = 0; j < 256; j++) {
- memset(p, 0xa1, 256 * 2 * sizeof(v));
- memset16(p + i, 0xb1b2, j);
- for (k = 0; k < 512; k++) {
- v = p[k];
- if (k < i) {
- if (v != 0xa1a1)
- goto fail;
- } else if (k < i + j) {
- if (v != 0xb1b2)
- goto fail;
- } else {
- if (v != 0xa1a1)
- goto fail;
- }
- }
- }
- }
-
-fail:
- kfree(p);
- if (i < 256)
- return (i << 24) | (j << 16) | k | 0x8000;
- return 0;
-}
-
-static __init int memset32_selftest(void)
-{
- unsigned i, j, k;
- u32 v, *p;
-
- p = kmalloc(256 * 2 * 4, GFP_KERNEL);
- if (!p)
- return -1;
-
- for (i = 0; i < 256; i++) {
- for (j = 0; j < 256; j++) {
- memset(p, 0xa1, 256 * 2 * sizeof(v));
- memset32(p + i, 0xb1b2b3b4, j);
- for (k = 0; k < 512; k++) {
- v = p[k];
- if (k < i) {
- if (v != 0xa1a1a1a1)
- goto fail;
- } else if (k < i + j) {
- if (v != 0xb1b2b3b4)
- goto fail;
- } else {
- if (v != 0xa1a1a1a1)
- goto fail;
- }
- }
- }
- }
-
-fail:
- kfree(p);
- if (i < 256)
- return (i << 24) | (j << 16) | k | 0x8000;
- return 0;
-}
-
-static __init int memset64_selftest(void)
-{
- unsigned i, j, k;
- u64 v, *p;
-
- p = kmalloc(256 * 2 * 8, GFP_KERNEL);
- if (!p)
- return -1;
-
- for (i = 0; i < 256; i++) {
- for (j = 0; j < 256; j++) {
- memset(p, 0xa1, 256 * 2 * sizeof(v));
- memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j);
- for (k = 0; k < 512; k++) {
- v = p[k];
- if (k < i) {
- if (v != 0xa1a1a1a1a1a1a1a1ULL)
- goto fail;
- } else if (k < i + j) {
- if (v != 0xb1b2b3b4b5b6b7b8ULL)
- goto fail;
- } else {
- if (v != 0xa1a1a1a1a1a1a1a1ULL)
- goto fail;
- }
- }
- }
- }
-
-fail:
- kfree(p);
- if (i < 256)
- return (i << 24) | (j << 16) | k | 0x8000;
- return 0;
-}
-
-static __init int strchr_selftest(void)
-{
- const char *test_string = "abcdefghijkl";
- const char *empty_string = "";
- char *result;
- int i;
-
- for (i = 0; i < strlen(test_string) + 1; i++) {
- result = strchr(test_string, test_string[i]);
- if (result - test_string != i)
- return i + 'a';
- }
-
- result = strchr(empty_string, '\0');
- if (result != empty_string)
- return 0x101;
-
- result = strchr(empty_string, 'a');
- if (result)
- return 0x102;
-
- result = strchr(test_string, 'z');
- if (result)
- return 0x103;
-
- return 0;
-}
-
-static __init int strnchr_selftest(void)
-{
- const char *test_string = "abcdefghijkl";
- const char *empty_string = "";
- char *result;
- int i, j;
-
- for (i = 0; i < strlen(test_string) + 1; i++) {
- for (j = 0; j < strlen(test_string) + 2; j++) {
- result = strnchr(test_string, j, test_string[i]);
- if (j <= i) {
- if (!result)
- continue;
- return ((i + 'a') << 8) | j;
- }
- if (result - test_string != i)
- return ((i + 'a') << 8) | j;
- }
- }
-
- result = strnchr(empty_string, 0, '\0');
- if (result)
- return 0x10001;
-
- result = strnchr(empty_string, 1, '\0');
- if (result != empty_string)
- return 0x10002;
-
- result = strnchr(empty_string, 1, 'a');
- if (result)
- return 0x10003;
-
- result = strnchr(NULL, 0, '\0');
- if (result)
- return 0x10004;
-
- return 0;
-}
-
-static __init int strspn_selftest(void)
-{
- static const struct strspn_test {
- const char str[16];
- const char accept[16];
- const char reject[16];
- unsigned a;
- unsigned r;
- } tests[] __initconst = {
- { "foobar", "", "", 0, 6 },
- { "abba", "abc", "ABBA", 4, 4 },
- { "abba", "a", "b", 1, 1 },
- { "", "abc", "abc", 0, 0},
- };
- const struct strspn_test *s = tests;
- size_t i, res;
-
- for (i = 0; i < ARRAY_SIZE(tests); ++i, ++s) {
- res = strspn(s->str, s->accept);
- if (res != s->a)
- return 0x100 + 2*i;
- res = strcspn(s->str, s->reject);
- if (res != s->r)
- return 0x100 + 2*i + 1;
- }
- return 0;
-}
-
-static __exit void string_selftest_remove(void)
-{
-}
-
-static __init int string_selftest_init(void)
-{
- int test, subtest;
-
- test = 1;
- subtest = memset16_selftest();
- if (subtest)
- goto fail;
-
- test = 2;
- subtest = memset32_selftest();
- if (subtest)
- goto fail;
-
- test = 3;
- subtest = memset64_selftest();
- if (subtest)
- goto fail;
-
- test = 4;
- subtest = strchr_selftest();
- if (subtest)
- goto fail;
-
- test = 5;
- subtest = strnchr_selftest();
- if (subtest)
- goto fail;
-
- test = 6;
- subtest = strspn_selftest();
- if (subtest)
- goto fail;
-
- pr_info("String selftests succeeded\n");
- return 0;
-fail:
- pr_crit("String selftest failure %d.%08x\n", test, subtest);
- return 0;
-}
-
-module_init(string_selftest_init);
-module_exit(string_selftest_remove);
-MODULE_LICENSE("GPL v2");
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
index 2062be1f2e..c288df9372 100644
--- a/lib/test_ubsan.c
+++ b/lib/test_ubsan.c
@@ -11,6 +11,39 @@ typedef void(*test_ubsan_fp)(void);
#config, IS_ENABLED(config) ? "y" : "n"); \
} while (0)
+static void test_ubsan_add_overflow(void)
+{
+ volatile int val = INT_MAX;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val += 2;
+}
+
+static void test_ubsan_sub_overflow(void)
+{
+ volatile int val = INT_MIN;
+ volatile int val2 = 2;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val -= val2;
+}
+
+static void test_ubsan_mul_overflow(void)
+{
+ volatile int val = INT_MAX / 2;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val *= 3;
+}
+
+static void test_ubsan_negate_overflow(void)
+{
+ volatile int val = INT_MIN;
+
+ UBSAN_TEST(CONFIG_UBSAN_SIGNED_WRAP);
+ val = -val;
+}
+
static void test_ubsan_divrem_overflow(void)
{
volatile int val = 16;
@@ -23,8 +56,8 @@ static void test_ubsan_divrem_overflow(void)
static void test_ubsan_shift_out_of_bounds(void)
{
volatile int neg = -1, wrap = 4;
- int val1 = 10;
- int val2 = INT_MAX;
+ volatile int val1 = 10;
+ volatile int val2 = INT_MAX;
UBSAN_TEST(CONFIG_UBSAN_SHIFT, "negative exponent");
val1 <<= neg;
@@ -90,6 +123,10 @@ static void test_ubsan_misaligned_access(void)
}
static const test_ubsan_fp test_ubsan_array[] = {
+ test_ubsan_add_overflow,
+ test_ubsan_sub_overflow,
+ test_ubsan_mul_overflow,
+ test_ubsan_negate_overflow,
test_ubsan_shift_out_of_bounds,
test_ubsan_out_of_bounds,
test_ubsan_load_invalid_value,
@@ -97,7 +134,7 @@ static const test_ubsan_fp test_ubsan_array[] = {
};
/* Excluded because they Oops the module. */
-static const test_ubsan_fp skip_ubsan_array[] = {
+static __used const test_ubsan_fp skip_ubsan_array[] = {
test_ubsan_divrem_overflow,
};
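
The shift test's operands become volatile for the same reason the new signed-overflow tests above use volatile locals: with an ordinary local whose result is never used, the compiler may optimize the overflowing operation away, and the UBSAN runtime check may never fire. A minimal illustration of the pattern; exact compiler behaviour varies, and the function names are illustrative only:

#include <limits.h>

static void unused_result_may_vanish(void)
{
	int val = INT_MAX;

	val += 2;	/* result never used: the compiler may delete this
			 * statement, and with it the sanitizer's check */
}

static void volatile_keeps_the_check(void)
{
	volatile int val = INT_MAX;

	val += 2;	/* the load, add and store must happen, so the
			 * instrumented add is kept and UBSAN can report it */
}
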
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 3718d98864..4ddf769861 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -117,7 +117,7 @@ static int align_shift_alloc_test(void)
int i;
for (i = 0; i < BITS_PER_LONG; i++) {
- align = ((unsigned long) 1) << i;
+ align = 1UL << i;
ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0,
__builtin_return_address(0));
@@ -501,7 +501,7 @@ static int test_func(void *private)
}
static int
-init_test_configurtion(void)
+init_test_configuration(void)
{
/*
* A maximum number of workers is defined as hard-coded
@@ -531,7 +531,7 @@ static void do_concurrent_test(void)
/*
* Set some basic configurations plus sanity check.
*/
- ret = init_test_configurtion();
+ ret = init_test_configuration();
if (ret < 0)
return;
@@ -600,12 +600,7 @@ static int vmalloc_test_init(void)
return -EAGAIN; /* Fail will directly unload the module */
}
-static void vmalloc_test_exit(void)
-{
-}
-
module_init(vmalloc_test_init)
-module_exit(vmalloc_test_exit)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Uladzislau Rezki");
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index e77d485644..928fc20337 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -423,6 +423,59 @@ static noinline void check_cmpxchg(struct xarray *xa)
XA_BUG_ON(xa, !xa_empty(xa));
}
+static noinline void check_cmpxchg_order(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+ void *FIVE = xa_mk_value(5);
+ unsigned int i, order = 3;
+
+ XA_BUG_ON(xa, xa_store_order(xa, 0, order, FIVE, GFP_KERNEL));
+
+ /* Check entry FIVE has the order saved */
+ XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != order);
+
+ /* Check all the tied indexes have the same entry and order */
+ for (i = 0; i < (1 << order); i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ /* Ensure that nothing is stored at index '1 << order' */
+ XA_BUG_ON(xa, xa_load(xa, 1 << order) != NULL);
+
+ /*
+ * Additionally, keep the node information and the order at
+ * '1 << order'
+ */
+ XA_BUG_ON(xa, xa_store_order(xa, 1 << order, order, FIVE, GFP_KERNEL));
+ for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ /* Conditionally replace FIVE entry at index '0' with NULL */
+ XA_BUG_ON(xa, xa_cmpxchg(xa, 0, FIVE, NULL, GFP_KERNEL) != FIVE);
+
+ /* Verify the order is lost at FIVE (and old) entries */
+ XA_BUG_ON(xa, xa_get_order(xa, xa_to_value(FIVE)) != 0);
+
+ /* Verify the order and entries are lost in all the tied indexes */
+ for (i = 0; i < (1 << order); i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != NULL);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != 0);
+ }
+
+ /* Verify node and order are kept at '1 << order' */
+ for (i = (1 << order); i < (1 << order) + (1 << order) - 1; i++) {
+ XA_BUG_ON(xa, xa_load(xa, i) != FIVE);
+ XA_BUG_ON(xa, xa_get_order(xa, i) != order);
+ }
+
+ xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
+ XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
static noinline void check_reserve(struct xarray *xa)
{
void *entry;
@@ -674,6 +727,186 @@ static noinline void check_multi_store(struct xarray *xa)
#endif
}
+#ifdef CONFIG_XARRAY_MULTI
+/* mimics page cache __filemap_add_folio() */
+static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
+ unsigned long index,
+ unsigned int order,
+ void *p)
+{
+ XA_STATE(xas, xa, index);
+ unsigned int nrpages = 1UL << order;
+
+	/* users are responsible for index alignment to the order when adding */
+ XA_BUG_ON(xa, index & (nrpages - 1));
+
+ xas_set_order(&xas, index, order);
+
+ do {
+ xas_lock_irq(&xas);
+ xas_store(&xas, p);
+ xas_unlock_irq(&xas);
+ /*
+	 * In our selftest case the only failure we can expect is running
+	 * out of memory, since we are not mimicking the entire page
+	 * cache. Verify that is the only error we can hit here; the
+	 * xas_nomem() which follows will fix that condition for us so
+	 * the loop can carry on.
+ */
+ XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ XA_BUG_ON(xa, xas_error(&xas));
+ XA_BUG_ON(xa, xa_load(xa, index) != p);
+}
+
+/* mimics page_cache_delete() */
+static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa,
+ unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, index);
+
+ xas_set_order(&xas, index, order);
+ xas_store(&xas, NULL);
+ xas_init_marks(&xas);
+}
+
+static noinline void check_xa_multi_store_adv_delete(struct xarray *xa,
+ unsigned long index,
+ unsigned int order)
+{
+ xa_lock_irq(xa);
+ check_xa_multi_store_adv_del_entry(xa, index, order);
+ xa_unlock_irq(xa);
+}
+
+/* mimics page cache filemap_get_entry() */
+static noinline void *test_get_entry(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ void *p;
+ static unsigned int loops = 0;
+
+ rcu_read_lock();
+repeat:
+ xas_reset(&xas);
+ p = xas_load(&xas);
+ if (xas_retry(&xas, p))
+ goto repeat;
+ rcu_read_unlock();
+
+ /*
+	 * This is not part of the page cache; this selftest is pretty
+	 * aggressive and does not want to trust the xarray API but rather
+	 * test it, and for order 20 (4 GiB block size) we can loop over
+	 * a million entries, which can cause a soft lockup. The proper
+	 * page cache APIs iterate at the proper order, so shared entries
+	 * are skipped when a larger order is used.
+ */
+ if (++loops % XA_CHECK_SCHED == 0)
+ schedule();
+
+ return p;
+}
+
+static unsigned long some_val = 0xdeadbeef;
+static unsigned long some_val_2 = 0xdeaddead;
+
+/* mimics the page cache usage */
+static noinline void check_xa_multi_store_adv(struct xarray *xa,
+ unsigned long pos,
+ unsigned int order)
+{
+ unsigned int nrpages = 1UL << order;
+ unsigned long index, base, next_index, next_next_index;
+ unsigned int i;
+
+ index = pos >> PAGE_SHIFT;
+ base = round_down(index, nrpages);
+ next_index = round_down(base + nrpages, nrpages);
+ next_next_index = round_down(next_index + nrpages, nrpages);
+
+ check_xa_multi_store_adv_add(xa, base, order, &some_val);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val);
+
+ XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL);
+
+ /* Use order 0 for the next item */
+ check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2);
+ XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2);
+
+ /* Remove the next item */
+ check_xa_multi_store_adv_delete(xa, next_index, 0);
+
+	/* Now use the same order for a new pointer */
+ check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
+
+ check_xa_multi_store_adv_delete(xa, next_index, order);
+ check_xa_multi_store_adv_delete(xa, base, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+	/* Starting fresh again, now test some holes */
+
+ /* hole at base and next_next */
+ check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL);
+
+ check_xa_multi_store_adv_delete(xa, next_index, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+
+ /* hole at base and next */
+
+ check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != NULL);
+
+ for (i = 0; i < nrpages; i++)
+ XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2);
+
+ check_xa_multi_store_adv_delete(xa, next_next_index, order);
+ XA_BUG_ON(xa, !xa_empty(xa));
+}
+#endif
+
+static noinline void check_multi_store_advanced(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+	unsigned int max_order = 20;
+ unsigned long end = ULONG_MAX/2;
+ unsigned long pos, i;
+
+ /*
+ * About 117 million tests below.
+ */
+ for (pos = 7; pos < end; pos = (pos * pos) + 564) {
+ for (i = 0; i < max_order; i++) {
+ check_xa_multi_store_adv(xa, pos, i);
+ check_xa_multi_store_adv(xa, pos + 157, i);
+ }
+ }
+#endif
+}
+
static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
{
int i;
@@ -1555,9 +1788,11 @@ static void check_split_1(struct xarray *xa, unsigned long index,
unsigned int order, unsigned int new_order)
{
XA_STATE_ORDER(xas, xa, index, new_order);
- unsigned int i;
+ unsigned int i, found;
+ void *entry;
xa_store_order(xa, index, order, xa, GFP_KERNEL);
+ xa_set_mark(xa, index, XA_MARK_1);
xas_split_alloc(&xas, xa, order, GFP_KERNEL);
xas_lock(&xas);
@@ -1574,6 +1809,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
xa_set_mark(xa, index, XA_MARK_0);
XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+ xas_set_order(&xas, index, 0);
+ found = 0;
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
+ found++;
+ XA_BUG_ON(xa, xa_is_internal(entry));
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, found != 1 << (order - new_order));
+
xa_destroy(xa);
}
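A quick worked example of the count the new assertion expects (illustrative only, not from the patch; the helper name is hypothetical): splitting one marked order-4 entry into order-1 pieces leaves the mark on each piece's canonical index, so the marked iteration should find 1 << (4 - 1) == 8 entries.

/* Illustrative only: expected number of marked pieces after a split. */
static inline unsigned int example_split_marked_count(unsigned int order,
						      unsigned int new_order)
{
	/* e.g. order 4 split to order 1: 1 << (4 - 1) == 8 marked entries */
	return 1U << (order - new_order);
}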
@@ -1801,9 +2046,11 @@ static int xarray_checks(void)
check_xas_erase(&array);
check_insert(&array);
check_cmpxchg(&array);
+ check_cmpxchg_order(&array);
check_reserve(&array);
check_reserve(&xa0);
check_multi_store(&array);
+ check_multi_store_advanced(&array);
check_get_order(&array);
check_xa_alloc();
check_find(&array);
diff --git a/lib/ubsan.c b/lib/ubsan.c
index df4f8d1354..a1c983d148 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -44,9 +44,10 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_shift_out_of_bounds:
return "UBSAN: shift out of bounds";
#endif
-#ifdef CONFIG_UBSAN_DIV_ZERO
+#if defined(CONFIG_UBSAN_DIV_ZERO) || defined(CONFIG_UBSAN_SIGNED_WRAP)
/*
- * SanitizerKind::IntegerDivideByZero emits
+ * SanitizerKind::IntegerDivideByZero and
+ * SanitizerKind::SignedIntegerOverflow emit
* SanitizerHandler::DivremOverflow.
*/
case ubsan_divrem_overflow:
@@ -78,6 +79,19 @@ const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type)
case ubsan_type_mismatch:
return "UBSAN: type mismatch";
#endif
+#ifdef CONFIG_UBSAN_SIGNED_WRAP
+ /*
+ * SanitizerKind::SignedIntegerOverflow emits
+ * SanitizerHandler::AddOverflow, SanitizerHandler::SubOverflow,
+ * or SanitizerHandler::MulOverflow.
+ */
+ case ubsan_add_overflow:
+ return "UBSAN: integer addition overflow";
+ case ubsan_sub_overflow:
+ return "UBSAN: integer subtraction overflow";
+ case ubsan_mul_overflow:
+ return "UBSAN: integer multiplication overflow";
+#endif
default:
return "UBSAN: unrecognized failure code";
}
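For context, a hedged sketch (not in the patch; the example_* name is hypothetical) of the kind of code that trips the signed-wrap checks when CONFIG_UBSAN_SIGNED_WRAP is enabled: depending on configuration, the wrapping addition is reported either through the strings above (trap mode) or through the __ubsan_handle_add_overflow() handler added further down in this patch.

/* Illustrative only: a signed addition that wraps trips the new check. */
static void example_signed_wrap(void)
{
	volatile int a = INT_MAX;
	volatile int b = 1;

	a = a + b;	/* reported as an integer addition overflow */
}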
@@ -222,6 +236,74 @@ static void ubsan_epilogue(void)
check_panic_on_warn("UBSAN");
}
+static void handle_overflow(struct overflow_data *data, void *lhs,
+ void *rhs, char op)
+{
+ struct type_descriptor *type = data->type;
+ char lhs_val_str[VALUE_LENGTH];
+ char rhs_val_str[VALUE_LENGTH];
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, type_is_signed(type) ?
+ "signed-integer-overflow" :
+ "unsigned-integer-overflow");
+
+ val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs);
+ val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs);
+ pr_err("%s %c %s cannot be represented in type %s\n",
+ lhs_val_str,
+ op,
+ rhs_val_str,
+ type->type_name);
+
+ ubsan_epilogue();
+}
+
+void __ubsan_handle_add_overflow(void *data,
+ void *lhs, void *rhs)
+{
+ handle_overflow(data, lhs, rhs, '+');
+}
+EXPORT_SYMBOL(__ubsan_handle_add_overflow);
+
+void __ubsan_handle_sub_overflow(void *data,
+ void *lhs, void *rhs)
+{
+ handle_overflow(data, lhs, rhs, '-');
+}
+EXPORT_SYMBOL(__ubsan_handle_sub_overflow);
+
+void __ubsan_handle_mul_overflow(void *data,
+ void *lhs, void *rhs)
+{
+ handle_overflow(data, lhs, rhs, '*');
+}
+EXPORT_SYMBOL(__ubsan_handle_mul_overflow);
+
+void __ubsan_handle_negate_overflow(void *_data, void *old_val)
+{
+ struct overflow_data *data = _data;
+ char old_val_str[VALUE_LENGTH];
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, "negation-overflow");
+
+ val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val);
+
+ pr_err("negation of %s cannot be represented in type %s:\n",
+ old_val_str, data->type->type_name);
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_negate_overflow);
+
void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
{
struct overflow_data *data = _data;
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 5d99ab8191..0982578fbd 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -124,15 +124,32 @@ typedef s64 s_max;
typedef u64 u_max;
#endif
-void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
-void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
-void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
-void __ubsan_handle_out_of_bounds(void *_data, void *index);
-void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
-void __ubsan_handle_builtin_unreachable(void *_data);
-void __ubsan_handle_load_invalid_value(void *_data, void *val);
-void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
- unsigned long align,
- unsigned long offset);
+/*
+ * When generating Runtime Calls, Clang doesn't respect the -mregparm=3
+ * option used on i386: https://github.com/llvm/llvm-project/issues/89670
+ * Fix this for earlier Clang versions by forcing the calling convention
+ * to use non-register arguments.
+ */
+#if defined(CONFIG_X86_32) && \
+ defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 190000
+# define ubsan_linkage asmlinkage
+#else
+# define ubsan_linkage
+#endif
+
+void ubsan_linkage __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_negate_overflow(void *_data, void *old_val);
+void ubsan_linkage __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
+void ubsan_linkage __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
+void ubsan_linkage __ubsan_handle_out_of_bounds(void *_data, void *index);
+void ubsan_linkage __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_builtin_unreachable(void *_data);
+void ubsan_linkage __ubsan_handle_load_invalid_value(void *_data, void *val);
+void ubsan_linkage __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset);
#endif
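A rough sketch of what the workaround amounts to, assuming (as on 32-bit x86 in arch/x86) that the kernel builds with -mregparm=3 and that asmlinkage expands to a regparm(0) attribute; the declaration name is hypothetical, shown only to make the resulting calling convention explicit:

/*
 * Illustrative only: with ubsan_linkage in effect on i386, a handler
 * declaration is roughly equivalent to spelling out the stack-based
 * calling convention by hand, matching Clang's generated runtime calls.
 */
void __attribute__((regparm(0)))
example_handle_add_overflow(void *data, void *lhs, void *rhs);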
diff --git a/lib/xarray.c b/lib/xarray.c
index 39f07bfc4d..5e7d6334d7 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -969,8 +969,22 @@ static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
return marks;
}
+static inline void node_mark_slots(struct xa_node *node, unsigned int sibs,
+ xa_mark_t mark)
+{
+ int i;
+
+	if (sibs == 0) {
+		node_mark_all(node, mark);
+	} else {
+		for (i = 0; i < XA_CHUNK_SIZE; i += sibs + 1)
+			node_set_mark(node, i, mark);
+	}
+}
+
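A small worked example of the marking pattern (illustrative only, not part of the patch; the example_* name is hypothetical), assuming the default 64-slot nodes (XA_CHUNK_SHIFT == 6): splitting an entry into order-2 pieces gives sibs == 3, so only every fourth slot of the child, the canonical slot of each piece, receives the mark.

/* Illustrative only: print the slots node_mark_slots() would mark. */
static void example_mark_pattern(void)
{
	unsigned int sibs = 3;	/* each order-2 piece spans sibs + 1 == 4 slots */
	unsigned int i;

	for (i = 0; i < 64; i += sibs + 1)
		pr_info("slot %u is canonical and gets the mark\n", i);
}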
static void node_set_marks(struct xa_node *node, unsigned int offset,
- struct xa_node *child, unsigned int marks)
+ struct xa_node *child, unsigned int sibs,
+ unsigned int marks)
{
xa_mark_t mark = XA_MARK_0;
@@ -978,7 +992,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
if (marks & (1 << (__force unsigned int)mark)) {
node_set_mark(node, offset, mark);
if (child)
- node_mark_all(child, mark);
+ node_mark_slots(child, sibs, mark);
}
if (mark == XA_MARK_MAX)
break;
@@ -1077,7 +1091,8 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
child->nr_values = xa_is_value(entry) ?
XA_CHUNK_SIZE : 0;
RCU_INIT_POINTER(child->parent, node);
- node_set_marks(node, offset, child, marks);
+ node_set_marks(node, offset, child, xas->xa_sibs,
+ marks);
rcu_assign_pointer(node->slots[offset],
xa_mk_node(child));
if (xa_is_value(curr))
@@ -1086,7 +1101,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
} else {
unsigned int canon = offset - xas->xa_sibs;
- node_set_marks(node, canon, NULL, marks);
+ node_set_marks(node, canon, NULL, 0, marks);
rcu_assign_pointer(node->slots[canon], entry);
while (offset > canon)
rcu_assign_pointer(node->slots[offset--],