Diffstat
-rw-r--r--  lib/Kconfig                                               |  11
-rw-r--r--  lib/Kconfig.debug                                         |  23
-rw-r--r--  lib/Makefile                                              |  12
-rw-r--r--  lib/bitmap-str.c                                          | 510
-rw-r--r--  lib/bitmap.c                                              | 680
-rw-r--r--  lib/closure.c (renamed from drivers/md/bcache/closure.c)  |  50
-rw-r--r--  lib/cpumask.c                                             |  21
-rw-r--r--  lib/debugobjects.c                                        | 202
-rw-r--r--  lib/decompress_unxz.c                                     |   3
-rw-r--r--  lib/errname.c                                             |   1
-rw-r--r--  lib/fonts/Kconfig                                         |   2
-rw-r--r--  lib/fw_table.c                                            | 176
-rw-r--r--  lib/generic-radix-tree.c                                  |  59
-rw-r--r--  lib/iov_iter.c                                            | 437
-rw-r--r--  lib/kobject.c                                             |  24
-rw-r--r--  lib/kobject_uevent.c                                      |   8
-rw-r--r--  lib/kunit/assert.c                                        |  14
-rw-r--r--  lib/kunit/debugfs.c                                       |  58
-rw-r--r--  lib/kunit/executor.c                                      |   4
-rw-r--r--  lib/kunit/kunit-example-test.c                            |   5
-rw-r--r--  lib/kunit/kunit-test.c                                    |  53
-rw-r--r--  lib/kunit/string-stream-test.c                            | 525
-rw-r--r--  lib/kunit/string-stream.c                                 | 100
-rw-r--r--  lib/kunit/string-stream.h                                 |  16
-rw-r--r--  lib/kunit/test.c                                          |  77
-rw-r--r--  lib/llist.c                                               |  28
-rw-r--r--  lib/lwq.c                                                 | 158
-rw-r--r--  lib/nlattr.c                                              |  22
-rw-r--r--  lib/objpool.c                                             | 297
-rw-r--r--  lib/percpu_counter.c                                      |  79
-rw-r--r--  lib/raid6/Makefile                                        |   4
-rw-r--r--  lib/raid6/algos.c                                         |   4
-rw-r--r--  lib/raid6/int.uc                                          |   9
-rw-r--r--  lib/rcuref.c                                              |   2
-rw-r--r--  lib/seq_buf.c                                             |  28
-rw-r--r--  lib/string_helpers.c                                      |  10
-rw-r--r--  lib/test_bitmap.c                                         |  24
-rw-r--r--  lib/test_bpf.c                                            | 371
-rw-r--r--  lib/test_objpool.c                                        | 690
-rw-r--r--  lib/ucs2_string.c                                         |  52
-rw-r--r--  lib/vsprintf.c                                            |  25
-rw-r--r--  lib/xz/Kconfig                                            |   5
42 files changed, 3546 insertions, 1333 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index c686f4adc1..3ea1c830ef 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -506,6 +506,9 @@ config ASSOCIATIVE_ARRAY
for more information.
+config CLOSURES
+ bool
+
config HAS_IOMEM
bool
depends on !NO_IOMEM
@@ -729,6 +732,11 @@ config PARMAN
config OBJAGG
tristate "objagg" if COMPILE_TEST
+config LWQ_TEST
+ bool "Boot-time test for lwq queuing"
+ help
+ Run boot-time test of light-weight queuing.
+
endmenu
config GENERIC_IOREMAP
@@ -764,3 +772,6 @@ config ASN1_ENCODER
config POLYNOMIAL
tristate
+
+config FIRMWARE_TABLE
+ bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1331f3186f..ef39dffffd 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -759,7 +759,7 @@ config SHRINKER_DEBUG
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
- depends on DEBUG_KERNEL && !IA64
+ depends on DEBUG_KERNEL
help
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T and sysrq-P debug output.
@@ -1720,6 +1720,15 @@ config DEBUG_NOTIFIERS
This is a relatively cheap check but if you care about maximum
performance, say N.
+config DEBUG_CLOSURES
+ bool "Debug closures (bcache async widgits)"
+ depends on CLOSURES
+ select DEBUG_FS
+ help
+ Keeps all active closures in a linked list and provides a debugfs
+ interface to list them, which makes it possible to see asynchronous
+ operations that get stuck.
+
config DEBUG_MAPLE_TREE
bool "Debug maple trees"
depends on DEBUG_KERNEL
@@ -2225,6 +2234,7 @@ config TEST_DIV64
config TEST_IOV_ITER
tristate "Test iov_iter operation" if !KUNIT_ALL_TESTS
depends on KUNIT
+ depends on MMU
default KUNIT_ALL_TESTS
help
Enable this to turn on testing of the operation of the I/O iterator
@@ -2930,6 +2940,17 @@ config TEST_CLOCKSOURCE_WATCHDOG
If unsure, say N.
+config TEST_OBJPOOL
+ tristate "Test module for correctness and stress of objpool"
+ default n
+ depends on m && DEBUG_KERNEL
+ help
+ This builds the "test_objpool" module that should be used for
+ correctness verification and concurrent testing of object
+ allocation and reclamation.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index 740109b6e2..6b09731d8e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -34,7 +34,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
nmi_backtrace.o win_minmax.o memcat_p.o \
- buildid.o
+ buildid.o objpool.o
lib-$(CONFIG_PRINTK) += dump_stack.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -45,10 +45,10 @@ obj-y += lockref.o
obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
list_sort.o uuid.o iov_iter.o clz_ctz.o \
- bsearch.o find_bit.o llist.o memweight.o kfifo.o \
+ bsearch.o find_bit.o llist.o lwq.o memweight.o kfifo.o \
percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \
- generic-radix-tree.o
+ generic-radix-tree.o bitmap-str.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -107,6 +107,8 @@ obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
+obj-$(CONFIG_TEST_OBJPOOL) += test_objpool.o
+
#
# CFLAGS for compiling floating point code inside the kernel. x86/Makefile turns
# off the generation of FPU/SSE* instructions for kernel proper but FPU_FLAGS
@@ -255,6 +257,8 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
+obj-$(CONFIG_CLOSURES) += closure.o
+
obj-$(CONFIG_DQL) += dynamic_queue_limits.o
obj-$(CONFIG_GLOB) += glob.o
@@ -405,6 +409,8 @@ obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
+obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
+
# FORTIFY_SOURCE compile-time behavior tests
TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c)
TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
diff --git a/lib/bitmap-str.c b/lib/bitmap-str.c
new file mode 100644
index 0000000000..be74520950
--- /dev/null
+++ b/lib/bitmap-str.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitmap.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/hex.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "kstrtox.h"
+
+/**
+ * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
+ *
+ * @ubuf: pointer to user buffer containing string.
+ * @ulen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ */
+int bitmap_parse_user(const char __user *ubuf,
+ unsigned int ulen, unsigned long *maskp,
+ int nmaskbits)
+{
+ char *buf;
+ int ret;
+
+ buf = memdup_user_nul(ubuf, ulen);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_parse(buf, UINT_MAX, maskp, nmaskbits);
+
+ kfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(bitmap_parse_user);
+
+/**
+ * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
+ * @list: indicates whether the bitmap must be printed as a list
+ * @buf: page aligned buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ *
+ * Output format is a comma-separated list of decimal numbers and
+ * ranges if list is specified or hex digits grouped into comma-separated
+ * sets of 8 digits/set. Returns the number of characters written to buf.
+ *
+ * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
+ * area and that sufficient storage remains at @buf to accommodate the
+ * bitmap_print_to_pagebuf() output. Returns the number of characters
+ * actually printed to @buf, excluding terminating '\0'.
+ */
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ int nmaskbits)
+{
+ ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
+
+ return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
+ scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
+}
+EXPORT_SYMBOL(bitmap_print_to_pagebuf);
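For concreteness, here is a minimal usage sketch (an illustration only, not part of this patch) showing the two output formats for a hypothetical 16-bit mask with bits 0-3 and 8 set; error handling is omitted for brevity:

        DECLARE_BITMAP(mask, 16);
        char *buf = (char *)get_zeroed_page(GFP_KERNEL);     /* page-aligned, as required */

        bitmap_zero(mask, 16);
        bitmap_set(mask, 0, 4);                               /* bits 0-3 */
        __set_bit(8, mask);                                   /* bit 8 */

        bitmap_print_to_pagebuf(true,  buf, mask, 16);        /* buf now holds "0-3,8\n" */
        bitmap_print_to_pagebuf(false, buf, mask, 16);        /* buf now holds "010f\n"  */

        free_page((unsigned long)buf);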
+
+/**
+ * bitmap_print_to_buf - convert bitmap to list or hex format ASCII string
+ * @list: indicates whether the bitmap must be list
+ * true: print in decimal list format
+ * false: print in hexadecimal bitmask format
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
+ */
+static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ const char *fmt = list ? "%*pbl\n" : "%*pb\n";
+ ssize_t size;
+ void *data;
+
+ data = kasprintf(GFP_KERNEL, fmt, nmaskbits, maskp);
+ if (!data)
+ return -ENOMEM;
+
+ size = memory_read_from_buffer(buf, count, &off, data, strlen(data) + 1);
+ kfree(data);
+
+ return size;
+}
+
+/**
+ * bitmap_print_bitmask_to_buf - convert bitmap to hex bitmask format ASCII string
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
+ *
+ * The bitmap_print_to_pagebuf() is used indirectly via its cpumap wrapper
+ * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal
+ * bitmask and decimal list to userspace by sysfs ABI.
+ * Drivers might be using a normal attribute for this kind of ABIs. A
+ * normal attribute typically has show entry as below::
+ *
+ * static ssize_t example_attribute_show(struct device *dev,
+ * struct device_attribute *attr, char *buf)
+ * {
+ * ...
+ * return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
+ * }
+ *
+ * The show entry of a normal attribute has no offset and count parameters,
+ * which means the file is limited to one page only.
+ * The bitmap_print_to_pagebuf() API works well for this kind of normal
+ * attribute, with a buf parameter and without offset and count::
+ *
+ * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ * int nmaskbits)
+ * {
+ * }
+ *
+ * The problem is that once we have a large bitmap, the resulting bitmask
+ * or list can be larger than one page. Especially for a list, it could be
+ * as complex as 0,3,5,7,9,..., and we have no simple way to know its exact size.
+ * It turns out bin_attribute is a way to break this limit. bin_attribute
+ * has show entry as below::
+ *
+ * static ssize_t
+ * example_bin_attribute_show(struct file *filp, struct kobject *kobj,
+ * struct bin_attribute *attr, char *buf,
+ * loff_t offset, size_t count)
+ * {
+ * ...
+ * }
+ *
+ * With the new offset and count parameters, the sysfs ABI is able to
+ * support file sizes of more than one page. For example, offset could be
+ * >= 4096.
+ * bitmap_print_bitmask_to_buf() and bitmap_print_list_to_buf(), with their
+ * cpumap wrappers cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(),
+ * let those drivers support large bitmasks and lists once they move to
+ * bin_attribute. As a result, we have to pass the corresponding parameters
+ * such as off and count from the bin_attribute show entry to this API.
+ *
+ * The role of cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf()
+ * is similar to that of cpumap_print_to_pagebuf(); the difference is that
+ * bitmap_print_to_pagebuf() mainly serves sysfs attributes, with the assumption
+ * that the destination buffer is exactly one page and never more than one page.
+ * cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(), on the other
+ * hand, mainly serve bin_attribute, which is not limited to exactly one page,
+ * and so they can break the size limit of the converted decimal list and
+ * hexadecimal bitmask.
+ *
+ * WARNING!
+ *
+ * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf().
+ * It is intended to workaround sysfs limitations discussed above and should be
+ * used carefully in general case for the following reasons:
+ *
+ * - Time complexity is O(nbits^2/count), compared to O(nbits) for snprintf().
+ * - Memory complexity is O(nbits), compared to O(1) for snprintf().
+ * - @off and @count are NOT offset and number of bits to print.
+ * - If printing part of bitmap as list, the resulting string is not a correct
+ * list representation of bitmap. Particularly, some bits within or out of
+ * related interval may be erroneously set or unset. The format of the string
+ * may be broken, so bitmap_parselist-like parser may fail parsing it.
+ * - If printing the whole bitmap as list by parts, user must ensure the order
+ * of calls of the function such that the offset is incremented linearly.
+ * - If printing the whole bitmap as list by parts, user must keep bitmap
+ * unchanged between the very first and very last call. Otherwise concatenated
+ * result may be incorrect, and format may be broken.
+ *
+ * Returns the number of characters actually printed to @buf
+ */
+int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ return bitmap_print_to_buf(false, buf, maskp, nmaskbits, off, count);
+}
+EXPORT_SYMBOL(bitmap_print_bitmask_to_buf);
+
+/**
+ * bitmap_print_list_to_buf - convert bitmap to decimal list format ASCII string
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
+ *
+ * Everything is same with the above bitmap_print_bitmask_to_buf() except
+ * the print format.
+ */
+int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ return bitmap_print_to_buf(true, buf, maskp, nmaskbits, off, count);
+}
+EXPORT_SYMBOL(bitmap_print_list_to_buf);
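Putting the bin_attribute pattern described above together, a hedged sketch of a read callback built on these helpers might look as follows; example_mask, example_nbits and the attribute registration wiring are assumptions for illustration, not part of this patch:

        static ssize_t example_mask_read(struct file *filp, struct kobject *kobj,
                                         struct bin_attribute *attr, char *buf,
                                         loff_t off, size_t count)
        {
                /* off/count come straight from sysfs; the helper handles paging. */
                return bitmap_print_bitmask_to_buf(buf, example_mask, example_nbits,
                                                   off, count);
        }
        static BIN_ATTR_RO(example_mask, 0);    /* size 0 is illustrative: file size unknown */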
+
+/*
+ * Region 9-38:4/10 describes the following bitmap structure:
+ * 0 9 12 18 38 N
+ * .........****......****......****..................
+ * ^ ^ ^ ^ ^
+ * start off group_len end nbits
+ */
+struct region {
+ unsigned int start;
+ unsigned int off;
+ unsigned int group_len;
+ unsigned int end;
+ unsigned int nbits;
+};
+
+static void bitmap_set_region(const struct region *r, unsigned long *bitmap)
+{
+ unsigned int start;
+
+ for (start = r->start; start <= r->end; start += r->group_len)
+ bitmap_set(bitmap, start, min(r->end - start + 1, r->off));
+}
+
+static int bitmap_check_region(const struct region *r)
+{
+ if (r->start > r->end || r->group_len == 0 || r->off > r->group_len)
+ return -EINVAL;
+
+ if (r->end >= r->nbits)
+ return -ERANGE;
+
+ return 0;
+}
+
+static const char *bitmap_getnum(const char *str, unsigned int *num,
+ unsigned int lastbit)
+{
+ unsigned long long n;
+ unsigned int len;
+
+ if (str[0] == 'N') {
+ *num = lastbit;
+ return str + 1;
+ }
+
+ len = _parse_integer(str, 10, &n);
+ if (!len)
+ return ERR_PTR(-EINVAL);
+ if (len & KSTRTOX_OVERFLOW || n != (unsigned int)n)
+ return ERR_PTR(-EOVERFLOW);
+
+ *num = n;
+ return str + len;
+}
+
+static inline bool end_of_str(char c)
+{
+ return c == '\0' || c == '\n';
+}
+
+static inline bool __end_of_region(char c)
+{
+ return isspace(c) || c == ',';
+}
+
+static inline bool end_of_region(char c)
+{
+ return __end_of_region(c) || end_of_str(c);
+}
+
+/*
+ * The format allows commas and whitespaces at the beginning
+ * of the region.
+ */
+static const char *bitmap_find_region(const char *str)
+{
+ while (__end_of_region(*str))
+ str++;
+
+ return end_of_str(*str) ? NULL : str;
+}
+
+static const char *bitmap_find_region_reverse(const char *start, const char *end)
+{
+ while (start <= end && __end_of_region(*end))
+ end--;
+
+ return end;
+}
+
+static const char *bitmap_parse_region(const char *str, struct region *r)
+{
+ unsigned int lastbit = r->nbits - 1;
+
+ if (!strncasecmp(str, "all", 3)) {
+ r->start = 0;
+ r->end = lastbit;
+ str += 3;
+
+ goto check_pattern;
+ }
+
+ str = bitmap_getnum(str, &r->start, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+ if (end_of_region(*str))
+ goto no_end;
+
+ if (*str != '-')
+ return ERR_PTR(-EINVAL);
+
+ str = bitmap_getnum(str + 1, &r->end, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+check_pattern:
+ if (end_of_region(*str))
+ goto no_pattern;
+
+ if (*str != ':')
+ return ERR_PTR(-EINVAL);
+
+ str = bitmap_getnum(str + 1, &r->off, lastbit);
+ if (IS_ERR(str))
+ return str;
+
+ if (*str != '/')
+ return ERR_PTR(-EINVAL);
+
+ return bitmap_getnum(str + 1, &r->group_len, lastbit);
+
+no_end:
+ r->end = r->start;
+no_pattern:
+ r->off = r->end + 1;
+ r->group_len = r->end + 1;
+
+ return end_of_str(*str) ? NULL : str;
+}
+
+/**
+ * bitmap_parselist - convert list format ASCII string to bitmap
+ * @buf: read user string from this buffer; must be terminated
+ * with a \0 or \n.
+ * @maskp: write resulting mask here
+ * @nmaskbits: number of bits in mask to be written
+ *
+ * Input format is a comma-separated list of decimal numbers and
+ * ranges. Consecutively set bits are shown as two hyphen-separated
+ * decimal numbers, the smallest and largest bit numbers set in
+ * the range.
+ * Optionally each range can be postfixed to denote that only parts of it
+ * should be set. The range will be divided into groups of a specific size.
+ * From each group, only a defined number of bits will be used.
+ * Syntax: range:used_size/group_size
+ * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
+ * The value 'N' can be used as a dynamically substituted token for the
+ * maximum allowed value; i.e. (nmaskbits - 1). Keep in mind that it is
+ * dynamic, so if system changes cause the bitmap width to change, such
+ * as more cores in a CPU list, then any ranges using N will also change.
+ *
+ * Returns: 0 on success, -errno on invalid input strings. Error values:
+ *
+ * - ``-EINVAL``: wrong region format
+ * - ``-EINVAL``: invalid character in string
+ * - ``-ERANGE``: bit number specified too large for mask
+ * - ``-EOVERFLOW``: integer overflow in the input parameters
+ */
+int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
+{
+ struct region r;
+ long ret;
+
+ r.nbits = nmaskbits;
+ bitmap_zero(maskp, r.nbits);
+
+ while (buf) {
+ buf = bitmap_find_region(buf);
+ if (buf == NULL)
+ return 0;
+
+ buf = bitmap_parse_region(buf, &r);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_check_region(&r);
+ if (ret)
+ return ret;
+
+ bitmap_set_region(&r, maskp);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bitmap_parselist);
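As an illustrative sketch (not part of this patch), parsing the example string from the comment above into a hypothetical 1024-bit mask would look like:

        DECLARE_BITMAP(mask, 1024);
        int err;

        /* Sets bits 0,1,256,257,512,513,768,769 (2 bits used out of every 256). */
        err = bitmap_parselist("0-1023:2/256", mask, 1024);
        if (err)
                pr_err("invalid list: %d\n", err);    /* -EINVAL, -ERANGE or -EOVERFLOW */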
+
+
+/**
+ * bitmap_parselist_user() - convert user buffer's list format ASCII
+ * string to bitmap
+ *
+ * @ubuf: pointer to user buffer containing string.
+ * @ulen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Wrapper for bitmap_parselist(), providing it with user buffer.
+ */
+int bitmap_parselist_user(const char __user *ubuf,
+ unsigned int ulen, unsigned long *maskp,
+ int nmaskbits)
+{
+ char *buf;
+ int ret;
+
+ buf = memdup_user_nul(ubuf, ulen);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = bitmap_parselist(buf, maskp, nmaskbits);
+
+ kfree(buf);
+ return ret;
+}
+EXPORT_SYMBOL(bitmap_parselist_user);
+
+static const char *bitmap_get_x32_reverse(const char *start,
+ const char *end, u32 *num)
+{
+ u32 ret = 0;
+ int c, i;
+
+ for (i = 0; i < 32; i += 4) {
+ c = hex_to_bin(*end--);
+ if (c < 0)
+ return ERR_PTR(-EINVAL);
+
+ ret |= c << i;
+
+ if (start > end || __end_of_region(*end))
+ goto out;
+ }
+
+ if (hex_to_bin(*end--) >= 0)
+ return ERR_PTR(-EOVERFLOW);
+out:
+ *num = ret;
+ return end;
+}
+
+/**
+ * bitmap_parse - convert an ASCII hex string into a bitmap.
+ * @start: pointer to buffer containing string.
+ * @buflen: buffer size in bytes. If string is smaller than this
+ * then it must be terminated with a \0 or \n. In that case,
+ * UINT_MAX may be provided instead of string length.
+ * @maskp: pointer to bitmap array that will contain result.
+ * @nmaskbits: size of bitmap, in bits.
+ *
+ * Commas group hex digits into chunks. Each chunk defines exactly 32
+ * bits of the resultant bitmask. No chunk may specify a value larger
+ * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
+ * then leading 0-bits are prepended. %-EINVAL is returned for illegal
+ * characters. Grouping such as "1,,5", ",44", "," or "" is allowed.
+ * Leading, embedded and trailing whitespace accepted.
+ */
+int bitmap_parse(const char *start, unsigned int buflen,
+ unsigned long *maskp, int nmaskbits)
+{
+ const char *end = strnchrnul(start, buflen, '\n') - 1;
+ int chunks = BITS_TO_U32(nmaskbits);
+ u32 *bitmap = (u32 *)maskp;
+ int unset_bit;
+ int chunk;
+
+ for (chunk = 0; ; chunk++) {
+ end = bitmap_find_region_reverse(start, end);
+ if (start > end)
+ break;
+
+ if (!chunks--)
+ return -EOVERFLOW;
+
+#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk ^ 1]);
+#else
+ end = bitmap_get_x32_reverse(start, end, &bitmap[chunk]);
+#endif
+ if (IS_ERR(end))
+ return PTR_ERR(end);
+ }
+
+ unset_bit = (BITS_TO_U32(nmaskbits) - chunks) * 32;
+ if (unset_bit < nmaskbits) {
+ bitmap_clear(maskp, unset_bit, nmaskbits - unset_bit);
+ return 0;
+ }
+
+ if (find_next_bit(maskp, unset_bit, nmaskbits) != unset_bit)
+ return -EOVERFLOW;
+
+ return 0;
+}
+EXPORT_SYMBOL(bitmap_parse);
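A small usage sketch (again only illustrative, not part of this patch): each comma-separated chunk supplies exactly 32 bits, and the rightmost chunk fills the least significant word:

        DECLARE_BITMAP(mask, 64);
        int err;

        /* Bits 32-63 get 0xdeadbeef, bits 0-31 get 0x00000001. */
        err = bitmap_parse("deadbeef,00000001", UINT_MAX, mask, 64);
        if (err)
                pr_err("invalid bitmask: %d\n", err);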
diff --git a/lib/bitmap.c b/lib/bitmap.c
index ddb31015e3..09522af227 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -6,21 +6,10 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
-#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/device.h>
-#include <linux/errno.h>
#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/thread_info.h>
-#include <linux/uaccess.h>
-
-#include <asm/page.h>
-
-#include "kstrtox.h"
/**
* DOC: bitmap introduction
@@ -440,508 +429,6 @@ again:
}
EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
-/*
- * Bitmap printing & parsing functions: first version by Nadia Yvette Chambers,
- * second version by Paul Jackson, third by Joe Korty.
- */
-
-/**
- * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
- *
- * @ubuf: pointer to user buffer containing string.
- * @ulen: buffer size in bytes. If string is smaller than this
- * then it must be terminated with a \0.
- * @maskp: pointer to bitmap array that will contain result.
- * @nmaskbits: size of bitmap, in bits.
- */
-int bitmap_parse_user(const char __user *ubuf,
- unsigned int ulen, unsigned long *maskp,
- int nmaskbits)
-{
- char *buf;
- int ret;
-
- buf = memdup_user_nul(ubuf, ulen);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ret = bitmap_parse(buf, UINT_MAX, maskp, nmaskbits);
-
- kfree(buf);
- return ret;
-}
-EXPORT_SYMBOL(bitmap_parse_user);
-
-/**
- * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string
- * @list: indicates whether the bitmap must be list
- * @buf: page aligned buffer into which string is placed
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- *
- * Output format is a comma-separated list of decimal numbers and
- * ranges if list is specified or hex digits grouped into comma-separated
- * sets of 8 digits/set. Returns the number of characters written to buf.
- *
- * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
- * area and that sufficient storage remains at @buf to accommodate the
- * bitmap_print_to_pagebuf() output. Returns the number of characters
- * actually printed to @buf, excluding terminating '\0'.
- */
-int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
- int nmaskbits)
-{
- ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
-
- return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
- scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
-}
-EXPORT_SYMBOL(bitmap_print_to_pagebuf);
-
-/**
- * bitmap_print_to_buf - convert bitmap to list or hex format ASCII string
- * @list: indicates whether the bitmap must be list
- * true: print in decimal list format
- * false: print in hexadecimal bitmask format
- * @buf: buffer into which string is placed
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- * @off: in the string from which we are copying, We copy to @buf
- * @count: the maximum number of bytes to print
- */
-static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count)
-{
- const char *fmt = list ? "%*pbl\n" : "%*pb\n";
- ssize_t size;
- void *data;
-
- data = kasprintf(GFP_KERNEL, fmt, nmaskbits, maskp);
- if (!data)
- return -ENOMEM;
-
- size = memory_read_from_buffer(buf, count, &off, data, strlen(data) + 1);
- kfree(data);
-
- return size;
-}
-
-/**
- * bitmap_print_bitmask_to_buf - convert bitmap to hex bitmask format ASCII string
- * @buf: buffer into which string is placed
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- * @off: in the string from which we are copying, We copy to @buf
- * @count: the maximum number of bytes to print
- *
- * The bitmap_print_to_pagebuf() is used indirectly via its cpumap wrapper
- * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal
- * bitmask and decimal list to userspace by sysfs ABI.
- * Drivers might be using a normal attribute for this kind of ABIs. A
- * normal attribute typically has show entry as below::
- *
- * static ssize_t example_attribute_show(struct device *dev,
- * struct device_attribute *attr, char *buf)
- * {
- * ...
- * return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
- * }
- *
- * show entry of attribute has no offset and count parameters and this
- * means the file is limited to one page only.
- * bitmap_print_to_pagebuf() API works terribly well for this kind of
- * normal attribute with buf parameter and without offset, count::
- *
- * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
- * int nmaskbits)
- * {
- * }
- *
- * The problem is once we have a large bitmap, we have a chance to get a
- * bitmask or list more than one page. Especially for list, it could be
- * as complex as 0,3,5,7,9,... We have no simple way to know it exact size.
- * It turns out bin_attribute is a way to break this limit. bin_attribute
- * has show entry as below::
- *
- * static ssize_t
- * example_bin_attribute_show(struct file *filp, struct kobject *kobj,
- * struct bin_attribute *attr, char *buf,
- * loff_t offset, size_t count)
- * {
- * ...
- * }
- *
- * With the new offset and count parameters, this makes sysfs ABI be able
- * to support file size more than one page. For example, offset could be
- * >= 4096.
- * bitmap_print_bitmask_to_buf(), bitmap_print_list_to_buf() wit their
- * cpumap wrapper cpumap_print_bitmask_to_buf(), cpumap_print_list_to_buf()
- * make those drivers be able to support large bitmask and list after they
- * move to use bin_attribute. In result, we have to pass the corresponding
- * parameters such as off, count from bin_attribute show entry to this API.
- *
- * The role of cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf()
- * is similar with cpumap_print_to_pagebuf(), the difference is that
- * bitmap_print_to_pagebuf() mainly serves sysfs attribute with the assumption
- * the destination buffer is exactly one page and won't be more than one page.
- * cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(), on the other
- * hand, mainly serves bin_attribute which doesn't work with exact one page,
- * and it can break the size limit of converted decimal list and hexadecimal
- * bitmask.
- *
- * WARNING!
- *
- * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf().
- * It is intended to workaround sysfs limitations discussed above and should be
- * used carefully in general case for the following reasons:
- *
- * - Time complexity is O(nbits^2/count), comparing to O(nbits) for snprintf().
- * - Memory complexity is O(nbits), comparing to O(1) for snprintf().
- * - @off and @count are NOT offset and number of bits to print.
- * - If printing part of bitmap as list, the resulting string is not a correct
- * list representation of bitmap. Particularly, some bits within or out of
- * related interval may be erroneously set or unset. The format of the string
- * may be broken, so bitmap_parselist-like parser may fail parsing it.
- * - If printing the whole bitmap as list by parts, user must ensure the order
- * of calls of the function such that the offset is incremented linearly.
- * - If printing the whole bitmap as list by parts, user must keep bitmap
- * unchanged between the very first and very last call. Otherwise concatenated
- * result may be incorrect, and format may be broken.
- *
- * Returns the number of characters actually printed to @buf
- */
-int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count)
-{
- return bitmap_print_to_buf(false, buf, maskp, nmaskbits, off, count);
-}
-EXPORT_SYMBOL(bitmap_print_bitmask_to_buf);
-
-/**
- * bitmap_print_list_to_buf - convert bitmap to decimal list format ASCII string
- * @buf: buffer into which string is placed
- * @maskp: pointer to bitmap to convert
- * @nmaskbits: size of bitmap, in bits
- * @off: in the string from which we are copying, We copy to @buf
- * @count: the maximum number of bytes to print
- *
- * Everything is same with the above bitmap_print_bitmask_to_buf() except
- * the print format.
- */
-int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count)
-{
- return bitmap_print_to_buf(true, buf, maskp, nmaskbits, off, count);
-}
-EXPORT_SYMBOL(bitmap_print_list_to_buf);
-
-/*
- * Region 9-38:4/10 describes the following bitmap structure:
- * 0 9 12 18 38 N
- * .........****......****......****..................
- * ^ ^ ^ ^ ^
- * start off group_len end nbits
- */
-struct region {
- unsigned int start;
- unsigned int off;
- unsigned int group_len;
- unsigned int end;
- unsigned int nbits;
-};
-
-static void bitmap_set_region(const struct region *r, unsigned long *bitmap)
-{
- unsigned int start;
-
- for (start = r->start; start <= r->end; start += r->group_len)
- bitmap_set(bitmap, start, min(r->end - start + 1, r->off));
-}
-
-static int bitmap_check_region(const struct region *r)
-{
- if (r->start > r->end || r->group_len == 0 || r->off > r->group_len)
- return -EINVAL;
-
- if (r->end >= r->nbits)
- return -ERANGE;
-
- return 0;
-}
-
-static const char *bitmap_getnum(const char *str, unsigned int *num,
- unsigned int lastbit)
-{
- unsigned long long n;
- unsigned int len;
-
- if (str[0] == 'N') {
- *num = lastbit;
- return str + 1;
- }
-
- len = _parse_integer(str, 10, &n);
- if (!len)
- return ERR_PTR(-EINVAL);
- if (len & KSTRTOX_OVERFLOW || n != (unsigned int)n)
- return ERR_PTR(-EOVERFLOW);
-
- *num = n;
- return str + len;
-}
-
-static inline bool end_of_str(char c)
-{
- return c == '\0' || c == '\n';
-}
-
-static inline bool __end_of_region(char c)
-{
- return isspace(c) || c == ',';
-}
-
-static inline bool end_of_region(char c)
-{
- return __end_of_region(c) || end_of_str(c);
-}
-
-/*
- * The format allows commas and whitespaces at the beginning
- * of the region.
- */
-static const char *bitmap_find_region(const char *str)
-{
- while (__end_of_region(*str))
- str++;
-
- return end_of_str(*str) ? NULL : str;
-}
-
-static const char *bitmap_find_region_reverse(const char *start, const char *end)
-{
- while (start <= end && __end_of_region(*end))
- end--;
-
- return end;
-}
-
-static const char *bitmap_parse_region(const char *str, struct region *r)
-{
- unsigned int lastbit = r->nbits - 1;
-
- if (!strncasecmp(str, "all", 3)) {
- r->start = 0;
- r->end = lastbit;
- str += 3;
-
- goto check_pattern;
- }
-
- str = bitmap_getnum(str, &r->start, lastbit);
- if (IS_ERR(str))
- return str;
-
- if (end_of_region(*str))
- goto no_end;
-
- if (*str != '-')
- return ERR_PTR(-EINVAL);
-
- str = bitmap_getnum(str + 1, &r->end, lastbit);
- if (IS_ERR(str))
- return str;
-
-check_pattern:
- if (end_of_region(*str))
- goto no_pattern;
-
- if (*str != ':')
- return ERR_PTR(-EINVAL);
-
- str = bitmap_getnum(str + 1, &r->off, lastbit);
- if (IS_ERR(str))
- return str;
-
- if (*str != '/')
- return ERR_PTR(-EINVAL);
-
- return bitmap_getnum(str + 1, &r->group_len, lastbit);
-
-no_end:
- r->end = r->start;
-no_pattern:
- r->off = r->end + 1;
- r->group_len = r->end + 1;
-
- return end_of_str(*str) ? NULL : str;
-}
-
-/**
- * bitmap_parselist - convert list format ASCII string to bitmap
- * @buf: read user string from this buffer; must be terminated
- * with a \0 or \n.
- * @maskp: write resulting mask here
- * @nmaskbits: number of bits in mask to be written
- *
- * Input format is a comma-separated list of decimal numbers and
- * ranges. Consecutively set bits are shown as two hyphen-separated
- * decimal numbers, the smallest and largest bit numbers set in
- * the range.
- * Optionally each range can be postfixed to denote that only parts of it
- * should be set. The range will divided to groups of specific size.
- * From each group will be used only defined amount of bits.
- * Syntax: range:used_size/group_size
- * Example: 0-1023:2/256 ==> 0,1,256,257,512,513,768,769
- * The value 'N' can be used as a dynamically substituted token for the
- * maximum allowed value; i.e (nmaskbits - 1). Keep in mind that it is
- * dynamic, so if system changes cause the bitmap width to change, such
- * as more cores in a CPU list, then any ranges using N will also change.
- *
- * Returns: 0 on success, -errno on invalid input strings. Error values:
- *
- * - ``-EINVAL``: wrong region format
- * - ``-EINVAL``: invalid character in string
- * - ``-ERANGE``: bit number specified too large for mask
- * - ``-EOVERFLOW``: integer overflow in the input parameters
- */
-int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
-{
- struct region r;
- long ret;
-
- r.nbits = nmaskbits;
- bitmap_zero(maskp, r.nbits);
-
- while (buf) {
- buf = bitmap_find_region(buf);
- if (buf == NULL)
- return 0;
-
- buf = bitmap_parse_region(buf, &r);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ret = bitmap_check_region(&r);
- if (ret)
- return ret;
-
- bitmap_set_region(&r, maskp);
- }
-
- return 0;
-}
-EXPORT_SYMBOL(bitmap_parselist);
-
-
-/**
- * bitmap_parselist_user() - convert user buffer's list format ASCII
- * string to bitmap
- *
- * @ubuf: pointer to user buffer containing string.
- * @ulen: buffer size in bytes. If string is smaller than this
- * then it must be terminated with a \0.
- * @maskp: pointer to bitmap array that will contain result.
- * @nmaskbits: size of bitmap, in bits.
- *
- * Wrapper for bitmap_parselist(), providing it with user buffer.
- */
-int bitmap_parselist_user(const char __user *ubuf,
- unsigned int ulen, unsigned long *maskp,
- int nmaskbits)
-{
- char *buf;
- int ret;
-
- buf = memdup_user_nul(ubuf, ulen);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- ret = bitmap_parselist(buf, maskp, nmaskbits);
-
- kfree(buf);
- return ret;
-}
-EXPORT_SYMBOL(bitmap_parselist_user);
-
-static const char *bitmap_get_x32_reverse(const char *start,
- const char *end, u32 *num)
-{
- u32 ret = 0;
- int c, i;
-
- for (i = 0; i < 32; i += 4) {
- c = hex_to_bin(*end--);
- if (c < 0)
- return ERR_PTR(-EINVAL);
-
- ret |= c << i;
-
- if (start > end || __end_of_region(*end))
- goto out;
- }
-
- if (hex_to_bin(*end--) >= 0)
- return ERR_PTR(-EOVERFLOW);
-out:
- *num = ret;
- return end;
-}
-
-/**
- * bitmap_parse - convert an ASCII hex string into a bitmap.
- * @start: pointer to buffer containing string.
- * @buflen: buffer size in bytes. If string is smaller than this
- * then it must be terminated with a \0 or \n. In that case,
- * UINT_MAX may be provided instead of string length.
- * @maskp: pointer to bitmap array that will contain result.
- * @nmaskbits: size of bitmap, in bits.
- *
- * Commas group hex digits into chunks. Each chunk defines exactly 32
- * bits of the resultant bitmask. No chunk may specify a value larger
- * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
- * then leading 0-bits are prepended. %-EINVAL is returned for illegal
- * characters. Grouping such as "1,,5", ",44", "," or "" is allowed.
- * Leading, embedded and trailing whitespace accepted.
- */
-int bitmap_parse(const char *start, unsigned int buflen,
- unsigned long *maskp, int nmaskbits)
-{
- const char *end = strnchrnul(start, buflen, '\n') - 1;
- int chunks = BITS_TO_U32(nmaskbits);
- u32 *bitmap = (u32 *)maskp;
- int unset_bit;
- int chunk;
-
- for (chunk = 0; ; chunk++) {
- end = bitmap_find_region_reverse(start, end);
- if (start > end)
- break;
-
- if (!chunks--)
- return -EOVERFLOW;
-
-#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
- end = bitmap_get_x32_reverse(start, end, &bitmap[chunk ^ 1]);
-#else
- end = bitmap_get_x32_reverse(start, end, &bitmap[chunk]);
-#endif
- if (IS_ERR(end))
- return PTR_ERR(end);
- }
-
- unset_bit = (BITS_TO_U32(nmaskbits) - chunks) * 32;
- if (unset_bit < nmaskbits) {
- bitmap_clear(maskp, unset_bit, nmaskbits - unset_bit);
- return 0;
- }
-
- if (find_next_bit(maskp, unset_bit, nmaskbits) != unset_bit)
- return -EOVERFLOW;
-
- return 0;
-}
-EXPORT_SYMBOL(bitmap_parse);
-
/**
* bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
* @buf: pointer to a bitmap
@@ -988,7 +475,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigne
* to @dst.
*
* The positions of unset bits in @old are mapped to themselves
- * (the identify map).
+ * (the identity map).
*
* Apply the above specified mapping to @src, placing the result in
* @dst, clearing any bits previously set in @dst.
@@ -1037,7 +524,7 @@ EXPORT_SYMBOL(bitmap_remap);
* the position of the m-th set bit in @new, where m == n % w.
*
* The positions of unset bits in @old are mapped to themselves
- * (the identify map).
+ * (the identity map).
*
* Apply the above specified mapping to bit position @oldbit, returning
* the new bit position.
@@ -1220,169 +707,6 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
}
#endif /* CONFIG_NUMA */
-/*
- * Common code for bitmap_*_region() routines.
- * bitmap: array of unsigned longs corresponding to the bitmap
- * pos: the beginning of the region
- * order: region size (log base 2 of number of bits)
- * reg_op: operation(s) to perform on that region of bitmap
- *
- * Can set, verify and/or release a region of bits in a bitmap,
- * depending on which combination of REG_OP_* flag bits is set.
- *
- * A region of a bitmap is a sequence of bits in the bitmap, of
- * some size '1 << order' (a power of two), aligned to that same
- * '1 << order' power of two.
- *
- * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
- * Returns 0 in all other cases and reg_ops.
- */
-
-enum {
- REG_OP_ISFREE, /* true if region is all zero bits */
- REG_OP_ALLOC, /* set all bits in region */
- REG_OP_RELEASE, /* clear all bits in region */
-};
-
-static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op)
-{
- int nbits_reg; /* number of bits in region */
- int index; /* index first long of region in bitmap */
- int offset; /* bit offset region in bitmap[index] */
- int nlongs_reg; /* num longs spanned by region in bitmap */
- int nbitsinlong; /* num bits of region in each spanned long */
- unsigned long mask; /* bitmask for one long of region */
- int i; /* scans bitmap by longs */
- int ret = 0; /* return value */
-
- /*
- * Either nlongs_reg == 1 (for small orders that fit in one long)
- * or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
- */
- nbits_reg = 1 << order;
- index = pos / BITS_PER_LONG;
- offset = pos - (index * BITS_PER_LONG);
- nlongs_reg = BITS_TO_LONGS(nbits_reg);
- nbitsinlong = min(nbits_reg, BITS_PER_LONG);
-
- /*
- * Can't do "mask = (1UL << nbitsinlong) - 1", as that
- * overflows if nbitsinlong == BITS_PER_LONG.
- */
- mask = (1UL << (nbitsinlong - 1));
- mask += mask - 1;
- mask <<= offset;
-
- switch (reg_op) {
- case REG_OP_ISFREE:
- for (i = 0; i < nlongs_reg; i++) {
- if (bitmap[index + i] & mask)
- goto done;
- }
- ret = 1; /* all bits in region free (zero) */
- break;
-
- case REG_OP_ALLOC:
- for (i = 0; i < nlongs_reg; i++)
- bitmap[index + i] |= mask;
- break;
-
- case REG_OP_RELEASE:
- for (i = 0; i < nlongs_reg; i++)
- bitmap[index + i] &= ~mask;
- break;
- }
-done:
- return ret;
-}
-
-/**
- * bitmap_find_free_region - find a contiguous aligned mem region
- * @bitmap: array of unsigned longs corresponding to the bitmap
- * @bits: number of bits in the bitmap
- * @order: region size (log base 2 of number of bits) to find
- *
- * Find a region of free (zero) bits in a @bitmap of @bits bits and
- * allocate them (set them to one). Only consider regions of length
- * a power (@order) of two, aligned to that power of two, which
- * makes the search algorithm much faster.
- *
- * Return the bit offset in bitmap of the allocated region,
- * or -errno on failure.
- */
-int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
-{
- unsigned int pos, end; /* scans bitmap by regions of size order */
-
- for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) {
- if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
- continue;
- __reg_op(bitmap, pos, order, REG_OP_ALLOC);
- return pos;
- }
- return -ENOMEM;
-}
-EXPORT_SYMBOL(bitmap_find_free_region);
-
-/**
- * bitmap_release_region - release allocated bitmap region
- * @bitmap: array of unsigned longs corresponding to the bitmap
- * @pos: beginning of bit region to release
- * @order: region size (log base 2 of number of bits) to release
- *
- * This is the complement to __bitmap_find_free_region() and releases
- * the found region (by clearing it in the bitmap).
- *
- * No return value.
- */
-void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
-{
- __reg_op(bitmap, pos, order, REG_OP_RELEASE);
-}
-EXPORT_SYMBOL(bitmap_release_region);
-
-/**
- * bitmap_allocate_region - allocate bitmap region
- * @bitmap: array of unsigned longs corresponding to the bitmap
- * @pos: beginning of bit region to allocate
- * @order: region size (log base 2 of number of bits) to allocate
- *
- * Allocate (set bits in) a specified region of a bitmap.
- *
- * Return 0 on success, or %-EBUSY if specified region wasn't
- * free (not all bits were zero).
- */
-int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
-{
- if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
- return -EBUSY;
- return __reg_op(bitmap, pos, order, REG_OP_ALLOC);
-}
-EXPORT_SYMBOL(bitmap_allocate_region);
-
-/**
- * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
- * @dst: destination buffer
- * @src: bitmap to copy
- * @nbits: number of bits in the bitmap
- *
- * Require nbits % BITS_PER_LONG == 0.
- */
-#ifdef __BIG_ENDIAN
-void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits)
-{
- unsigned int i;
-
- for (i = 0; i < nbits/BITS_PER_LONG; i++) {
- if (BITS_PER_LONG == 64)
- dst[i] = cpu_to_le64(src[i]);
- else
- dst[i] = cpu_to_le32(src[i]);
- }
-}
-EXPORT_SYMBOL(bitmap_copy_le);
-#endif
-
unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
{
return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
diff --git a/drivers/md/bcache/closure.c b/lib/closure.c
index d8d9394a6b..c16540552d 100644
--- a/drivers/md/bcache/closure.c
+++ b/lib/closure.c
@@ -6,13 +6,13 @@
* Copyright 2012 Google, Inc.
*/
+#include <linux/closure.h>
#include <linux/debugfs.h>
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>
-#include "closure.h"
-
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
int r = flags & CLOSURE_REMAINING_MASK;
@@ -21,6 +21,10 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
if (!r) {
+ smp_acquire__after_ctrl_dep();
+
+ cl->closure_get_happened = false;
+
if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
atomic_set(&cl->remaining,
CLOSURE_REMAINING_INITIALIZER);
@@ -32,7 +36,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
closure_debug_destroy(cl);
if (destructor)
- destructor(cl);
+ destructor(&cl->work);
if (parent)
closure_put(parent);
@@ -43,16 +47,18 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
- closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
+ closure_put_after_sub(cl, atomic_sub_return_release(v, &cl->remaining));
}
+EXPORT_SYMBOL(closure_sub);
/*
* closure_put - decrement a closure's refcount
*/
void closure_put(struct closure *cl)
{
- closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
+ closure_put_after_sub(cl, atomic_dec_return_release(&cl->remaining));
}
+EXPORT_SYMBOL(closure_put);
/*
* closure_wake_up - wake up all closures on a wait list, without memory barrier
@@ -74,6 +80,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
closure_sub(cl, CLOSURE_WAITING + 1);
}
}
+EXPORT_SYMBOL(__closure_wake_up);
/**
* closure_wait - add a closure to a waitlist
@@ -87,20 +94,23 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
return false;
+ cl->closure_get_happened = true;
closure_set_waiting(cl, _RET_IP_);
atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
llist_add(&cl->list, &waitlist->list);
return true;
}
+EXPORT_SYMBOL(closure_wait);
struct closure_syncer {
struct task_struct *task;
int done;
};
-static void closure_sync_fn(struct closure *cl)
+static CLOSURE_CALLBACK(closure_sync_fn)
{
+ struct closure *cl = container_of(ws, struct closure, work);
struct closure_syncer *s = cl->s;
struct task_struct *p;
@@ -127,8 +137,9 @@ void __sched __closure_sync(struct closure *cl)
__set_current_state(TASK_RUNNING);
}
+EXPORT_SYMBOL(__closure_sync);
-#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
+#ifdef CONFIG_DEBUG_CLOSURES
static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);
@@ -144,6 +155,7 @@ void closure_debug_create(struct closure *cl)
list_add(&cl->all, &closure_list);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
+EXPORT_SYMBOL(closure_debug_create);
void closure_debug_destroy(struct closure *cl)
{
@@ -156,8 +168,7 @@ void closure_debug_destroy(struct closure *cl)
list_del(&cl->all);
spin_unlock_irqrestore(&closure_list_lock, flags);
}
-
-static struct dentry *closure_debug;
+EXPORT_SYMBOL(closure_debug_destroy);
static int debug_show(struct seq_file *f, void *data)
{
@@ -181,7 +192,7 @@ static int debug_show(struct seq_file *f, void *data)
seq_printf(f, " W %pS\n",
(void *) cl->waiting_on);
- seq_printf(f, "\n");
+ seq_puts(f, "\n");
}
spin_unlock_irq(&closure_list_lock);
@@ -190,18 +201,11 @@ static int debug_show(struct seq_file *f, void *data)
DEFINE_SHOW_ATTRIBUTE(debug);
-void __init closure_debug_init(void)
+static int __init closure_debug_init(void)
{
- if (!IS_ERR_OR_NULL(bcache_debug))
- /*
- * it is unnecessary to check return value of
- * debugfs_create_file(), we should not care
- * about this.
- */
- closure_debug = debugfs_create_file(
- "closures", 0400, bcache_debug, NULL, &debug_fops);
+ debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops);
+ return 0;
}
-#endif
+late_initcall(closure_debug_init)
-MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
-MODULE_LICENSE("GPL");
+#endif
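Based on the CLOSURE_CALLBACK() conversion visible above, callbacks now receive the embedded work_struct (named ws) rather than the closure itself, mirroring closure_sync_fn(). A hedged sketch of a callback written against this convention; struct example_op and its completion are hypothetical, not part of this patch:

        static CLOSURE_CALLBACK(example_op_done)
        {
                /* Recover the closure from the embedded work_struct argument. */
                struct closure *cl = container_of(ws, struct closure, work);
                struct example_op *op = container_of(cl, struct example_op, cl);

                complete(&op->done);    /* hypothetical completion owned by the caller */
        }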
diff --git a/lib/cpumask.c b/lib/cpumask.c
index a7fd02b5ae..e77ee9d46f 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -14,7 +14,7 @@
* @start: the start point of the iteration
* @wrap: assume @n crossing @start terminates the iteration
*
- * Returns >= nr_cpu_ids on completion
+ * Return: >= nr_cpu_ids on completion
*
* Note: the @wrap argument is required for the start condition when
* we cannot assume @start is set in @mask.
@@ -48,8 +48,9 @@ EXPORT_SYMBOL(cpumask_next_wrap);
* @node: memory node from which to allocate or %NUMA_NO_NODE
*
* Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
- * a nop returning a constant 1 (in <linux/cpumask.h>)
- * Returns TRUE if memory allocation succeeded, FALSE otherwise.
+ * a nop returning a constant 1 (in <linux/cpumask.h>).
+ *
+ * Return: TRUE if memory allocation succeeded, FALSE otherwise.
*
* In addition, mask will be NULL if this fails. Note that gcc is
* usually smart enough to know that mask can never be NULL if
@@ -115,7 +116,7 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
* @i: index number
* @node: local numa_node
*
- * Returns online CPU according to a numa aware policy; local cpus are returned
+ * Return: online CPU according to a numa aware policy; local cpus are returned
* first, followed by non-local ones, then it wraps around.
*
* For those who wants to enumerate all CPUs based on their NUMA distances,
@@ -146,9 +147,7 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
/* Wrap: we always want a cpu. */
i %= num_online_cpus();
- cpu = (node == NUMA_NO_NODE) ?
- cpumask_nth(i, cpu_online_mask) :
- sched_numa_find_nth_cpu(cpu_online_mask, i, node);
+ cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);
WARN_ON(cpu >= nr_cpu_ids);
return cpu;
@@ -165,7 +164,7 @@ static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
* Iterated calls using the same srcp1 and srcp2 will be distributed within
* their intersection.
*
- * Returns >= nr_cpu_ids if the intersection is empty.
+ * Return: >= nr_cpu_ids if the intersection is empty.
*/
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -184,6 +183,12 @@ unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
+/**
+ * cpumask_any_distribute - Return an arbitrary cpu from srcp
+ * @srcp: &cpumask for selection
+ *
+ * Return: >= nr_cpu_ids if @srcp is empty.
+ */
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
unsigned int next, prev;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a517256a27..fb12a9bacd 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -89,7 +89,7 @@ static int debug_objects_pool_size __read_mostly
static int debug_objects_pool_min_level __read_mostly
= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
-static struct kmem_cache *obj_cache __read_mostly;
+static struct kmem_cache *obj_cache __ro_after_init;
/*
* Track numbers of kmem_cache_alloc()/free() calls done.
@@ -620,9 +620,8 @@ static void debug_objects_fill_pool(void)
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
debug_objects_fill_pool();
@@ -643,24 +642,18 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
obj->state = ODEBUG_STATE_INIT;
- break;
-
- case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "init");
- debug_object_fixup(descr->fixup_init, addr, state);
- return;
-
- case ODEBUG_STATE_DESTROYED:
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "init");
return;
default:
break;
}
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "init");
+
+ if (o.state == ODEBUG_STATE_ACTIVE)
+ debug_object_fixup(descr->fixup_init, addr, o.state);
}
/**
@@ -701,11 +694,9 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
- enum debug_obj_state state;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- int ret;
if (!debug_objects_enabled)
return 0;
@@ -717,49 +708,38 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object_or_alloc(addr, db, descr, false, true);
- if (likely(!IS_ERR_OR_NULL(obj))) {
- bool print_object = false;
-
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return 0;
+ } else if (likely(!IS_ERR(obj))) {
switch (obj->state) {
- case ODEBUG_STATE_INIT:
- case ODEBUG_STATE_INACTIVE:
- obj->state = ODEBUG_STATE_ACTIVE;
- ret = 0;
- break;
-
case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr, state);
- return ret ? 0 : -EINVAL;
-
case ODEBUG_STATE_DESTROYED:
- print_object = true;
- ret = -EINVAL;
+ o = *obj;
break;
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_ACTIVE;
+ fallthrough;
default:
- ret = 0;
- break;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return 0;
}
- raw_spin_unlock_irqrestore(&db->lock, flags);
- if (print_object)
- debug_print_object(obj, "activate");
- return ret;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "activate");
- /* If NULL the allocation has hit OOM */
- if (!obj) {
- debug_objects_oom();
- return 0;
+ switch (o.state) {
+ case ODEBUG_STATE_ACTIVE:
+ case ODEBUG_STATE_NOTAVAILABLE:
+ if (debug_object_fixup(descr->fixup_activate, addr, o.state))
+ return 0;
+ fallthrough;
+ default:
+ return -EINVAL;
}
-
- /* Object is neither static nor tracked. It's not initialized */
- debug_print_object(&o, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
- return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
@@ -770,10 +750,10 @@ EXPORT_SYMBOL_GPL(debug_object_activate);
*/
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -785,33 +765,24 @@ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
obj = lookup_object(addr, db);
if (obj) {
switch (obj->state) {
+ case ODEBUG_STATE_DESTROYED:
+ break;
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
case ODEBUG_STATE_ACTIVE:
- if (!obj->astate)
- obj->state = ODEBUG_STATE_INACTIVE;
- else
- print_object = true;
- break;
-
- case ODEBUG_STATE_DESTROYED:
- print_object = true;
- break;
+ if (obj->astate)
+ break;
+ obj->state = ODEBUG_STATE_INACTIVE;
+ fallthrough;
default:
- break;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
}
+ o = *obj;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- debug_print_object(&o, "deactivate");
- } else if (print_object) {
- debug_print_object(obj, "deactivate");
- }
+ debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
@@ -822,11 +793,9 @@ EXPORT_SYMBOL_GPL(debug_object_deactivate);
*/
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -836,32 +805,31 @@ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
- if (!obj)
- goto out_unlock;
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ case ODEBUG_STATE_DESTROYED:
+ break;
case ODEBUG_STATE_NONE:
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
obj->state = ODEBUG_STATE_DESTROYED;
- break;
- case ODEBUG_STATE_ACTIVE:
- state = obj->state;
+ fallthrough;
+ default:
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "destroy");
- debug_object_fixup(descr->fixup_destroy, addr, state);
return;
-
- case ODEBUG_STATE_DESTROYED:
- print_object = true;
- break;
- default:
- break;
}
-out_unlock:
+
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (print_object)
- debug_print_object(obj, "destroy");
+ debug_print_object(&o, "destroy");
+
+ if (o.state == ODEBUG_STATE_ACTIVE)
+ debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
@@ -872,9 +840,8 @@ EXPORT_SYMBOL_GPL(debug_object_destroy);
*/
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
if (!debug_objects_enabled)
@@ -885,24 +852,26 @@ void debug_object_free(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
- if (!obj)
- goto out_unlock;
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "free");
- debug_object_fixup(descr->fixup_free, addr, state);
- return;
+ break;
default:
hlist_del(&obj->node);
raw_spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
return;
}
-out_unlock:
+
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "free");
+
+ debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);
@@ -954,10 +923,10 @@ void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
unsigned int expect, unsigned int next)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -970,28 +939,19 @@ debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
if (obj) {
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- if (obj->astate == expect)
- obj->astate = next;
- else
- print_object = true;
- break;
-
+ if (obj->astate != expect)
+ break;
+ obj->astate = next;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
default:
- print_object = true;
break;
}
+ o = *obj;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- debug_print_object(&o, "active_state");
- } else if (print_object) {
- debug_print_object(obj, "active_state");
- }
+ debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
@@ -999,12 +959,10 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
- const struct debug_obj_descr *descr;
- enum debug_obj_state state;
+ int cnt, objs_checked = 0;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
struct hlist_node *tmp;
- struct debug_obj *obj;
- int cnt, objs_checked = 0;
saddr = (unsigned long) address;
eaddr = saddr + size;
@@ -1026,12 +984,10 @@ repeat:
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- descr = obj->descr;
- state = obj->state;
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "free");
- debug_object_fixup(descr->fixup_free,
- (void *) oaddr, state);
+ debug_print_object(&o, "free");
+ debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
goto repeat;
default:
hlist_del(&obj->node);
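The debugobjects rework above keeps repeating one pattern: snapshot the tracked object into an on-stack struct debug_obj while the bucket lock is held, drop the lock, and only then print or run fixups against the stable copy. A minimal sketch of that snapshot-under-lock pattern, reusing the file-local helpers named in the hunks above; the surrounding function is hypothetical and not part of this diff:

static void report_object_state(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db = get_bucket((unsigned long)addr);
	struct debug_obj *obj;
	unsigned long flags;

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object(addr, db);
	if (obj)
		o = *obj;	/* copy while the bucket lock is held */
	raw_spin_unlock_irqrestore(&db->lock, flags);

	/* The on-stack copy cannot change under us, so printing unlocked is safe. */
	debug_print_object(&o, "report");
}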
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 353268b9f1..8428941589 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -133,9 +133,6 @@
#ifdef CONFIG_ARM
# define XZ_DEC_ARM
#endif
-#ifdef CONFIG_IA64
-# define XZ_DEC_IA64
-#endif
#ifdef CONFIG_SPARC
# define XZ_DEC_SPARC
#endif
diff --git a/lib/errname.c b/lib/errname.c
index 0c336b0f12..4f9112b38f 100644
--- a/lib/errname.c
+++ b/lib/errname.c
@@ -222,3 +222,4 @@ const char *errname(int err)
return err > 0 ? name + 1 : name;
}
+EXPORT_SYMBOL(errname);
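Exporting errname() lets modules resolve an error number to its symbolic name directly, using the same table the %pe printk format already consults for error pointers. A hedged sketch of module-side use (illustrative only; note that errname() can return NULL for codes it does not know about):

static void demo_report_error(int err)
{
	const char *name = errname(err);	/* e.g. "-ENOMEM" for err == -ENOMEM */

	pr_err("operation failed: %s (%d)\n", name ?: "unknown error", err);

	/* %pe prints the same name when all you have is an error pointer. */
	pr_err("operation failed: %pe\n", ERR_PTR(err));
}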
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index c035fde66a..7ee468ef21 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -98,7 +98,7 @@ config FONT_10x18
config FONT_SUN8x16
bool "Sparc console 8x16 font"
- depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
+ depends on (FRAMEBUFFER_CONSOLE && (FONTS || SPARC)) || BOOTX_TEXT
help
This is the high resolution console font for Sun machines. Say Y.
diff --git a/lib/fw_table.c b/lib/fw_table.c
new file mode 100644
index 0000000000..c49a09ee38
--- /dev/null
+++ b/lib/fw_table.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * fw_table.c - Parsing support for ACPI and ACPI-like tables provided by
+ * platform or device firmware
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2023 Intel Corp.
+ */
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+enum acpi_subtable_type {
+ ACPI_SUBTABLE_COMMON,
+ ACPI_SUBTABLE_HMAT,
+ ACPI_SUBTABLE_PRMT,
+ ACPI_SUBTABLE_CEDT,
+};
+
+struct acpi_subtable_entry {
+ union acpi_subtable_headers *hdr;
+ enum acpi_subtable_type type;
+};
+
+static unsigned long __init_or_acpilib
+acpi_get_entry_type(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return entry->hdr->common.type;
+ case ACPI_SUBTABLE_HMAT:
+ return entry->hdr->hmat.type;
+ case ACPI_SUBTABLE_PRMT:
+ return 0;
+ case ACPI_SUBTABLE_CEDT:
+ return entry->hdr->cedt.type;
+ }
+ return 0;
+}
+
+static unsigned long __init_or_acpilib
+acpi_get_entry_length(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return entry->hdr->common.length;
+ case ACPI_SUBTABLE_HMAT:
+ return entry->hdr->hmat.length;
+ case ACPI_SUBTABLE_PRMT:
+ return entry->hdr->prmt.length;
+ case ACPI_SUBTABLE_CEDT:
+ return entry->hdr->cedt.length;
+ }
+ return 0;
+}
+
+static unsigned long __init_or_acpilib
+acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
+{
+ switch (entry->type) {
+ case ACPI_SUBTABLE_COMMON:
+ return sizeof(entry->hdr->common);
+ case ACPI_SUBTABLE_HMAT:
+ return sizeof(entry->hdr->hmat);
+ case ACPI_SUBTABLE_PRMT:
+ return sizeof(entry->hdr->prmt);
+ case ACPI_SUBTABLE_CEDT:
+ return sizeof(entry->hdr->cedt);
+ }
+ return 0;
+}
+
+static enum acpi_subtable_type __init_or_acpilib
+acpi_get_subtable_type(char *id)
+{
+ if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
+ return ACPI_SUBTABLE_HMAT;
+ if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
+ return ACPI_SUBTABLE_PRMT;
+ if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
+ return ACPI_SUBTABLE_CEDT;
+ return ACPI_SUBTABLE_COMMON;
+}
+
+static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
+ union acpi_subtable_headers *hdr,
+ unsigned long end)
+{
+ if (proc->handler)
+ return proc->handler(hdr, end);
+ if (proc->handler_arg)
+ return proc->handler_arg(hdr, proc->arg, end);
+ return -EINVAL;
+}
+
+/**
+ * acpi_parse_entries_array - for each proc_num find a suitable subtable
+ *
+ * @id: table id (for debugging purposes)
+ * @table_size: size of the root table
+ * @table_header: pointer to the start of the table
+ * @proc: array of acpi_subtable_proc structs, each pairing an entry id
+ *	with the handler to run on it
+ * @proc_num: number of entries in @proc
+ * @max_entries: maximum number of entries to process (0 means no limit)
+ *
+ * For each element of @proc, find the subtables with a matching id and run
+ * the associated handler on them. The assumption is that there is only a
+ * single handler for any particular entry id.
+ *
+ * The table_size is not the size of the complete ACPI table (the length
+ * field in the header struct), but only the size of the root table; i.e.,
+ * the offset from the very first byte of the complete ACPI table, to the
+ * first byte of the very first subtable.
+ *
+ * On success, the return value is the total number of matching entries
+ * across all proc handlers. Otherwise, -ENODEV or -EINVAL is returned.
+ */
+int __init_or_acpilib
+acpi_parse_entries_array(char *id, unsigned long table_size,
+ struct acpi_table_header *table_header,
+ struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries)
+{
+ unsigned long table_end, subtable_len, entry_len;
+ struct acpi_subtable_entry entry;
+ int count = 0;
+ int i;
+
+ table_end = (unsigned long)table_header + table_header->length;
+
+ /* Parse all entries looking for a match. */
+
+ entry.type = acpi_get_subtable_type(id);
+ entry.hdr = (union acpi_subtable_headers *)
+ ((unsigned long)table_header + table_size);
+ subtable_len = acpi_get_subtable_header_length(&entry);
+
+ while (((unsigned long)entry.hdr) + subtable_len < table_end) {
+ for (i = 0; i < proc_num; i++) {
+ if (acpi_get_entry_type(&entry) != proc[i].id)
+ continue;
+
+ if (!max_entries || count < max_entries)
+ if (call_handler(&proc[i], entry.hdr, table_end))
+ return -EINVAL;
+
+ proc[i].count++;
+ count++;
+ break;
+ }
+
+ /*
+ * A zero-length entry can never advance the cursor; bail out
+ * with an error instead of looping forever.
+ */
+ entry_len = acpi_get_entry_length(&entry);
+ if (entry_len == 0) {
+ pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id);
+ return -EINVAL;
+ }
+
+ entry.hdr = (union acpi_subtable_headers *)
+ ((unsigned long)entry.hdr + entry_len);
+ }
+
+ if (max_entries && count > max_entries) {
+ pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
+ id, proc->id, count - max_entries, count);
+ }
+
+ return count;
+}
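acpi_parse_entries_array() is driven by an array of struct acpi_subtable_proc, one element per subtable id of interest; each matching subtable is handed to that element's handler and counted. A hedged usage sketch (illustrative only, not part of this diff; the handler body is a placeholder and CEDT/CHBS is just one plausible table/subtable pair):

static int __init handle_chbs(union acpi_subtable_headers *header,
			      const unsigned long end)
{
	/* Inspect one ACPI_CEDT_TYPE_CHBS subtable here. */
	return 0;
}

static int __init parse_cedt_chbs(struct acpi_table_header *cedt)
{
	struct acpi_subtable_proc proc[] = {
		{
			.id      = ACPI_CEDT_TYPE_CHBS,
			.handler = handle_chbs,
		},
	};

	/* Subtables start immediately after the fixed CEDT header. */
	return acpi_parse_entries_array(ACPI_SIG_CEDT,
					sizeof(struct acpi_table_cedt),
					cedt, proc, ARRAY_SIZE(proc), 0);
}

A non-negative return value is the number of matching subtables handled; passing 0 for max_entries removes the cap, matching the "!max_entries || count < max_entries" test in the loop above.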
diff --git a/lib/generic-radix-tree.c b/lib/generic-radix-tree.c
index 7dfa88282b..41f1bcdc44 100644
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c
@@ -1,4 +1,5 @@
+#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
@@ -212,6 +213,64 @@ restart:
}
EXPORT_SYMBOL(__genradix_iter_peek);
+void *__genradix_iter_peek_prev(struct genradix_iter *iter,
+ struct __genradix *radix,
+ size_t objs_per_page,
+ size_t obj_size_plus_page_remainder)
+{
+ struct genradix_root *r;
+ struct genradix_node *n;
+ unsigned level, i;
+
+ if (iter->offset == SIZE_MAX)
+ return NULL;
+
+restart:
+ r = READ_ONCE(radix->root);
+ if (!r)
+ return NULL;
+
+ n = genradix_root_to_node(r);
+ level = genradix_root_to_depth(r);
+
+ if (ilog2(iter->offset) >= genradix_depth_shift(level)) {
+ iter->offset = genradix_depth_size(level);
+ iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+
+ iter->offset -= obj_size_plus_page_remainder;
+ iter->pos--;
+ }
+
+ while (level) {
+ level--;
+
+ i = (iter->offset >> genradix_depth_shift(level)) &
+ (GENRADIX_ARY - 1);
+
+ while (!n->children[i]) {
+ size_t objs_per_ptr = genradix_depth_size(level);
+
+ iter->offset = round_down(iter->offset, objs_per_ptr);
+ iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
+
+ if (!iter->offset)
+ return NULL;
+
+ iter->offset -= obj_size_plus_page_remainder;
+ iter->pos--;
+
+ if (!i)
+ goto restart;
+ --i;
+ }
+
+ n = n->children[i];
+ }
+
+ return &n->data[iter->offset & (PAGE_SIZE - 1)];
+}
+EXPORT_SYMBOL(__genradix_iter_peek_prev);
+
static void genradix_free_recurse(struct genradix_node *n, unsigned level)
{
if (level) {
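__genradix_iter_peek_prev() gives the generic radix tree an efficient backwards walk: starting from the iterator's current offset it finds the previous populated slot, stepping back over unallocated nodes. A hedged sketch of reverse iteration (illustrative only; it assumes the genradix_for_each_reverse() wrapper added to <linux/generic-radix-tree.h> alongside this helper):

struct item {
	u64 id;
};

static void walk_items_backwards(void)
{
	GENRADIX(struct item) items;
	struct genradix_iter iter;
	struct item *p;

	genradix_init(&items);

	/* ... slots populated elsewhere with genradix_ptr_alloc() ... */

	/* Visit populated slots from the highest offset back down to zero. */
	genradix_for_each_reverse(&items, iter, p)
		pr_info("item %zu has id %llu\n", iter.pos, p->id);

	genradix_free(&items);
}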
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 27234a820e..8ff6824a10 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1,5 +1,4 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
@@ -10,192 +9,71 @@
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
-#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
+#include <linux/iov_iter.h>
-/* covers ubuf and kbuf alike */
-#define iterate_buf(i, n, base, len, off, __p, STEP) { \
- size_t __maybe_unused off = 0; \
- len = n; \
- base = __p + i->iov_offset; \
- len -= (STEP); \
- i->iov_offset += len; \
- n = len; \
-}
-
-/* covers iovec and kvec alike */
-#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
- size_t off = 0; \
- size_t skip = i->iov_offset; \
- do { \
- len = min(n, __p->iov_len - skip); \
- if (likely(len)) { \
- base = __p->iov_base + skip; \
- len -= (STEP); \
- off += len; \
- skip += len; \
- n -= len; \
- if (skip < __p->iov_len) \
- break; \
- } \
- __p++; \
- skip = 0; \
- } while (n); \
- i->iov_offset = skip; \
- n = off; \
-}
-
-#define iterate_bvec(i, n, base, len, off, p, STEP) { \
- size_t off = 0; \
- unsigned skip = i->iov_offset; \
- while (n) { \
- unsigned offset = p->bv_offset + skip; \
- unsigned left; \
- void *kaddr = kmap_local_page(p->bv_page + \
- offset / PAGE_SIZE); \
- base = kaddr + offset % PAGE_SIZE; \
- len = min(min(n, (size_t)(p->bv_len - skip)), \
- (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
- left = (STEP); \
- kunmap_local(kaddr); \
- len -= left; \
- off += len; \
- skip += len; \
- if (skip == p->bv_len) { \
- skip = 0; \
- p++; \
- } \
- n -= len; \
- if (left) \
- break; \
- } \
- i->iov_offset = skip; \
- n = off; \
-}
-
-#define iterate_xarray(i, n, base, len, __off, STEP) { \
- __label__ __out; \
- size_t __off = 0; \
- struct folio *folio; \
- loff_t start = i->xarray_start + i->iov_offset; \
- pgoff_t index = start / PAGE_SIZE; \
- XA_STATE(xas, i->xarray, index); \
- \
- len = PAGE_SIZE - offset_in_page(start); \
- rcu_read_lock(); \
- xas_for_each(&xas, folio, ULONG_MAX) { \
- unsigned left; \
- size_t offset; \
- if (xas_retry(&xas, folio)) \
- continue; \
- if (WARN_ON(xa_is_value(folio))) \
- break; \
- if (WARN_ON(folio_test_hugetlb(folio))) \
- break; \
- offset = offset_in_folio(folio, start + __off); \
- while (offset < folio_size(folio)) { \
- base = kmap_local_folio(folio, offset); \
- len = min(n, len); \
- left = (STEP); \
- kunmap_local(base); \
- len -= left; \
- __off += len; \
- n -= len; \
- if (left || n == 0) \
- goto __out; \
- offset += len; \
- len = PAGE_SIZE; \
- } \
- } \
-__out: \
- rcu_read_unlock(); \
- i->iov_offset += __off; \
- n = __off; \
-}
-
-#define __iterate_and_advance(i, n, base, len, off, I, K) { \
- if (unlikely(i->count < n)) \
- n = i->count; \
- if (likely(n)) { \
- if (likely(iter_is_ubuf(i))) { \
- void __user *base; \
- size_t len; \
- iterate_buf(i, n, base, len, off, \
- i->ubuf, (I)) \
- } else if (likely(iter_is_iovec(i))) { \
- const struct iovec *iov = iter_iov(i); \
- void __user *base; \
- size_t len; \
- iterate_iovec(i, n, base, len, off, \
- iov, (I)) \
- i->nr_segs -= iov - iter_iov(i); \
- i->__iov = iov; \
- } else if (iov_iter_is_bvec(i)) { \
- const struct bio_vec *bvec = i->bvec; \
- void *base; \
- size_t len; \
- iterate_bvec(i, n, base, len, off, \
- bvec, (K)) \
- i->nr_segs -= bvec - i->bvec; \
- i->bvec = bvec; \
- } else if (iov_iter_is_kvec(i)) { \
- const struct kvec *kvec = i->kvec; \
- void *base; \
- size_t len; \
- iterate_iovec(i, n, base, len, off, \
- kvec, (K)) \
- i->nr_segs -= kvec - i->kvec; \
- i->kvec = kvec; \
- } else if (iov_iter_is_xarray(i)) { \
- void *base; \
- size_t len; \
- iterate_xarray(i, n, base, len, off, \
- (K)) \
- } \
- i->count -= n; \
- } \
-}
-#define iterate_and_advance(i, n, base, len, off, I, K) \
- __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
-
-static int copyout(void __user *to, const void *from, size_t n)
+static __always_inline
+size_t copy_to_user_iter(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
if (should_fail_usercopy())
- return n;
- if (access_ok(to, n)) {
- instrument_copy_to_user(to, from, n);
- n = raw_copy_to_user(to, from, n);
+ return len;
+ if (access_ok(iter_to, len)) {
+ from += progress;
+ instrument_copy_to_user(iter_to, from, len);
+ len = raw_copy_to_user(iter_to, from, len);
}
- return n;
+ return len;
}
-static int copyout_nofault(void __user *to, const void *from, size_t n)
+static __always_inline
+size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
{
- long res;
+ ssize_t res;
if (should_fail_usercopy())
- return n;
-
- res = copy_to_user_nofault(to, from, n);
+ return len;
- return res < 0 ? n : res;
+ from += progress;
+ res = copy_to_user_nofault(iter_to, from, len);
+ return res < 0 ? len : res;
}
-static int copyin(void *to, const void __user *from, size_t n)
+static __always_inline
+size_t copy_from_user_iter(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
{
- size_t res = n;
+ size_t res = len;
if (should_fail_usercopy())
- return n;
- if (access_ok(from, n)) {
- instrument_copy_from_user_before(to, from, n);
- res = raw_copy_from_user(to, from, n);
- instrument_copy_from_user_after(to, from, n, res);
+ return len;
+ if (access_ok(iter_from, len)) {
+ to += progress;
+ instrument_copy_from_user_before(to, iter_from, len);
+ res = raw_copy_from_user(to, iter_from, len);
+ instrument_copy_from_user_after(to, iter_from, len, res);
}
return res;
}
+static __always_inline
+size_t memcpy_to_iter(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ memcpy(iter_to, from + progress, len);
+ return 0;
+}
+
+static __always_inline
+size_t memcpy_from_iter(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ memcpy(to + progress, iter_from, len);
+ return 0;
+}
+
/*
* fault_in_iov_iter_readable - fault in iov iterator for reading
* @i: iterator
@@ -290,7 +168,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
.iter_type = ITER_IOVEC,
.copy_mc = false,
.nofault = false,
- .user_backed = true,
.data_source = direction,
.__iov = iov,
.nr_segs = nr_segs,
@@ -300,36 +177,35 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
}
EXPORT_SYMBOL(iov_iter_init);
-static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
- __wsum sum, size_t off)
-{
- __wsum next = csum_partial_copy_nocheck(from, to, len);
- return csum_block_add(sum, next, off);
-}
-
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(i->data_source))
return 0;
if (user_backed_iter(i))
might_fault();
- iterate_and_advance(i, bytes, base, len, off,
- copyout(base, addr + off, len),
- memcpy(base, addr + off, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, (void *)addr,
+ copy_to_user_iter, memcpy_to_iter);
}
EXPORT_SYMBOL(_copy_to_iter);
#ifdef CONFIG_ARCH_HAS_COPY_MC
-static int copyout_mc(void __user *to, const void *from, size_t n)
-{
- if (access_ok(to, n)) {
- instrument_copy_to_user(to, from, n);
- n = copy_mc_to_user((__force void *) to, from, n);
+static __always_inline
+size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ if (access_ok(iter_to, len)) {
+ from += progress;
+ instrument_copy_to_user(iter_to, from, len);
+ len = copy_mc_to_user(iter_to, from, len);
}
- return n;
+ return len;
+}
+
+static __always_inline
+size_t memcpy_to_iter_mc(void *iter_to, size_t progress,
+ size_t len, void *from, void *priv2)
+{
+ return copy_mc_to_kernel(iter_to, from + progress, len);
}
/**
@@ -362,22 +238,35 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
return 0;
if (user_backed_iter(i))
might_fault();
- __iterate_and_advance(i, bytes, base, len, off,
- copyout_mc(base, addr + off, len),
- copy_mc_to_kernel(base, addr + off, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, (void *)addr,
+ copy_to_user_iter_mc, memcpy_to_iter_mc);
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
-static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from,
- size_t size)
+static __always_inline
+size_t memcpy_from_iter_mc(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return copy_mc_to_kernel(to + progress, iter_from, len);
+}
+
+static size_t __copy_from_iter_mc(void *addr, size_t bytes, struct iov_iter *i)
+{
+ if (unlikely(i->count < bytes))
+ bytes = i->count;
+ if (unlikely(!bytes))
+ return 0;
+ return iterate_bvec(i, bytes, addr, NULL, memcpy_from_iter_mc);
+}
+
+static __always_inline
+size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (iov_iter_is_copy_mc(i))
- return (void *)copy_mc_to_kernel(to, from, size);
- return memcpy(to, from, size);
+ if (unlikely(iov_iter_is_copy_mc(i)))
+ return __copy_from_iter_mc(addr, bytes, i);
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter, memcpy_from_iter);
}
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
@@ -387,30 +276,44 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
if (user_backed_iter(i))
might_fault();
- iterate_and_advance(i, bytes, base, len, off,
- copyin(addr + off, base, len),
- memcpy_from_iter(i, addr + off, base, len)
- )
-
- return bytes;
+ return __copy_from_iter(addr, bytes, i);
}
EXPORT_SYMBOL(_copy_from_iter);
+static __always_inline
+size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);
+}
+
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
if (WARN_ON_ONCE(!i->data_source))
return 0;
- iterate_and_advance(i, bytes, base, len, off,
- __copy_from_user_inatomic_nocache(addr + off, base, len),
- memcpy(addr + off, base, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter_nocache,
+ memcpy_from_iter);
}
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+static __always_inline
+size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ return __copy_from_user_flushcache(to + progress, iter_from, len);
+}
+
+static __always_inline
+size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress,
+ size_t len, void *to, void *priv2)
+{
+ memcpy_flushcache(to + progress, iter_from, len);
+ return 0;
+}
+
/**
* _copy_from_iter_flushcache - write destination through cpu cache
* @addr: destination kernel address
@@ -432,12 +335,9 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
if (WARN_ON_ONCE(!i->data_source))
return 0;
- iterate_and_advance(i, bytes, base, len, off,
- __copy_from_user_flushcache(addr + off, base, len),
- memcpy_flushcache(addr + off, base, len)
- )
-
- return bytes;
+ return iterate_and_advance(i, bytes, addr,
+ copy_from_user_iter_flushcache,
+ memcpy_from_iter_flushcache);
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif
@@ -509,10 +409,9 @@ size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t byte
void *kaddr = kmap_local_page(page);
size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
- iterate_and_advance(i, n, base, len, off,
- copyout_nofault(base, kaddr + offset + off, len),
- memcpy(base, kaddr + offset + off, len)
- )
+ n = iterate_and_advance(i, n, kaddr + offset,
+ copy_to_user_iter_nofault,
+ memcpy_to_iter);
kunmap_local(kaddr);
res += n;
bytes -= n;
@@ -555,14 +454,25 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
}
EXPORT_SYMBOL(copy_page_from_iter);
-size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+static __always_inline
+size_t zero_to_user_iter(void __user *iter_to, size_t progress,
+ size_t len, void *priv, void *priv2)
{
- iterate_and_advance(i, bytes, base, len, count,
- clear_user(base, len),
- memset(base, 0, len)
- )
+ return clear_user(iter_to, len);
+}
+
+static __always_inline
+size_t zero_to_iter(void *iter_to, size_t progress,
+ size_t len, void *priv, void *priv2)
+{
+ memset(iter_to, 0, len);
+ return 0;
+}
- return bytes;
+size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
+{
+ return iterate_and_advance(i, bytes, NULL,
+ zero_to_user_iter, zero_to_iter);
}
EXPORT_SYMBOL(iov_iter_zero);
@@ -587,10 +497,7 @@ size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
}
p = kmap_atomic(page) + offset;
- iterate_and_advance(i, n, base, len, off,
- copyin(p + off, base, len),
- memcpy_from_iter(i, p + off, base, len)
- )
+ n = __copy_from_iter(p, n, i);
kunmap_atomic(p);
copied += n;
offset += n;
@@ -1181,78 +1088,6 @@ ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
- struct iov_iter *i)
-{
- __wsum sum, next;
- sum = *csum;
- if (WARN_ON_ONCE(!i->data_source))
- return 0;
-
- iterate_and_advance(i, bytes, base, len, off, ({
- next = csum_and_copy_from_user(base, addr + off, len);
- sum = csum_block_add(sum, next, off);
- next ? 0 : len;
- }), ({
- sum = csum_and_memcpy(addr + off, base, len, sum, off);
- })
- )
- *csum = sum;
- return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_from_iter);
-
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
- struct iov_iter *i)
-{
- struct csum_state *csstate = _csstate;
- __wsum sum, next;
-
- if (WARN_ON_ONCE(i->data_source))
- return 0;
- if (unlikely(iov_iter_is_discard(i))) {
- // can't use csum_memcpy() for that one - data is not copied
- csstate->csum = csum_block_add(csstate->csum,
- csum_partial(addr, bytes, 0),
- csstate->off);
- csstate->off += bytes;
- return bytes;
- }
-
- sum = csum_shift(csstate->csum, csstate->off);
- iterate_and_advance(i, bytes, base, len, off, ({
- next = csum_and_copy_to_user(addr + off, base, len);
- sum = csum_block_add(sum, next, off);
- next ? 0 : len;
- }), ({
- sum = csum_and_memcpy(base, addr + off, len, sum, off);
- })
- )
- csstate->csum = csum_shift(sum, csstate->off);
- csstate->off += bytes;
- return bytes;
-}
-EXPORT_SYMBOL(csum_and_copy_to_iter);
-
-size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
- struct iov_iter *i)
-{
-#ifdef CONFIG_CRYPTO_HASH
- struct ahash_request *hash = hashp;
- struct scatterlist sg;
- size_t copied;
-
- copied = copy_to_iter(addr, bytes, i);
- sg_init_one(&sg, addr, copied);
- ahash_request_set_crypt(hash, &sg, NULL, copied);
- crypto_ahash_update(hash);
- return copied;
-#else
- return 0;
-#endif
-}
-EXPORT_SYMBOL(hash_and_copy_to_iter);
-
static int iov_npages(const struct iov_iter *i, int maxpages)
{
size_t skip = i->iov_offset, size = i->count;
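The iov_iter rewrite above replaces the large iterate_*() macros with ordinary step functions: iterate_and_advance() is given one callback for user-backed segments and one for kernel segments, each receiving the base of the current chunk, the running progress, the chunk length, and the caller's private pointer, and returning how many bytes it could not process. A hedged sketch of what a new consumer of this interface could look like; the XOR transform and every function name here are invented for illustration and are not part of this diff:

static __always_inline
size_t xor_to_user_iter(void __user *iter_to, size_t progress,
			size_t len, void *priv, void *priv2)
{
	/*
	 * Placeholder: a real user-space step would bounce through a kernel
	 * buffer and raw_copy_to_user() it. Returning len reports the whole
	 * chunk as unprocessed, which ends the iteration early.
	 */
	return len;
}

static __always_inline
size_t xor_to_kernel_iter(void *iter_to, size_t progress,
			  size_t len, void *priv, void *priv2)
{
	u8 *src = priv;
	u8 *dst = iter_to;
	size_t n;

	for (n = 0; n < len; n++)
		dst[n] = src[progress + n] ^ 0xff;
	return 0;	/* zero bytes left over: the whole chunk was consumed */
}

static size_t xor_copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	return iterate_and_advance(i, bytes, (void *)addr,
				   xor_to_user_iter, xor_to_kernel_iter);
}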
diff --git a/lib/kobject.c b/lib/kobject.c
index 59dbcbdb1c..72fa20f405 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -74,10 +74,12 @@ static int create_dir(struct kobject *kobj)
if (error)
return error;
- error = sysfs_create_groups(kobj, ktype->default_groups);
- if (error) {
- sysfs_remove_dir(kobj);
- return error;
+ if (ktype) {
+ error = sysfs_create_groups(kobj, ktype->default_groups);
+ if (error) {
+ sysfs_remove_dir(kobj);
+ return error;
+ }
}
/*
@@ -589,7 +591,8 @@ static void __kobject_del(struct kobject *kobj)
sd = kobj->sd;
ktype = get_ktype(kobj);
- sysfs_remove_groups(kobj, ktype->default_groups);
+ if (ktype)
+ sysfs_remove_groups(kobj, ktype->default_groups);
/* send "remove" if the caller did not do it but sent "add" */
if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
@@ -666,6 +669,10 @@ static void kobject_cleanup(struct kobject *kobj)
pr_debug("'%s' (%p): %s, parent %p\n",
kobject_name(kobj), kobj, __func__, kobj->parent);
+ if (t && !t->release)
+ pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
+ kobject_name(kobj), kobj);
+
/* remove from sysfs if the caller did not do it */
if (kobj->state_in_sysfs) {
pr_debug("'%s' (%p): auto cleanup kobject_del\n",
@@ -676,13 +683,10 @@ static void kobject_cleanup(struct kobject *kobj)
parent = NULL;
}
- if (t->release) {
+ if (t && t->release) {
pr_debug("'%s' (%p): calling ktype release\n",
kobject_name(kobj), kobj);
t->release(kobj);
- } else {
- pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
- kobject_name(kobj), kobj);
}
/* free name if we allocated it */
@@ -1056,7 +1060,7 @@ const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *pa
{
const struct kobj_ns_type_operations *ops = NULL;
- if (parent && parent->ktype->child_ns_type)
+ if (parent && parent->ktype && parent->ktype->child_ns_type)
ops = parent->ktype->child_ns_type(parent);
return ops;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 7c44b7ae4c..fb9a2f06dd 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -254,10 +254,10 @@ static int init_uevent_argv(struct kobj_uevent_env *env, const char *subsystem)
int buffer_size = sizeof(env->buf) - env->buflen;
int len;
- len = strlcpy(&env->buf[env->buflen], subsystem, buffer_size);
- if (len >= buffer_size) {
- pr_warn("init_uevent_argv: buffer size of %d too small, needed %d\n",
- buffer_size, len);
+ len = strscpy(&env->buf[env->buflen], subsystem, buffer_size);
+ if (len < 0) {
+ pr_warn("%s: insufficient buffer space (%u left) for %s\n",
+ __func__, buffer_size, subsystem);
return -ENOMEM;
}
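The strlcpy() to strscpy() conversion changes how truncation is detected: strlcpy() returns the length of the source string, so overflow had to be inferred by comparing against the buffer size, whereas strscpy() returns the number of characters copied or -E2BIG when the destination is too small, which is why the check becomes len < 0. A small standalone illustration of the new return convention (not part of this diff):

static void demo_strscpy(const char *subsystem)
{
	char buf[8];
	ssize_t len;

	len = strscpy(buf, subsystem, sizeof(buf));
	if (len < 0)		/* -E2BIG: too small, but buf is still NUL-terminated */
		pr_warn("subsystem name \"%s\" truncated\n", subsystem);
	else
		pr_info("copied %zd characters\n", len);
}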
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index 05a09652f5..dd1d633d0f 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -89,8 +89,7 @@ void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
EXPORT_SYMBOL_GPL(kunit_ptr_not_err_assert_format);
/* Checks if `text` is a literal representing `value`, e.g. "5" and 5 */
-static bool is_literal(struct kunit *test, const char *text, long long value,
- gfp_t gfp)
+static bool is_literal(const char *text, long long value)
{
char *buffer;
int len;
@@ -100,14 +99,15 @@ static bool is_literal(struct kunit *test, const char *text, long long value,
if (strlen(text) != len)
return false;
- buffer = kunit_kmalloc(test, len+1, gfp);
+ buffer = kmalloc(len+1, GFP_KERNEL);
if (!buffer)
return false;
snprintf(buffer, len+1, "%lld", value);
ret = strncmp(buffer, text, len) == 0;
- kunit_kfree(test, buffer);
+ kfree(buffer);
+
return ret;
}
@@ -125,14 +125,12 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
binary_assert->text->left_text,
binary_assert->text->operation,
binary_assert->text->right_text);
- if (!is_literal(stream->test, binary_assert->text->left_text,
- binary_assert->left_value, stream->gfp))
+ if (!is_literal(binary_assert->text->left_text, binary_assert->left_value))
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)\n",
binary_assert->text->left_text,
binary_assert->left_value,
binary_assert->left_value);
- if (!is_literal(stream->test, binary_assert->text->right_text,
- binary_assert->right_value, stream->gfp))
+ if (!is_literal(binary_assert->text->right_text, binary_assert->right_value))
string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)",
binary_assert->text->right_text,
binary_assert->right_value,
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index 35ddb329da..382706dfb4 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -37,14 +37,21 @@ void kunit_debugfs_init(void)
debugfs_rootdir = debugfs_create_dir(KUNIT_DEBUGFS_ROOT, NULL);
}
-static void debugfs_print_result(struct seq_file *seq,
- struct kunit_suite *suite,
- struct kunit_case *test_case)
+static void debugfs_print_result(struct seq_file *seq, struct string_stream *log)
{
- if (!test_case || !test_case->log)
+ struct string_stream_fragment *frag_container;
+
+ if (!log)
return;
- seq_printf(seq, "%s", test_case->log);
+ /*
+ * Walk the fragments so we don't need to allocate a temporary
+ * buffer to hold the entire string.
+ */
+ spin_lock(&log->lock);
+ list_for_each_entry(frag_container, &log->fragments, node)
+ seq_printf(seq, "%s", frag_container->fragment);
+ spin_unlock(&log->lock);
}
/*
@@ -71,10 +78,9 @@ static int debugfs_print_results(struct seq_file *seq, void *v)
seq_printf(seq, KUNIT_SUBTEST_INDENT "1..%zd\n", kunit_suite_num_test_cases(suite));
kunit_suite_for_each_test_case(suite, test_case)
- debugfs_print_result(seq, suite, test_case);
+ debugfs_print_result(seq, test_case->log);
- if (suite->log)
- seq_printf(seq, "%s", suite->log);
+ debugfs_print_result(seq, suite->log);
seq_printf(seq, "%s %d %s\n",
kunit_status_to_ok_not_ok(success), 1, suite->name);
@@ -105,17 +111,41 @@ static const struct file_operations debugfs_results_fops = {
void kunit_debugfs_create_suite(struct kunit_suite *suite)
{
struct kunit_case *test_case;
+ struct string_stream *stream;
+
+ /*
+ * Allocate logs before creating debugfs representation.
+ * The suite->log and test_case->log pointers are expected to be NULL
+ * if there isn't a log, so only set it if the log stream was created
+ * successfully.
+ */
+ stream = alloc_string_stream(GFP_KERNEL);
+ if (IS_ERR_OR_NULL(stream))
+ return;
- /* Allocate logs before creating debugfs representation. */
- suite->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
- kunit_suite_for_each_test_case(suite, test_case)
- test_case->log = kzalloc(KUNIT_LOG_SIZE, GFP_KERNEL);
+ string_stream_set_append_newlines(stream, true);
+ suite->log = stream;
+
+ kunit_suite_for_each_test_case(suite, test_case) {
+ stream = alloc_string_stream(GFP_KERNEL);
+ if (IS_ERR_OR_NULL(stream))
+ goto err;
+
+ string_stream_set_append_newlines(stream, true);
+ test_case->log = stream;
+ }
suite->debugfs = debugfs_create_dir(suite->name, debugfs_rootdir);
debugfs_create_file(KUNIT_DEBUGFS_RESULTS, S_IFREG | 0444,
suite->debugfs,
suite, &debugfs_results_fops);
+ return;
+
+err:
+ string_stream_destroy(suite->log);
+ kunit_suite_for_each_test_case(suite, test_case)
+ string_stream_destroy(test_case->log);
}
void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
@@ -123,7 +153,7 @@ void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
struct kunit_case *test_case;
debugfs_remove_recursive(suite->debugfs);
- kfree(suite->log);
+ string_stream_destroy(suite->log);
kunit_suite_for_each_test_case(suite, test_case)
- kfree(test_case->log);
+ string_stream_destroy(test_case->log);
}
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 1236b3cd2f..51013feba5 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -144,6 +144,10 @@ void kunit_free_suite_set(struct kunit_suite_set suite_set)
kfree(suite_set.start);
}
+/*
+ * Filter and reallocate test suites. Must return the filtered test suites set
+ * allocated at a valid virtual address or NULL in case of error.
+ */
struct kunit_suite_set
kunit_filter_suites(const struct kunit_suite_set *suite_set,
const char *filter_glob,
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index 01a769f35e..6bb5c2ef66 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -190,6 +190,7 @@ static void example_static_stub_test(struct kunit *test)
static const struct example_param {
int value;
} example_params_array[] = {
+ { .value = 3, },
{ .value = 2, },
{ .value = 1, },
{ .value = 0, },
@@ -213,8 +214,8 @@ static void example_params_test(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, param);
/* Test can be skipped on unsupported param values */
- if (!param->value)
- kunit_skip(test, "unsupported param value");
+ if (!is_power_of_2(param->value))
+ kunit_skip(test, "unsupported param value %d", param->value);
/* You can use param values for parameterized testing */
KUNIT_EXPECT_EQ(test, param->value % param->value, 0);
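With kunit_skip() taking a format string, a parameterized case can now report exactly which parameter it skipped. A hedged sketch of how such a parameterized case is wired up end to end, using the stock KUNIT_ARRAY_PARAM()/KUNIT_CASE_PARAM() helpers; the my_* names are placeholders, not part of this diff:

static const struct my_param {
	int value;
} my_params[] = {
	{ .value = 4 }, { .value = 3 }, { .value = 0 },
};

static void my_param_desc(const struct my_param *p, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "value=%d", p->value);
}
KUNIT_ARRAY_PARAM(my, my_params, my_param_desc);

static void my_param_test(struct kunit *test)
{
	const struct my_param *param = test->param_value;

	/* Report the offending value instead of a generic skip message. */
	if (!is_power_of_2(param->value))
		kunit_skip(test, "unsupported param value %d", param->value);

	KUNIT_EXPECT_EQ(test, param->value & (param->value - 1), 0);
}

static struct kunit_case my_test_cases[] = {
	KUNIT_CASE_PARAM(my_param_test, my_gen_params),
	{}
};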
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 83d8e90ca7..ee6927c609 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -8,6 +8,7 @@
#include <kunit/test.h>
#include <kunit/test-bug.h>
+#include "string-stream.h"
#include "try-catch-impl.h"
struct kunit_try_catch_test_context {
@@ -530,12 +531,24 @@ static struct kunit_suite kunit_resource_test_suite = {
.test_cases = kunit_resource_test_cases,
};
+/*
+ * Log tests call string_stream functions, which aren't exported. So only
+ * build this code if this test is built-in.
+ */
+#if IS_BUILTIN(CONFIG_KUNIT_TEST)
+
+/* This avoids a cast warning if kfree() is passed directly to kunit_add_action(). */
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
+
static void kunit_log_test(struct kunit *test)
{
struct kunit_suite suite;
-
- suite.log = kunit_kzalloc(test, KUNIT_LOG_SIZE, GFP_KERNEL);
+#ifdef CONFIG_KUNIT_DEBUGFS
+ char *full_log;
+#endif
+ suite.log = kunit_alloc_string_stream(test, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, suite.log);
+ string_stream_set_append_newlines(suite.log, true);
kunit_log(KERN_INFO, test, "put this in log.");
kunit_log(KERN_INFO, test, "this too.");
@@ -543,14 +556,21 @@ static void kunit_log_test(struct kunit *test)
kunit_log(KERN_INFO, &suite, "along with this.");
#ifdef CONFIG_KUNIT_DEBUGFS
+ KUNIT_EXPECT_TRUE(test, test->log->append_newlines);
+
+ full_log = string_stream_get_string(test->log);
+ kunit_add_action(test, kfree_wrapper, full_log);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
- strstr(test->log, "put this in log."));
+ strstr(full_log, "put this in log."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
- strstr(test->log, "this too."));
+ strstr(full_log, "this too."));
+
+ full_log = string_stream_get_string(suite.log);
+ kunit_add_action(test, kfree_wrapper, full_log);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
- strstr(suite.log, "add to suite log."));
+ strstr(full_log, "add to suite log."));
KUNIT_EXPECT_NOT_ERR_OR_NULL(test,
- strstr(suite.log, "along with this."));
+ strstr(full_log, "along with this."));
#else
KUNIT_EXPECT_NULL(test, test->log);
#endif
@@ -558,15 +578,30 @@ static void kunit_log_test(struct kunit *test)
static void kunit_log_newline_test(struct kunit *test)
{
+ char *full_log;
+
kunit_info(test, "Add newline\n");
if (test->log) {
- KUNIT_ASSERT_NOT_NULL_MSG(test, strstr(test->log, "Add newline\n"),
- "Missing log line, full log:\n%s", test->log);
- KUNIT_EXPECT_NULL(test, strstr(test->log, "Add newline\n\n"));
+ full_log = string_stream_get_string(test->log);
+ kunit_add_action(test, kfree_wrapper, full_log);
+ KUNIT_ASSERT_NOT_NULL_MSG(test, strstr(full_log, "Add newline\n"),
+ "Missing log line, full log:\n%s", full_log);
+ KUNIT_EXPECT_NULL(test, strstr(full_log, "Add newline\n\n"));
} else {
kunit_skip(test, "only useful when debugfs is enabled");
}
}
+#else
+static void kunit_log_test(struct kunit *test)
+{
+ kunit_skip(test, "Log tests only run when built-in");
+}
+
+static void kunit_log_newline_test(struct kunit *test)
+{
+ kunit_skip(test, "Log tests only run when built-in");
+}
+#endif /* IS_BUILTIN(CONFIG_KUNIT_TEST) */
static struct kunit_case kunit_log_test_cases[] = {
KUNIT_CASE(kunit_log_test),
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
index 110f3a9932..06822766f2 100644
--- a/lib/kunit/string-stream-test.c
+++ b/lib/kunit/string-stream-test.c
@@ -6,48 +6,539 @@
* Author: Brendan Higgins <brendanhiggins@google.com>
*/
+#include <kunit/static_stub.h>
#include <kunit/test.h>
+#include <linux/ktime.h>
#include <linux/slab.h>
+#include <linux/timekeeping.h>
#include "string-stream.h"
-static void string_stream_test_empty_on_creation(struct kunit *test)
+struct string_stream_test_priv {
+ /* For testing resource-managed free. */
+ struct string_stream *expected_free_stream;
+ bool stream_was_freed;
+ bool stream_free_again;
+};
+
+/* Avoids a cast warning if kfree() is passed directly to kunit_add_action(). */
+static void kfree_wrapper(void *p)
+{
+ kfree(p);
+}
+
+/* Avoids a cast warning if string_stream_destroy() is passed directly to kunit_add_action(). */
+static void cleanup_raw_stream(void *p)
+{
+ struct string_stream *stream = p;
+
+ string_stream_destroy(stream);
+}
+
+static char *get_concatenated_string(struct kunit *test, struct string_stream *stream)
+{
+ char *str = string_stream_get_string(stream);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, str);
+ kunit_add_action(test, kfree_wrapper, (void *)str);
+
+ return str;
+}
+
+/* Managed string_stream object is initialized correctly. */
+static void string_stream_managed_init_test(struct kunit *test)
+{
+ struct string_stream *stream;
+
+ /* Resource-managed initialization. */
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ KUNIT_EXPECT_EQ(test, stream->length, 0);
+ KUNIT_EXPECT_TRUE(test, list_empty(&stream->fragments));
+ KUNIT_EXPECT_TRUE(test, (stream->gfp == GFP_KERNEL));
+ KUNIT_EXPECT_FALSE(test, stream->append_newlines);
+ KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream));
+}
+
+/* Unmanaged string_stream object is initialized correctly. */
+static void string_stream_unmanaged_init_test(struct kunit *test)
+{
+ struct string_stream *stream;
+
+ stream = alloc_string_stream(GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+ kunit_add_action(test, cleanup_raw_stream, stream);
+
+ KUNIT_EXPECT_EQ(test, stream->length, 0);
+ KUNIT_EXPECT_TRUE(test, list_empty(&stream->fragments));
+ KUNIT_EXPECT_EQ(test, stream->gfp, GFP_KERNEL);
+ KUNIT_EXPECT_FALSE(test, stream->append_newlines);
+
+ KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream));
+}
+
+static void string_stream_destroy_stub(struct string_stream *stream)
+{
+ struct kunit *fake_test = kunit_get_current_test();
+ struct string_stream_test_priv *priv = fake_test->priv;
+
+ /* The kunit test could own string_streams other than the one we are testing. */
+ if (stream == priv->expected_free_stream) {
+ if (priv->stream_was_freed)
+ priv->stream_free_again = true;
+ else
+ priv->stream_was_freed = true;
+ }
+
+ /*
+ * Calling string_stream_destroy() will only call this function again
+ * because the redirection stub is still active.
+ * Avoid calling deactivate_static_stub() or changing current->kunit_test
+ * during cleanup.
+ */
+ string_stream_clear(stream);
+ kfree(stream);
+}
+
+/* kunit_free_string_stream() calls string_stream_destroy() */
+static void string_stream_managed_free_test(struct kunit *test)
+{
+ struct string_stream_test_priv *priv = test->priv;
+
+ priv->expected_free_stream = NULL;
+ priv->stream_was_freed = false;
+ priv->stream_free_again = false;
+
+ kunit_activate_static_stub(test,
+ string_stream_destroy,
+ string_stream_destroy_stub);
+
+ priv->expected_free_stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->expected_free_stream);
+
+ /* This should call the stub function. */
+ kunit_free_string_stream(test, priv->expected_free_stream);
+
+ KUNIT_EXPECT_TRUE(test, priv->stream_was_freed);
+ KUNIT_EXPECT_FALSE(test, priv->stream_free_again);
+}
+
+/* string_stream object is freed when test is cleaned up. */
+static void string_stream_resource_free_test(struct kunit *test)
+{
+ struct string_stream_test_priv *priv = test->priv;
+ struct kunit *fake_test;
+
+ fake_test = kunit_kzalloc(test, sizeof(*fake_test), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, fake_test);
+
+ kunit_init_test(fake_test, "string_stream_fake_test", NULL);
+ fake_test->priv = priv;
+
+ /*
+ * Activate stub before creating string_stream so the
+ * string_stream will be cleaned up first.
+ */
+ priv->expected_free_stream = NULL;
+ priv->stream_was_freed = false;
+ priv->stream_free_again = false;
+
+ kunit_activate_static_stub(fake_test,
+ string_stream_destroy,
+ string_stream_destroy_stub);
+
+ priv->expected_free_stream = kunit_alloc_string_stream(fake_test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->expected_free_stream);
+
+ /* Set current->kunit_test to fake_test so the static stub will be called. */
+ current->kunit_test = fake_test;
+
+ /* Cleanup test - the stub function should be called */
+ kunit_cleanup(fake_test);
+
+ /* Set current->kunit_test back to current test. */
+ current->kunit_test = test;
+
+ KUNIT_EXPECT_TRUE(test, priv->stream_was_freed);
+ KUNIT_EXPECT_FALSE(test, priv->stream_free_again);
+}
+
+/*
+ * Add a series of lines to a string_stream. Check that all lines
+ * appear in the correct order and no characters are dropped.
+ */
+static void string_stream_line_add_test(struct kunit *test)
+{
+ struct string_stream *stream;
+ char line[60];
+ char *concat_string, *pos, *string_end;
+ size_t len, total_len;
+ int num_lines, i;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /* Add series of sequence numbered lines */
+ total_len = 0;
+ for (i = 0; i < 100; ++i) {
+ len = snprintf(line, sizeof(line),
+ "The quick brown fox jumps over the lazy penguin %d\n", i);
+
+ /* Sanity-check that our test string isn't truncated */
+ KUNIT_ASSERT_LT(test, len, sizeof(line));
+
+ string_stream_add(stream, line);
+ total_len += len;
+ }
+ num_lines = i;
+
+ concat_string = get_concatenated_string(test, stream);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, concat_string);
+ KUNIT_EXPECT_EQ(test, strlen(concat_string), total_len);
+
+ /*
+ * Split the concatenated string at the newlines and check that
+ * all the original added strings are present.
+ */
+ pos = concat_string;
+ for (i = 0; i < num_lines; ++i) {
+ string_end = strchr(pos, '\n');
+ KUNIT_EXPECT_NOT_NULL(test, string_end);
+
+ /* Convert to NULL-terminated string */
+ *string_end = '\0';
+
+ snprintf(line, sizeof(line),
+ "The quick brown fox jumps over the lazy penguin %d", i);
+ KUNIT_EXPECT_STREQ(test, pos, line);
+
+ pos = string_end + 1;
+ }
+
+ /* There shouldn't be any more data after this */
+ KUNIT_EXPECT_EQ(test, strlen(pos), 0);
+}
+
+/* Add a series of lines of variable length to a string_stream. */
+static void string_stream_variable_length_line_test(struct kunit *test)
+{
+ static const char line[] =
+ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ " 0123456789!$%^&*()_-+={}[]:;@'~#<>,.?/|";
+ struct string_stream *stream;
+ struct rnd_state rnd;
+ char *concat_string, *pos, *string_end;
+ size_t offset, total_len;
+ int num_lines, i;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ /*
+ * Log many lines of varying lengths until we have created
+ * many fragments.
+ * The "randomness" must be repeatable.
+ */
+ prandom_seed_state(&rnd, 3141592653589793238ULL);
+ total_len = 0;
+ for (i = 0; i < 100; ++i) {
+ offset = prandom_u32_state(&rnd) % (sizeof(line) - 1);
+ string_stream_add(stream, "%s\n", &line[offset]);
+ total_len += sizeof(line) - offset;
+ }
+ num_lines = i;
+
+ concat_string = get_concatenated_string(test, stream);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, concat_string);
+ KUNIT_EXPECT_EQ(test, strlen(concat_string), total_len);
+
+ /*
+ * Split the concatenated string at the newlines and check that
+ * all the original added strings are present.
+ */
+ prandom_seed_state(&rnd, 3141592653589793238ULL);
+ pos = concat_string;
+ for (i = 0; i < num_lines; ++i) {
+ string_end = strchr(pos, '\n');
+ KUNIT_EXPECT_NOT_NULL(test, string_end);
+
+ /* Convert to NULL-terminated string */
+ *string_end = '\0';
+
+ offset = prandom_u32_state(&rnd) % (sizeof(line) - 1);
+ KUNIT_EXPECT_STREQ(test, pos, &line[offset]);
+
+ pos = string_end + 1;
+ }
+
+ /* There shouldn't be any more data after this */
+ KUNIT_EXPECT_EQ(test, strlen(pos), 0);
+}
+
+/* Appending the content of one string stream to another. */
+static void string_stream_append_test(struct kunit *test)
+{
+ static const char * const strings_1[] = {
+ "one", "two", "three", "four", "five", "six",
+ "seven", "eight", "nine", "ten",
+ };
+ static const char * const strings_2[] = {
+ "Apple", "Pear", "Orange", "Banana", "Grape", "Apricot",
+ };
+ struct string_stream *stream_1, *stream_2;
+ const char *stream1_content_before_append, *stream_2_content;
+ char *combined_content;
+ size_t combined_length;
+ int i;
+
+ stream_1 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_1);
+
+ stream_2 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_2);
+
+ /* Append content of empty stream to empty stream */
+ string_stream_append(stream_1, stream_2);
+ KUNIT_EXPECT_EQ(test, strlen(get_concatenated_string(test, stream_1)), 0);
+
+ /* Add some data to stream_1 */
+ for (i = 0; i < ARRAY_SIZE(strings_1); ++i)
+ string_stream_add(stream_1, "%s\n", strings_1[i]);
+
+ stream1_content_before_append = get_concatenated_string(test, stream_1);
+
+ /* Append content of empty stream to non-empty stream */
+ string_stream_append(stream_1, stream_2);
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream_1),
+ stream1_content_before_append);
+
+ /* Add some data to stream_2 */
+ for (i = 0; i < ARRAY_SIZE(strings_2); ++i)
+ string_stream_add(stream_2, "%s\n", strings_2[i]);
+
+ /* Append content of non-empty stream to non-empty stream */
+ string_stream_append(stream_1, stream_2);
+
+ /*
+ * End result should be the original content of stream_1 plus
+ * the content of stream_2.
+ */
+ stream_2_content = get_concatenated_string(test, stream_2);
+ combined_length = strlen(stream1_content_before_append) + strlen(stream_2_content);
+ combined_length++; /* for terminating \0 */
+ combined_content = kunit_kmalloc(test, combined_length, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, combined_content);
+ snprintf(combined_content, combined_length, "%s%s",
+ stream1_content_before_append, stream_2_content);
+
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream_1), combined_content);
+
+ /* Append content of non-empty stream to empty stream */
+ kunit_free_string_stream(test, stream_1);
+
+ stream_1 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_1);
+
+ string_stream_append(stream_1, stream_2);
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream_1), stream_2_content);
+}
+
+/* Appending the content of one string stream to one with auto-newlining. */
+static void string_stream_append_auto_newline_test(struct kunit *test)
+{
+ struct string_stream *stream_1, *stream_2;
+
+ /* Stream 1 has newline appending enabled */
+ stream_1 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_1);
+ string_stream_set_append_newlines(stream_1, true);
+ KUNIT_EXPECT_TRUE(test, stream_1->append_newlines);
+
+ /* Stream 2 does not append newlines */
+ stream_2 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_2);
+
+ /* Appending a stream with a newline should not add another newline */
+ string_stream_add(stream_1, "Original string\n");
+ string_stream_add(stream_2, "Appended content\n");
+ string_stream_add(stream_2, "More stuff\n");
+ string_stream_append(stream_1, stream_2);
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream_1),
+ "Original string\nAppended content\nMore stuff\n");
+
+ kunit_free_string_stream(test, stream_2);
+ stream_2 = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream_2);
+
+ /*
+ * Appending a stream without newline should add a final newline.
+ * The appended string_stream is treated as a single string so newlines
+ * should not be inserted between fragments.
+ */
+ string_stream_add(stream_2, "Another");
+ string_stream_add(stream_2, "And again");
+ string_stream_append(stream_1, stream_2);
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream_1),
+ "Original string\nAppended content\nMore stuff\nAnotherAnd again\n");
+}
+
+/* Adding an empty string should not create a fragment. */
+static void string_stream_append_empty_string_test(struct kunit *test)
{
- struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
+ struct string_stream *stream;
+ int original_frag_count;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+ /* Formatted empty string */
+ string_stream_add(stream, "%s", "");
KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream));
+ KUNIT_EXPECT_TRUE(test, list_empty(&stream->fragments));
+
+ /* Adding an empty string to a non-empty stream */
+ string_stream_add(stream, "Add this line");
+ original_frag_count = list_count_nodes(&stream->fragments);
+
+ string_stream_add(stream, "%s", "");
+ KUNIT_EXPECT_EQ(test, list_count_nodes(&stream->fragments), original_frag_count);
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream), "Add this line");
}
-static void string_stream_test_not_empty_after_add(struct kunit *test)
+/* Adding strings without automatic newline appending */
+static void string_stream_no_auto_newline_test(struct kunit *test)
{
- struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
+ struct string_stream *stream;
- string_stream_add(stream, "Foo");
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
- KUNIT_EXPECT_FALSE(test, string_stream_is_empty(stream));
+ /*
+ * Add some strings with and without newlines. All formatted newlines
+ * should be preserved. It should not add any extra newlines.
+ */
+ string_stream_add(stream, "One");
+ string_stream_add(stream, "Two\n");
+ string_stream_add(stream, "%s\n", "Three");
+ string_stream_add(stream, "%s", "Four\n");
+ string_stream_add(stream, "Five\n%s", "Six");
+ string_stream_add(stream, "Seven\n\n");
+ string_stream_add(stream, "Eight");
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream),
+ "OneTwo\nThree\nFour\nFive\nSixSeven\n\nEight");
}
-static void string_stream_test_get_string(struct kunit *test)
+/* Adding strings with automatic newline appending */
+static void string_stream_auto_newline_test(struct kunit *test)
{
- struct string_stream *stream = alloc_string_stream(test, GFP_KERNEL);
- char *output;
+ struct string_stream *stream;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ string_stream_set_append_newlines(stream, true);
+ KUNIT_EXPECT_TRUE(test, stream->append_newlines);
+
+ /*
+ * Add some strings with and without newlines. Newlines should
+ * be appended to lines that do not end with \n, but newlines
+ * resulting from the formatting should not be changed.
+ */
+ string_stream_add(stream, "One");
+ string_stream_add(stream, "Two\n");
+ string_stream_add(stream, "%s\n", "Three");
+ string_stream_add(stream, "%s", "Four\n");
+ string_stream_add(stream, "Five\n%s", "Six");
+ string_stream_add(stream, "Seven\n\n");
+ string_stream_add(stream, "Eight");
+ KUNIT_EXPECT_STREQ(test, get_concatenated_string(test, stream),
+ "One\nTwo\nThree\nFour\nFive\nSix\nSeven\n\nEight\n");
+}
+
+/*
+ * This doesn't actually "test" anything. It reports time taken
+ * and memory used for logging a large number of lines.
+ */
+static void string_stream_performance_test(struct kunit *test)
+{
+ struct string_stream_fragment *frag_container;
+ struct string_stream *stream;
+ char test_line[101];
+ ktime_t start_time, end_time;
+ size_t len, bytes_requested, actual_bytes_used, total_string_length;
+ int offset, i;
+
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);
+
+ memset(test_line, 'x', sizeof(test_line) - 1);
+ test_line[sizeof(test_line) - 1] = '\0';
+
+ start_time = ktime_get();
+ for (i = 0; i < 10000; i++) {
+ offset = i % (sizeof(test_line) - 1);
+ string_stream_add(stream, "%s: %d\n", &test_line[offset], i);
+ }
+ end_time = ktime_get();
+
+ /*
+ * Calculate memory used. This doesn't include invisible
+ * overhead due to kernel allocator fragment size rounding.
+ */
+ bytes_requested = sizeof(*stream);
+ actual_bytes_used = ksize(stream);
+ total_string_length = 0;
+
+ list_for_each_entry(frag_container, &stream->fragments, node) {
+ bytes_requested += sizeof(*frag_container);
+ actual_bytes_used += ksize(frag_container);
+
+ len = strlen(frag_container->fragment);
+ total_string_length += len;
+ bytes_requested += len + 1; /* +1 for '\0' */
+ actual_bytes_used += ksize(frag_container->fragment);
+ }
+
+ kunit_info(test, "Time elapsed: %lld us\n",
+ ktime_us_delta(end_time, start_time));
+ kunit_info(test, "Total string length: %zu\n", total_string_length);
+ kunit_info(test, "Bytes requested: %zu\n", bytes_requested);
+ kunit_info(test, "Actual bytes allocated: %zu\n", actual_bytes_used);
+}
+
+static int string_stream_test_init(struct kunit *test)
+{
+ struct string_stream_test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
- string_stream_add(stream, "Foo");
- string_stream_add(stream, " %s", "bar");
+ test->priv = priv;
- output = string_stream_get_string(stream);
- KUNIT_ASSERT_STREQ(test, output, "Foo bar");
+ return 0;
}
static struct kunit_case string_stream_test_cases[] = {
- KUNIT_CASE(string_stream_test_empty_on_creation),
- KUNIT_CASE(string_stream_test_not_empty_after_add),
- KUNIT_CASE(string_stream_test_get_string),
+ KUNIT_CASE(string_stream_managed_init_test),
+ KUNIT_CASE(string_stream_unmanaged_init_test),
+ KUNIT_CASE(string_stream_managed_free_test),
+ KUNIT_CASE(string_stream_resource_free_test),
+ KUNIT_CASE(string_stream_line_add_test),
+ KUNIT_CASE(string_stream_variable_length_line_test),
+ KUNIT_CASE(string_stream_append_test),
+ KUNIT_CASE(string_stream_append_auto_newline_test),
+ KUNIT_CASE(string_stream_append_empty_string_test),
+ KUNIT_CASE(string_stream_no_auto_newline_test),
+ KUNIT_CASE(string_stream_auto_newline_test),
+ KUNIT_CASE(string_stream_performance_test),
{}
};
static struct kunit_suite string_stream_test_suite = {
.name = "string-stream-test",
- .test_cases = string_stream_test_cases
+ .test_cases = string_stream_test_cases,
+ .init = string_stream_test_init,
};
kunit_test_suites(&string_stream_test_suite);
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
index cc32743c11..a6f3616c20 100644
--- a/lib/kunit/string-stream.c
+++ b/lib/kunit/string-stream.c
@@ -6,6 +6,7 @@
* Author: Brendan Higgins <brendanhiggins@google.com>
*/
+#include <kunit/static_stub.h>
#include <kunit/test.h>
#include <linux/list.h>
#include <linux/slab.h>
@@ -13,30 +14,28 @@
#include "string-stream.h"
-static struct string_stream_fragment *alloc_string_stream_fragment(
- struct kunit *test, int len, gfp_t gfp)
+static struct string_stream_fragment *alloc_string_stream_fragment(int len, gfp_t gfp)
{
struct string_stream_fragment *frag;
- frag = kunit_kzalloc(test, sizeof(*frag), gfp);
+ frag = kzalloc(sizeof(*frag), gfp);
if (!frag)
return ERR_PTR(-ENOMEM);
- frag->fragment = kunit_kmalloc(test, len, gfp);
+ frag->fragment = kmalloc(len, gfp);
if (!frag->fragment) {
- kunit_kfree(test, frag);
+ kfree(frag);
return ERR_PTR(-ENOMEM);
}
return frag;
}
-static void string_stream_fragment_destroy(struct kunit *test,
- struct string_stream_fragment *frag)
+static void string_stream_fragment_destroy(struct string_stream_fragment *frag)
{
list_del(&frag->node);
- kunit_kfree(test, frag->fragment);
- kunit_kfree(test, frag);
+ kfree(frag->fragment);
+ kfree(frag);
}
int string_stream_vadd(struct string_stream *stream,
@@ -44,26 +43,44 @@ int string_stream_vadd(struct string_stream *stream,
va_list args)
{
struct string_stream_fragment *frag_container;
- int len;
+ int buf_len, result_len;
va_list args_for_counting;
/* Make a copy because `vsnprintf` could change it */
va_copy(args_for_counting, args);
- /* Need space for null byte. */
- len = vsnprintf(NULL, 0, fmt, args_for_counting) + 1;
+ /* Evaluate length of formatted string */
+ buf_len = vsnprintf(NULL, 0, fmt, args_for_counting);
va_end(args_for_counting);
- frag_container = alloc_string_stream_fragment(stream->test,
- len,
- stream->gfp);
+ if (buf_len == 0)
+ return 0;
+
+ /* Reserve one extra for possible appended newline. */
+ if (stream->append_newlines)
+ buf_len++;
+
+ /* Need space for null byte. */
+ buf_len++;
+
+ frag_container = alloc_string_stream_fragment(buf_len, stream->gfp);
if (IS_ERR(frag_container))
return PTR_ERR(frag_container);
- len = vsnprintf(frag_container->fragment, len, fmt, args);
+ if (stream->append_newlines) {
+ /* Don't include reserved newline byte in writeable length. */
+ result_len = vsnprintf(frag_container->fragment, buf_len - 1, fmt, args);
+
+ /* Append newline if necessary. */
+ if (frag_container->fragment[result_len - 1] != '\n')
+ result_len = strlcat(frag_container->fragment, "\n", buf_len);
+ } else {
+ result_len = vsnprintf(frag_container->fragment, buf_len, fmt, args);
+ }
+
spin_lock(&stream->lock);
- stream->length += len;
+ stream->length += result_len;
list_add_tail(&frag_container->node, &stream->fragments);
spin_unlock(&stream->lock);
@@ -82,7 +99,7 @@ int string_stream_add(struct string_stream *stream, const char *fmt, ...)
return result;
}
-static void string_stream_clear(struct string_stream *stream)
+void string_stream_clear(struct string_stream *stream)
{
struct string_stream_fragment *frag_container, *frag_container_safe;
@@ -91,7 +108,7 @@ static void string_stream_clear(struct string_stream *stream)
frag_container_safe,
&stream->fragments,
node) {
- string_stream_fragment_destroy(stream->test, frag_container);
+ string_stream_fragment_destroy(frag_container);
}
stream->length = 0;
spin_unlock(&stream->lock);
@@ -103,7 +120,7 @@ char *string_stream_get_string(struct string_stream *stream)
size_t buf_len = stream->length + 1; /* +1 for null byte. */
char *buf;
- buf = kunit_kzalloc(stream->test, buf_len, stream->gfp);
+ buf = kzalloc(buf_len, stream->gfp);
if (!buf)
return NULL;
@@ -119,13 +136,17 @@ int string_stream_append(struct string_stream *stream,
struct string_stream *other)
{
const char *other_content;
+ int ret;
other_content = string_stream_get_string(other);
if (!other_content)
return -ENOMEM;
- return string_stream_add(stream, other_content);
+ ret = string_stream_add(stream, other_content);
+ kfree(other_content);
+
+ return ret;
}
bool string_stream_is_empty(struct string_stream *stream)
@@ -133,16 +154,15 @@ bool string_stream_is_empty(struct string_stream *stream)
return list_empty(&stream->fragments);
}
-struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp)
+struct string_stream *alloc_string_stream(gfp_t gfp)
{
struct string_stream *stream;
- stream = kunit_kzalloc(test, sizeof(*stream), gfp);
+ stream = kzalloc(sizeof(*stream), gfp);
if (!stream)
return ERR_PTR(-ENOMEM);
stream->gfp = gfp;
- stream->test = test;
INIT_LIST_HEAD(&stream->fragments);
spin_lock_init(&stream->lock);
@@ -151,5 +171,37 @@ struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp)
void string_stream_destroy(struct string_stream *stream)
{
+ KUNIT_STATIC_STUB_REDIRECT(string_stream_destroy, stream);
+
+ if (!stream)
+ return;
+
string_stream_clear(stream);
+ kfree(stream);
+}
+
+static void resource_free_string_stream(void *p)
+{
+ struct string_stream *stream = p;
+
+ string_stream_destroy(stream);
+}
+
+struct string_stream *kunit_alloc_string_stream(struct kunit *test, gfp_t gfp)
+{
+ struct string_stream *stream;
+
+ stream = alloc_string_stream(gfp);
+ if (IS_ERR(stream))
+ return stream;
+
+ if (kunit_add_action_or_reset(test, resource_free_string_stream, stream) != 0)
+ return ERR_PTR(-ENOMEM);
+
+ return stream;
+}
+
+void kunit_free_string_stream(struct kunit *test, struct string_stream *stream)
+{
+ kunit_release_action(test, resource_free_string_stream, (void *)stream);
}
diff --git a/lib/kunit/string-stream.h b/lib/kunit/string-stream.h
index b669f9a75a..7be2450c70 100644
--- a/lib/kunit/string-stream.h
+++ b/lib/kunit/string-stream.h
@@ -23,13 +23,17 @@ struct string_stream {
struct list_head fragments;
/* length and fragments are protected by this lock */
spinlock_t lock;
- struct kunit *test;
gfp_t gfp;
+ bool append_newlines;
};
struct kunit;
-struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp);
+struct string_stream *kunit_alloc_string_stream(struct kunit *test, gfp_t gfp);
+void kunit_free_string_stream(struct kunit *test, struct string_stream *stream);
+
+struct string_stream *alloc_string_stream(gfp_t gfp);
+void free_string_stream(struct string_stream *stream);
int __printf(2, 3) string_stream_add(struct string_stream *stream,
const char *fmt, ...);
@@ -38,6 +42,8 @@ int __printf(2, 0) string_stream_vadd(struct string_stream *stream,
const char *fmt,
va_list args);
+void string_stream_clear(struct string_stream *stream);
+
char *string_stream_get_string(struct string_stream *stream);
int string_stream_append(struct string_stream *stream,
@@ -47,4 +53,10 @@ bool string_stream_is_empty(struct string_stream *stream);
void string_stream_destroy(struct string_stream *stream);
+static inline void string_stream_set_append_newlines(struct string_stream *stream,
+ bool append_newlines)
+{
+ stream->append_newlines = append_newlines;
+}
+
#endif /* _KUNIT_STRING_STREAM_H */
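A minimal sketch (not part of the patch) of how a KUnit test might use the reworked interface above; the test case below is hypothetical:

/* Hypothetical test case using the test-managed allocator. */
static void example_stream_case(struct kunit *test)
{
	struct string_stream *stream;

	/* Freed automatically via a deferred action when the test completes. */
	stream = kunit_alloc_string_stream(test, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, stream);

	/* Terminate each added line with '\n' if it does not end with one. */
	string_stream_set_append_newlines(stream, true);
	string_stream_add(stream, "hello %s", "world");
}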
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 7452d1a2ac..d6a2b2460d 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -16,6 +16,7 @@
#include <linux/panic.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
+#include <linux/mm.h>
#include "debugfs.h"
#include "hooks-impl.h"
@@ -109,51 +110,17 @@ static void kunit_print_test_stats(struct kunit *test,
stats.total);
}
-/**
- * kunit_log_newline() - Add newline to the end of log if one is not
- * already present.
- * @log: The log to add the newline to.
- */
-static void kunit_log_newline(char *log)
-{
- int log_len, len_left;
-
- log_len = strlen(log);
- len_left = KUNIT_LOG_SIZE - log_len - 1;
-
- if (log_len > 0 && log[log_len - 1] != '\n')
- strncat(log, "\n", len_left);
-}
-
-/*
- * Append formatted message to log, size of which is limited to
- * KUNIT_LOG_SIZE bytes (including null terminating byte).
- */
-void kunit_log_append(char *log, const char *fmt, ...)
+/* Append formatted message to log. */
+void kunit_log_append(struct string_stream *log, const char *fmt, ...)
{
va_list args;
- int len, log_len, len_left;
if (!log)
return;
- log_len = strlen(log);
- len_left = KUNIT_LOG_SIZE - log_len - 1;
- if (len_left <= 0)
- return;
-
- /* Evaluate length of line to add to log */
- va_start(args, fmt);
- len = vsnprintf(NULL, 0, fmt, args) + 1;
- va_end(args);
-
- /* Print formatted line to the log */
va_start(args, fmt);
- vsnprintf(log + log_len, min(len, len_left), fmt, args);
+ string_stream_vadd(log, fmt, args);
va_end(args);
-
- /* Add newline to end of log if not already present. */
- kunit_log_newline(log);
}
EXPORT_SYMBOL_GPL(kunit_log_append);
@@ -296,7 +263,7 @@ static void kunit_print_string_stream(struct kunit *test,
kunit_err(test, "\n");
} else {
kunit_err(test, "%s", buf);
- kunit_kfree(test, buf);
+ kfree(buf);
}
}
@@ -308,7 +275,7 @@ static void kunit_fail(struct kunit *test, const struct kunit_loc *loc,
kunit_set_failure(test);
- stream = alloc_string_stream(test, GFP_KERNEL);
+ stream = kunit_alloc_string_stream(test, GFP_KERNEL);
if (IS_ERR(stream)) {
WARN(true,
"Could not allocate stream to print failed assertion in %s:%d\n",
@@ -322,7 +289,7 @@ static void kunit_fail(struct kunit *test, const struct kunit_loc *loc,
kunit_print_string_stream(test, stream);
- string_stream_destroy(stream);
+ kunit_free_string_stream(test, stream);
}
void __noreturn __kunit_abort(struct kunit *test)
@@ -359,14 +326,14 @@ void __kunit_do_failed_assertion(struct kunit *test,
}
EXPORT_SYMBOL_GPL(__kunit_do_failed_assertion);
-void kunit_init_test(struct kunit *test, const char *name, char *log)
+void kunit_init_test(struct kunit *test, const char *name, struct string_stream *log)
{
spin_lock_init(&test->lock);
INIT_LIST_HEAD(&test->resources);
test->name = name;
test->log = log;
if (test->log)
- test->log[0] = '\0';
+ string_stream_clear(log);
test->status = KUNIT_SUCCESS;
test->status_comment[0] = '\0';
}
@@ -686,12 +653,15 @@ int kunit_run_tests(struct kunit_suite *suite)
param_desc,
test.status_comment);
+ kunit_update_stats(&param_stats, test.status);
+
/* Get next param. */
param_desc[0] = '\0';
test.param_value = test_case->generate_params(test.param_value, param_desc);
test.param_index++;
-
- kunit_update_stats(&param_stats, test.status);
+ test.status = KUNIT_SUCCESS;
+ test.status_comment[0] = '\0';
+ test.priv = NULL;
}
}
@@ -807,12 +777,19 @@ static void kunit_module_exit(struct module *mod)
};
const char *action = kunit_action();
+ /*
+ * Check if the start address is a valid virtual address to detect
+ * if the module load sequence has failed and the suite set has not
+ * been initialized and filtered.
+ */
+ if (!suite_set.start || !virt_addr_valid(suite_set.start))
+ return;
+
if (!action)
__kunit_test_suites_exit(mod->kunit_suites,
mod->num_kunit_suites);
- if (suite_set.start)
- kunit_free_suite_set(suite_set);
+ kunit_free_suite_set(suite_set);
}
static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
@@ -822,12 +799,12 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
switch (val) {
case MODULE_STATE_LIVE:
+ kunit_module_init(mod);
break;
case MODULE_STATE_GOING:
kunit_module_exit(mod);
break;
case MODULE_STATE_COMING:
- kunit_module_init(mod);
break;
case MODULE_STATE_UNFORMED:
break;
@@ -842,6 +819,8 @@ static struct notifier_block kunit_mod_nb = {
};
#endif
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_action_wrapper, kfree, const void *)
+
void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
void *data;
@@ -851,7 +830,7 @@ void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
if (!data)
return NULL;
- if (kunit_add_action_or_reset(test, (kunit_action_t *)kfree, data) != 0)
+ if (kunit_add_action_or_reset(test, kfree_action_wrapper, data) != 0)
return NULL;
return data;
@@ -863,7 +842,7 @@ void kunit_kfree(struct kunit *test, const void *ptr)
if (!ptr)
return;
- kunit_release_action(test, (kunit_action_t *)kfree, (void *)ptr);
+ kunit_release_action(test, kfree_action_wrapper, (void *)ptr);
}
EXPORT_SYMBOL_GPL(kunit_kfree);
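The kfree_action_wrapper above replaces a function-pointer cast. The same pattern can wrap any single-argument cleanup function; a rough sketch, where struct my_buf and its helpers are hypothetical:

struct my_buf { char data[64]; };

static void my_buf_free(struct my_buf *buf)
{
	kfree(buf);
}

KUNIT_DEFINE_ACTION_WRAPPER(my_buf_free_wrapper, my_buf_free, struct my_buf *)

static void example_action_case(struct kunit *test)
{
	struct my_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, buf);
	/* my_buf_free(buf) now runs automatically when the test exits. */
	KUNIT_ASSERT_EQ(test, 0,
			kunit_add_action_or_reset(test, my_buf_free_wrapper, buf));
}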
diff --git a/lib/llist.c b/lib/llist.c
index 6e668fa5a2..f21d0cfbba 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -66,6 +66,34 @@ struct llist_node *llist_del_first(struct llist_head *head)
EXPORT_SYMBOL_GPL(llist_del_first);
/**
+ * llist_del_first_this - delete given entry of lock-less list if it is first
+ * @head: the head for your lock-less list
+ * @this: a list entry.
+ *
+ * If head of the list is given entry, delete and return %true else
+ * return %false.
+ *
+ * Multiple callers can safely call this concurrently with multiple
+ * llist_add() callers, provided all the callers offer a different @this.
+ */
+bool llist_del_first_this(struct llist_head *head,
+ struct llist_node *this)
+{
+ struct llist_node *entry, *next;
+
+	/* acquire ensures ordering wrt try_cmpxchg() in llist_del_first() */
+ entry = smp_load_acquire(&head->first);
+ do {
+ if (entry != this)
+ return false;
+ next = READ_ONCE(entry->next);
+ } while (!try_cmpxchg(&head->first, &entry, next));
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(llist_del_first_this);
+
+/**
* llist_reverse_order - reverse order of a llist chain
* @head: first item of the list to be reversed
*
diff --git a/lib/lwq.c b/lib/lwq.c
new file mode 100644
index 0000000000..57d080a4d5
--- /dev/null
+++ b/lib/lwq.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Light-weight single-linked queue.
+ *
+ * Entries are enqueued to the head of an llist, with no blocking.
+ * This can happen in any context.
+ *
+ * Entries are dequeued using a spinlock to protect against multiple
+ * access.  Entries pulled from the llist are staged in reverse order on
+ * a ready list, which is refreshed from the llist when it is exhausted.
+ *
+ * This is particularly suitable when work items are queued in BH or
+ * IRQ context, and where work items are handled one at a time by
+ * dedicated threads.
+ */
+#include <linux/rcupdate.h>
+#include <linux/lwq.h>
+
+struct llist_node *__lwq_dequeue(struct lwq *q)
+{
+ struct llist_node *this;
+
+ if (lwq_empty(q))
+ return NULL;
+ spin_lock(&q->lock);
+ this = q->ready;
+ if (!this && !llist_empty(&q->new)) {
+		/* ensure the queue doesn't transiently appear empty to lwq_empty() */
+ smp_store_release(&q->ready, (void *)1);
+ this = llist_reverse_order(llist_del_all(&q->new));
+ if (!this)
+ q->ready = NULL;
+ }
+ if (this)
+ q->ready = llist_next(this);
+ spin_unlock(&q->lock);
+ return this;
+}
+EXPORT_SYMBOL_GPL(__lwq_dequeue);
+
+/**
+ * lwq_dequeue_all - dequeue all currently enqueued objects
+ * @q: the queue to dequeue from
+ *
+ * Remove and return a linked list of llist_nodes of all the objects that were
+ * in the queue. The first on the list will be the object that was least
+ * recently enqueued.
+ */
+struct llist_node *lwq_dequeue_all(struct lwq *q)
+{
+ struct llist_node *r, *t, **ep;
+
+ if (lwq_empty(q))
+ return NULL;
+
+ spin_lock(&q->lock);
+ r = q->ready;
+ q->ready = NULL;
+ t = llist_del_all(&q->new);
+ spin_unlock(&q->lock);
+ ep = &r;
+ while (*ep)
+ ep = &(*ep)->next;
+ *ep = llist_reverse_order(t);
+ return r;
+}
+EXPORT_SYMBOL_GPL(lwq_dequeue_all);
+
+#if IS_ENABLED(CONFIG_LWQ_TEST)
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wait_bit.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+struct tnode {
+ struct lwq_node n;
+ int i;
+ int c;
+};
+
+static int lwq_exercise(void *qv)
+{
+ struct lwq *q = qv;
+ int cnt;
+ struct tnode *t;
+
+ for (cnt = 0; cnt < 10000; cnt++) {
+ wait_var_event(q, (t = lwq_dequeue(q, struct tnode, n)) != NULL);
+ t->c++;
+ if (lwq_enqueue(&t->n, q))
+ wake_up_var(q);
+ }
+ while (!kthread_should_stop())
+ schedule_timeout_idle(1);
+ return 0;
+}
+
+static int lwq_test(void)
+{
+ int i;
+ struct lwq q;
+ struct llist_node *l, **t1, *t2;
+ struct tnode *t;
+ struct task_struct *threads[8];
+
+ printk(KERN_INFO "testing lwq....\n");
+ lwq_init(&q);
+ printk(KERN_INFO " lwq: run some threads\n");
+ for (i = 0; i < ARRAY_SIZE(threads); i++)
+ threads[i] = kthread_run(lwq_exercise, &q, "lwq-test-%d", i);
+ for (i = 0; i < 100; i++) {
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ break;
+ t->i = i;
+ t->c = 0;
+ if (lwq_enqueue(&t->n, &q))
+ wake_up_var(&q);
+ }
+ /* wait for threads to exit */
+ for (i = 0; i < ARRAY_SIZE(threads); i++)
+ if (!IS_ERR_OR_NULL(threads[i]))
+ kthread_stop(threads[i]);
+ printk(KERN_INFO " lwq: dequeue first 50:");
+ for (i = 0; i < 50 ; i++) {
+ if (i && (i % 10) == 0) {
+ printk(KERN_CONT "\n");
+ printk(KERN_INFO " lwq: ... ");
+ }
+ t = lwq_dequeue(&q, struct tnode, n);
+ if (t)
+ printk(KERN_CONT " %d(%d)", t->i, t->c);
+ kfree(t);
+ }
+ printk(KERN_CONT "\n");
+ l = lwq_dequeue_all(&q);
+ printk(KERN_INFO " lwq: delete the multiples of 3 (test lwq_for_each_safe())\n");
+ lwq_for_each_safe(t, t1, t2, &l, n) {
+ if ((t->i % 3) == 0) {
+ t->i = -1;
+ kfree(t);
+ t = NULL;
+ }
+ }
+ if (l)
+ lwq_enqueue_batch(l, &q);
+ printk(KERN_INFO " lwq: dequeue remaining:");
+ while ((t = lwq_dequeue(&q, struct tnode, n)) != NULL) {
+ printk(KERN_CONT " %d", t->i);
+ kfree(t);
+ }
+ printk(KERN_CONT "\n");
+ return 0;
+}
+
+module_init(lwq_test);
+#endif /* CONFIG_LWQ_TEST */
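Beyond the boot-time self test above, typical lwq usage might look like this sketch; struct job and the two functions are hypothetical:

#include <linux/lwq.h>
#include <linux/slab.h>

struct job {
	struct lwq_node node;
	int id;
};

static struct lwq job_queue;	/* lwq_init(&job_queue) once during setup */

/* Producer: any context, lock-free. */
static void submit_job(struct job *j)
{
	/* Returns true when the queue was empty, i.e. the consumer needs a wakeup. */
	if (lwq_enqueue(&j->node, &job_queue))
		; /* wake the dedicated worker thread here */
}

/* Consumer: the dedicated worker thread. */
static void drain_jobs(void)
{
	struct job *j;

	while ((j = lwq_dequeue(&job_queue, struct job, node)) != NULL)
		kfree(j);
}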
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 7a2b6c38fd..dc15e7888f 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -134,6 +134,7 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
range->max = U32_MAX;
break;
case NLA_U64:
+ case NLA_UINT:
case NLA_MSECS:
range->max = U64_MAX;
break;
@@ -183,6 +184,9 @@ static int nla_validate_range_unsigned(const struct nla_policy *pt,
case NLA_U64:
value = nla_get_u64(nla);
break;
+ case NLA_UINT:
+ value = nla_get_uint(nla);
+ break;
case NLA_MSECS:
value = nla_get_u64(nla);
break;
@@ -248,6 +252,7 @@ void nla_get_range_signed(const struct nla_policy *pt,
range->max = S32_MAX;
break;
case NLA_S64:
+ case NLA_SINT:
range->min = S64_MIN;
range->max = S64_MAX;
break;
@@ -295,6 +300,9 @@ static int nla_validate_int_range_signed(const struct nla_policy *pt,
case NLA_S64:
value = nla_get_s64(nla);
break;
+ case NLA_SINT:
+ value = nla_get_sint(nla);
+ break;
default:
return -EINVAL;
}
@@ -320,6 +328,7 @@ static int nla_validate_int_range(const struct nla_policy *pt,
case NLA_U16:
case NLA_U32:
case NLA_U64:
+ case NLA_UINT:
case NLA_MSECS:
case NLA_BINARY:
case NLA_BE16:
@@ -329,6 +338,7 @@ static int nla_validate_int_range(const struct nla_policy *pt,
case NLA_S16:
case NLA_S32:
case NLA_S64:
+ case NLA_SINT:
return nla_validate_int_range_signed(pt, nla, extack);
default:
WARN_ON(1);
@@ -355,6 +365,9 @@ static int nla_validate_mask(const struct nla_policy *pt,
case NLA_U64:
value = nla_get_u64(nla);
break;
+ case NLA_UINT:
+ value = nla_get_uint(nla);
+ break;
case NLA_BE16:
value = ntohs(nla_get_be16(nla));
break;
@@ -433,6 +446,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
goto out_err;
break;
+ case NLA_SINT:
+ case NLA_UINT:
+ if (attrlen != sizeof(u32) && attrlen != sizeof(u64)) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
+ return -EINVAL;
+ }
+ break;
+
case NLA_BITFIELD32:
if (attrlen != sizeof(struct nla_bitfield32))
goto out_err;
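For reference, a hedged sketch of a netlink policy using the new variable-width integer types; the attribute names are hypothetical. On receive, nla_get_uint()/nla_get_sint() return the value widened to u64/s64 regardless of the on-wire width:

enum {
	EXAMPLE_ATTR_UNSPEC,
	EXAMPLE_ATTR_COUNT,
	EXAMPLE_ATTR_OFFSET,
	__EXAMPLE_ATTR_MAX,
};
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
	/* the new types accept either a 32-bit or a 64-bit payload */
	[EXAMPLE_ATTR_COUNT]  = { .type = NLA_UINT },
	[EXAMPLE_ATTR_OFFSET] = { .type = NLA_SINT },
};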
diff --git a/lib/objpool.c b/lib/objpool.c
new file mode 100644
index 0000000000..cfdc024208
--- /dev/null
+++ b/lib/objpool.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/objpool.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/atomic.h>
+#include <linux/irqflags.h>
+#include <linux/cpumask.h>
+#include <linux/log2.h>
+
+/*
+ * objpool: ring-array based lockless MPMC/FIFO queues
+ *
+ * Copyright: wuqiang.matt@bytedance.com, mhiramat@kernel.org
+ */
+
+/* initialize percpu objpool_slot */
+static int
+objpool_init_percpu_slot(struct objpool_head *pool,
+ struct objpool_slot *slot,
+ int nodes, void *context,
+ objpool_init_obj_cb objinit)
+{
+ void *obj = (void *)&slot->entries[pool->capacity];
+ int i;
+
+ /* initialize elements of percpu objpool_slot */
+ slot->mask = pool->capacity - 1;
+
+ for (i = 0; i < nodes; i++) {
+ if (objinit) {
+ int rc = objinit(obj, context);
+ if (rc)
+ return rc;
+ }
+ slot->entries[slot->tail & slot->mask] = obj;
+ obj = obj + pool->obj_size;
+ slot->tail++;
+ slot->last = slot->tail;
+ pool->nr_objs++;
+ }
+
+ return 0;
+}
+
+/* allocate and initialize percpu slots */
+static int
+objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
+ void *context, objpool_init_obj_cb objinit)
+{
+ int i, cpu_count = 0;
+
+ for (i = 0; i < pool->nr_cpus; i++) {
+
+ struct objpool_slot *slot;
+ int nodes, size, rc;
+
+		/* skip cpu nodes that can never be present */
+ if (!cpu_possible(i))
+ continue;
+
+ /* compute how many objects to be allocated with this slot */
+ nodes = nr_objs / num_possible_cpus();
+ if (cpu_count < (nr_objs % num_possible_cpus()))
+ nodes++;
+ cpu_count++;
+
+ size = struct_size(slot, entries, pool->capacity) +
+ pool->obj_size * nodes;
+
+ /*
+		 * Here we allocate the percpu slot and its objects together in
+		 * a single allocation to make it more compact, taking advantage
+		 * of warm caches and TLB hits. By default vmalloc is used to
+		 * reduce pressure on the kernel slab allocator; note that the
+		 * minimal vmalloc allocation is one page, since vmalloc always
+		 * rounds the requested size up to page size.
+ */
+ if (pool->gfp & GFP_ATOMIC)
+ slot = kmalloc_node(size, pool->gfp, cpu_to_node(i));
+ else
+ slot = __vmalloc_node(size, sizeof(void *), pool->gfp,
+ cpu_to_node(i), __builtin_return_address(0));
+ if (!slot)
+ return -ENOMEM;
+ memset(slot, 0, size);
+ pool->cpu_slots[i] = slot;
+
+ /* initialize the objpool_slot of cpu node i */
+ rc = objpool_init_percpu_slot(pool, slot, nodes, context, objinit);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+/* cleanup all percpu slots of the object pool */
+static void objpool_fini_percpu_slots(struct objpool_head *pool)
+{
+ int i;
+
+ if (!pool->cpu_slots)
+ return;
+
+ for (i = 0; i < pool->nr_cpus; i++)
+ kvfree(pool->cpu_slots[i]);
+ kfree(pool->cpu_slots);
+}
+
+/* initialize object pool and pre-allocate objects */
+int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
+ gfp_t gfp, void *context, objpool_init_obj_cb objinit,
+ objpool_fini_cb release)
+{
+ int rc, capacity, slot_size;
+
+ /* check input parameters */
+ if (nr_objs <= 0 || nr_objs > OBJPOOL_NR_OBJECT_MAX ||
+ object_size <= 0 || object_size > OBJPOOL_OBJECT_SIZE_MAX)
+ return -EINVAL;
+
+ /* align up to unsigned long size */
+ object_size = ALIGN(object_size, sizeof(long));
+
+ /* calculate capacity of percpu objpool_slot */
+ capacity = roundup_pow_of_two(nr_objs);
+ if (!capacity)
+ return -EINVAL;
+
+ /* initialize objpool pool */
+ memset(pool, 0, sizeof(struct objpool_head));
+ pool->nr_cpus = nr_cpu_ids;
+ pool->obj_size = object_size;
+ pool->capacity = capacity;
+ pool->gfp = gfp & ~__GFP_ZERO;
+ pool->context = context;
+ pool->release = release;
+ slot_size = pool->nr_cpus * sizeof(struct objpool_slot);
+ pool->cpu_slots = kzalloc(slot_size, pool->gfp);
+ if (!pool->cpu_slots)
+ return -ENOMEM;
+
+ /* initialize per-cpu slots */
+ rc = objpool_init_percpu_slots(pool, nr_objs, context, objinit);
+ if (rc)
+ objpool_fini_percpu_slots(pool);
+ else
+ refcount_set(&pool->ref, pool->nr_objs + 1);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(objpool_init);
+
+/* adding object to slot, abort if the slot was already full */
+static inline int
+objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu)
+{
+ struct objpool_slot *slot = pool->cpu_slots[cpu];
+ uint32_t head, tail;
+
+ /* loading tail and head as a local snapshot, tail first */
+ tail = READ_ONCE(slot->tail);
+
+ do {
+ head = READ_ONCE(slot->head);
+ /* fault caught: something must be wrong */
+ WARN_ON_ONCE(tail - head > pool->nr_objs);
+ } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1));
+
+ /* now the tail position is reserved for the given obj */
+ WRITE_ONCE(slot->entries[tail & slot->mask], obj);
+ /* update sequence to make this obj available for pop() */
+ smp_store_release(&slot->last, tail + 1);
+
+ return 0;
+}
+
+/* reclaim an object to object pool */
+int objpool_push(void *obj, struct objpool_head *pool)
+{
+ unsigned long flags;
+ int rc;
+
+ /* disable local irq to avoid preemption & interruption */
+ raw_local_irq_save(flags);
+ rc = objpool_try_add_slot(obj, pool, raw_smp_processor_id());
+ raw_local_irq_restore(flags);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(objpool_push);
+
+/* try to retrieve object from slot */
+static inline void *objpool_try_get_slot(struct objpool_head *pool, int cpu)
+{
+ struct objpool_slot *slot = pool->cpu_slots[cpu];
+ /* load head snapshot, other cpus may change it */
+ uint32_t head = smp_load_acquire(&slot->head);
+
+ while (head != READ_ONCE(slot->last)) {
+ void *obj;
+
+ /*
+		 * data visibility of 'last' and 'head' could be out of
+		 * order since the memory updates of 'last' and 'head' are
+		 * performed independently in push() and pop()
+ *
+ * before any retrieving attempts, pop() must guarantee
+ * 'last' is behind 'head', that is to say, there must
+ * be available objects in slot, which could be ensured
+ * by condition 'last != head && last - head <= nr_objs'
+ * that is equivalent to 'last - head - 1 < nr_objs' as
+ * 'last' and 'head' are both unsigned int32
+ */
+ if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {
+ head = READ_ONCE(slot->head);
+ continue;
+ }
+
+		/* obj must be retrieved before moving head forward */
+ obj = READ_ONCE(slot->entries[head & slot->mask]);
+
+		/* move head forward to mark its consumption */
+ if (try_cmpxchg_release(&slot->head, &head, head + 1))
+ return obj;
+ }
+
+ return NULL;
+}
+
+/* allocate an object from object pool */
+void *objpool_pop(struct objpool_head *pool)
+{
+ void *obj = NULL;
+ unsigned long flags;
+ int i, cpu;
+
+ /* disable local irq to avoid preemption & interruption */
+ raw_local_irq_save(flags);
+
+ cpu = raw_smp_processor_id();
+ for (i = 0; i < num_possible_cpus(); i++) {
+ obj = objpool_try_get_slot(pool, cpu);
+ if (obj)
+ break;
+ cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1);
+ }
+ raw_local_irq_restore(flags);
+
+ return obj;
+}
+EXPORT_SYMBOL_GPL(objpool_pop);
+
+/* forcibly release the whole objpool */
+void objpool_free(struct objpool_head *pool)
+{
+ if (!pool->cpu_slots)
+ return;
+
+ /* release percpu slots */
+ objpool_fini_percpu_slots(pool);
+
+ /* call user's cleanup callback if provided */
+ if (pool->release)
+ pool->release(pool, pool->context);
+}
+EXPORT_SYMBOL_GPL(objpool_free);
+
+/* drop the allocated object rather than reclaiming it to the objpool */
+int objpool_drop(void *obj, struct objpool_head *pool)
+{
+ if (!obj || !pool)
+ return -EINVAL;
+
+ if (refcount_dec_and_test(&pool->ref)) {
+ objpool_free(pool);
+ return 0;
+ }
+
+ return -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(objpool_drop);
+
+/* drop unused objects and deref the objpool for releasing */
+void objpool_fini(struct objpool_head *pool)
+{
+ int count = 1; /* extra ref for objpool itself */
+
+	/* drop all remaining objects from the objpool */
+ while (objpool_pop(pool))
+ count++;
+
+ if (refcount_sub_and_test(count, &pool->ref))
+ objpool_free(pool);
+}
+EXPORT_SYMBOL_GPL(objpool_fini);
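A usage sketch of the API above; struct my_obj, the pool size and the missing callbacks are illustrative only:

struct my_obj {
	int payload;
};

static struct objpool_head my_pool;

static int my_pool_setup(void)
{
	/* 64 preallocated objects, no per-object init, no release callback. */
	return objpool_init(&my_pool, 64, sizeof(struct my_obj),
			    GFP_KERNEL, NULL, NULL, NULL);
}

static void my_pool_use(void)
{
	struct my_obj *obj = objpool_pop(&my_pool);	/* may return NULL */

	if (obj)
		objpool_push(obj, &my_pool);	/* return it for reuse */
}

static void my_pool_teardown(void)
{
	objpool_fini(&my_pool);		/* drops remaining objects, then frees */
}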
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 9073430dc8..44dd133594 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -278,6 +278,85 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
}
EXPORT_SYMBOL(__percpu_counter_compare);
+/*
+ * Compare the counter and add amount only if the resulting total would be
+ * less than or equal to limit (for a positive amount) or greater than or
+ * equal to limit (for a negative amount).
+ * Return true if amount was added, or false if the total would go beyond
+ * the limit.
+ *
+ * Negative limit is allowed, but unusual.
+ * When negative amounts (subs) are given to percpu_counter_limited_add(),
+ * the limit would most naturally be 0 - but other limits are also allowed.
+ *
+ * Overflow beyond S64_MAX is not allowed: counter, limit and amount
+ * are all assumed to be sane (far from S64_MIN and S64_MAX).
+ */
+bool __percpu_counter_limited_add(struct percpu_counter *fbc,
+ s64 limit, s64 amount, s32 batch)
+{
+ s64 count;
+ s64 unknown;
+ unsigned long flags;
+ bool good = false;
+
+ if (amount == 0)
+ return true;
+
+ local_irq_save(flags);
+ unknown = batch * num_online_cpus();
+ count = __this_cpu_read(*fbc->counters);
+
+ /* Skip taking the lock when safe */
+ if (abs(count + amount) <= batch &&
+ ((amount > 0 && fbc->count + unknown <= limit) ||
+ (amount < 0 && fbc->count - unknown >= limit))) {
+ this_cpu_add(*fbc->counters, amount);
+ local_irq_restore(flags);
+ return true;
+ }
+
+ raw_spin_lock(&fbc->lock);
+ count = fbc->count + amount;
+
+ /* Skip percpu_counter_sum() when safe */
+ if (amount > 0) {
+ if (count - unknown > limit)
+ goto out;
+ if (count + unknown <= limit)
+ good = true;
+ } else {
+ if (count + unknown < limit)
+ goto out;
+ if (count - unknown >= limit)
+ good = true;
+ }
+
+ if (!good) {
+ s32 *pcount;
+ int cpu;
+
+ for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
+ pcount = per_cpu_ptr(fbc->counters, cpu);
+ count += *pcount;
+ }
+ if (amount > 0) {
+ if (count > limit)
+ goto out;
+ } else {
+ if (count < limit)
+ goto out;
+ }
+ good = true;
+ }
+
+ count = __this_cpu_read(*fbc->counters);
+ fbc->count += count + amount;
+ __this_cpu_sub(*fbc->counters, count);
+out:
+ raw_spin_unlock(&fbc->lock);
+ local_irq_restore(flags);
+ return good;
+}
+
static int __init percpu_counter_startup(void)
{
int ret;
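A sketch of how a caller might use the new helper; the counter, limit and the default batch are illustrative only:

static struct percpu_counter used_blocks;

static bool reserve_blocks(s64 nr, s64 limit)
{
	/* Adds 'nr' only if the resulting total stays at or below 'limit'. */
	return __percpu_counter_limited_add(&used_blocks, limit, nr,
					    percpu_counter_batch);
}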
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 035b0a4db4..1c5420ff25 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_RAID6_PQ) += raid6_pq.o
raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
- int8.o int16.o int32.o
+ int8.o
raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o
raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \
@@ -55,7 +55,7 @@ endif
quiet_cmd_unroll = UNROLL $@
cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@
-targets += int1.c int2.c int4.c int8.c int16.c int32.c
+targets += int1.c int2.c int4.c int8.c
$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 0ec534faf0..cd2e88ee1f 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -81,10 +81,6 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_lsx,
#endif
#endif
-#if defined(__ia64__)
- &raid6_intx32,
- &raid6_intx16,
-#endif
&raid6_intx8,
&raid6_intx4,
&raid6_intx2,
diff --git a/lib/raid6/int.uc b/lib/raid6/int.uc
index 558aeac934..1ba56c3fa4 100644
--- a/lib/raid6/int.uc
+++ b/lib/raid6/int.uc
@@ -42,13 +42,6 @@ typedef u32 unative_t;
/*
- * IA-64 wants insane amounts of unrolling. On other architectures that
- * is just a waste of space.
- */
-#if ($# <= 8) || defined(__ia64__)
-
-
-/*
* These sub-operations are separate inlines since they can sometimes be
* specially optimized using architecture-specific hacks.
*/
@@ -152,5 +145,3 @@ const struct raid6_calls raid6_intx$# = {
"int" NSTRING "x$#",
0
};
-
-#endif
diff --git a/lib/rcuref.c b/lib/rcuref.c
index 5ec00a4a64..97f300eca9 100644
--- a/lib/rcuref.c
+++ b/lib/rcuref.c
@@ -248,7 +248,7 @@ bool rcuref_put_slowpath(rcuref_t *ref)
* require a retry. If this fails the caller is not
* allowed to deconstruct the object.
*/
- if (atomic_cmpxchg_release(&ref->refcnt, RCUREF_NOREF, RCUREF_DEAD) != RCUREF_NOREF)
+ if (!atomic_try_cmpxchg_release(&ref->refcnt, &cnt, RCUREF_DEAD))
return false;
/*
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 45c450f423..010c730ca7 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -109,9 +109,7 @@ void seq_buf_do_printk(struct seq_buf *s, const char *lvl)
if (s->size == 0 || s->len == 0)
return;
- seq_buf_terminate(s);
-
- start = s->buffer;
+ start = seq_buf_str(s);
while ((lf = strchr(start, '\n'))) {
int len = lf - start + 1;
@@ -189,6 +187,7 @@ int seq_buf_puts(struct seq_buf *s, const char *str)
seq_buf_set_overflow(s);
return -1;
}
+EXPORT_SYMBOL_GPL(seq_buf_puts);
/**
* seq_buf_putc - sequence printing of simple character
@@ -210,6 +209,7 @@ int seq_buf_putc(struct seq_buf *s, unsigned char c)
seq_buf_set_overflow(s);
return -1;
}
+EXPORT_SYMBOL_GPL(seq_buf_putc);
/**
 * seq_buf_putmem - write raw data into the sequence buffer
@@ -324,23 +324,24 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
* seq_buf_to_user - copy the sequence buffer to user space
* @s: seq_buf descriptor
* @ubuf: The userspace memory location to copy to
+ * @start: The first byte in the buffer to copy
* @cnt: The amount to copy
*
* Copies the sequence buffer into the userspace memory pointed to
- * by @ubuf. It starts from the last read position (@s->readpos)
- * and writes up to @cnt characters or till it reaches the end of
- * the content in the buffer (@s->len), which ever comes first.
+ * by @ubuf. It starts from @start and writes up to @cnt characters
+ * or until it reaches the end of the content in the buffer (@s->len),
+ * whichever comes first.
*
* On success, it returns a positive number of the number of bytes
* it copied.
*
* On failure it returns -EBUSY if all of the content in the
* sequence has been already read, which includes nothing in the
- * sequence (@s->len == @s->readpos).
+ * sequence (@s->len == @start).
*
* Returns -EFAULT if the copy to userspace fails.
*/
-int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
+int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, size_t start, int cnt)
{
int len;
int ret;
@@ -350,20 +351,17 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
len = seq_buf_used(s);
- if (len <= s->readpos)
+ if (len <= start)
return -EBUSY;
- len -= s->readpos;
+ len -= start;
if (cnt > len)
cnt = len;
- ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+ ret = copy_to_user(ubuf, s->buffer + start, cnt);
if (ret == cnt)
return -EFAULT;
- cnt -= ret;
-
- s->readpos += cnt;
- return cnt;
+ return cnt - ret;
}
/**
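With the readpos member gone, callers track their own offset. A sketch, where 'pos' is a hypothetical caller-owned position:

static int example_read(struct seq_buf *s, char __user *ubuf,
			size_t *pos, int count)
{
	int ret = seq_buf_to_user(s, ubuf, *pos, count);

	if (ret > 0)
		*pos += ret;	/* advance the caller-maintained offset */
	return ret;
}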
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 9982344cca..7713f73e66 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -31,9 +31,11 @@
* giving the size in the required units. @buf should have room for
* at least 9 bytes and will always be zero terminated.
*
+ * Return value: number of characters of output that would have been written
+ * (which may be greater than len, if output was truncated).
*/
-void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
- char *buf, int len)
+int string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
+ char *buf, int len)
{
static const char *const units_10[] = {
"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"
@@ -126,8 +128,8 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
else
unit = units_str[units][i];
- snprintf(buf, len, "%u%s %s", (u32)size,
- tmp, unit);
+ return snprintf(buf, len, "%u%s %s", (u32)size,
+ tmp, unit);
}
EXPORT_SYMBOL(string_get_size);
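A sketch of checking the new return value for truncation; the buffer size, block size and unit choice are arbitrary:

static void report_capacity(u64 nr_sectors)
{
	char buf[16];
	int want;

	/* snprintf-style return: >= sizeof(buf) means the output was truncated */
	want = string_get_size(nr_sectors, 512, STRING_UNITS_2, buf, sizeof(buf));
	if (want >= (int)sizeof(buf))
		pr_warn("capacity string truncated\n");
	else
		pr_info("capacity: %s\n", buf);
}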
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index f2ea9f30c7..65f22c2578 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -330,6 +330,29 @@ static void __init test_copy(void)
expect_eq_pbl("0-108,128-1023", bmap2, 1024);
}
+static void __init test_bitmap_region(void)
+{
+ int pos, order;
+
+ DECLARE_BITMAP(bmap, 1000);
+
+ bitmap_zero(bmap, 1000);
+
+ for (order = 0; order < 10; order++) {
+ pos = bitmap_find_free_region(bmap, 1000, order);
+ if (order == 0)
+ expect_eq_uint(pos, 0);
+ else
+ expect_eq_uint(pos, order < 9 ? BIT(order) : -ENOMEM);
+ }
+
+ bitmap_release_region(bmap, 0, 0);
+ for (order = 1; order < 9; order++)
+ bitmap_release_region(bmap, BIT(order), order);
+
+ expect_eq_uint(bitmap_weight(bmap, 1000), 0);
+}
+
#define EXP2_IN_BITS (sizeof(exp2) * 8)
static void __init test_replace(void)
@@ -1227,6 +1250,7 @@ static void __init selftest(void)
test_zero_clear();
test_fill_set();
test_copy();
+ test_bitmap_region();
test_replace();
test_bitmap_arr32();
test_bitmap_arr64();
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index ecde421620..3c5a1ca062 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5111,6 +5111,104 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0xffffffff } }
},
+ /* MOVSX32 */
+ {
+ "ALU_MOVSX | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x00000000ffffffefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX32_REG(R1, R3, 8),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_MOVSX | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x00000000ffffbeefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX32_REG(R1, R3, 16),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_MOVSX | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x00000000deadbeefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX32_REG(R1, R3, 32),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ /* MOVSX64 REG */
+ {
+ "ALU64_MOVSX | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffffefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX64_REG(R1, R3, 8),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MOVSX | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffffffbeefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX64_REG(R1, R3, 16),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_MOVSX | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0xffffffffdeadbeefLL),
+ BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
+ BPF_MOVSX64_REG(R1, R3, 32),
+ BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
/* BPF_ALU | BPF_ADD | BPF_X */
{
"ALU_ADD_X: 1 + 2 = 3",
@@ -6105,6 +6203,106 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 2 } },
},
+ /* BPF_ALU | BPF_DIV | BPF_X off=1 (SDIV) */
+ {
+ "ALU_SDIV_X: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG_OFF(BPF_DIV, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU | BPF_DIV | BPF_K off=1 (SDIV) */
+ {
+ "ALU_SDIV_K: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU32_IMM_OFF(BPF_DIV, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU64 | BPF_DIV | BPF_X off=1 (SDIV64) */
+ {
+ "ALU64_SDIV_X: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG_OFF(BPF_DIV, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU64 | BPF_DIV | BPF_K off=1 (SDIV64) */
+ {
+ "ALU64_SDIV_K: -6 / 2 = -3",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -6),
+ BPF_ALU64_IMM_OFF(BPF_DIV, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -3 } },
+ },
+ /* BPF_ALU | BPF_MOD | BPF_X off=1 (SMOD) */
+ {
+ "ALU_SMOD_X: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU32_REG_OFF(BPF_MOD, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ /* BPF_ALU | BPF_MOD | BPF_K off=1 (SMOD) */
+ {
+ "ALU_SMOD_K: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU32_IMM_OFF(BPF_MOD, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ /* BPF_ALU64 | BPF_MOD | BPF_X off=1 (SMOD64) */
+ {
+ "ALU64_SMOD_X: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU32_IMM(BPF_MOV, R1, 2),
+ BPF_ALU64_REG_OFF(BPF_MOD, R0, R1, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
+ /* BPF_ALU64 | BPF_MOD | BPF_K off=1 (SMOD64) */
+ {
+ "ALU64_SMOD_K: -7 % 2 = -1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, -7),
+ BPF_ALU64_IMM_OFF(BPF_MOD, R0, 2, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } },
+ },
/* BPF_ALU | BPF_AND | BPF_X */
{
"ALU_AND_X: 3 & 2 = 2",
@@ -7837,6 +8035,104 @@ static struct bpf_test tests[] = {
{ },
{ { 0, (u32) (cpu_to_le64(0xfedcba9876543210ULL) >> 32) } },
},
+ /* BSWAP */
+ {
+ "BSWAP 16: 0x0123456789abcdef -> 0xefcd",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xefcd } },
+ },
+ {
+ "BSWAP 32: 0x0123456789abcdef -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xefcdab89 } },
+ },
+ {
+ "BSWAP 64: 0x0123456789abcdef -> 0x67452301",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x67452301 } },
+ },
+ {
+ "BSWAP 64: 0x0123456789abcdef >> 32 -> 0xefcdab89",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_BSWAP(R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xefcdab89 } },
+ },
+ /* BSWAP, reversed */
+ {
+ "BSWAP 16: 0xfedcba9876543210 -> 0x1032",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 16),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1032 } },
+ },
+ {
+ "BSWAP 32: 0xfedcba9876543210 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 32),
+ BPF_ALU64_REG(BPF_MOV, R1, R0),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x10325476 } },
+ },
+ {
+ "BSWAP 64: 0xfedcba9876543210 -> 0x98badcfe",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 64),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x98badcfe } },
+ },
+ {
+ "BSWAP 64: 0xfedcba9876543210 >> 32 -> 0x10325476",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
+ BPF_BSWAP(R0, 64),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x10325476 } },
+ },
/* BPF_LDX_MEM B/H/W/DW */
{
"BPF_LDX_MEM | BPF_B, base",
@@ -8228,6 +8524,67 @@ static struct bpf_test tests[] = {
{ { 32, 0 } },
.stack_depth = 0,
},
+ /* BPF_LDX_MEMSX B/H/W */
+ {
+ "BPF_LDX_MEMSX | BPF_B",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0xdead0000000000f0ULL),
+ BPF_LD_IMM64(R2, 0xfffffffffffffff0ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEMSX(BPF_B, R0, R10, -1),
+#else
+ BPF_LDX_MEMSX(BPF_B, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEMSX | BPF_H",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0xdead00000000f123ULL),
+ BPF_LD_IMM64(R2, 0xfffffffffffff123ULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEMSX(BPF_H, R0, R10, -2),
+#else
+ BPF_LDX_MEMSX(BPF_H, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
+ {
+ "BPF_LDX_MEMSX | BPF_W",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x00000000deadbeefULL),
+ BPF_LD_IMM64(R2, 0xffffffffdeadbeefULL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -8),
+#ifdef __BIG_ENDIAN
+ BPF_LDX_MEMSX(BPF_W, R0, R10, -4),
+#else
+ BPF_LDX_MEMSX(BPF_W, R0, R10, -8),
+#endif
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } },
+ .stack_depth = 8,
+ },
/* BPF_STX_MEM B/H/W/DW */
{
"BPF_STX_MEM | BPF_B",
@@ -9474,6 +9831,20 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ /* BPF_JMP32 | BPF_JA */
+ {
+ "JMP32_JA: Unconditional jump: if (true) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_JMP32_IMM(BPF_JA, 0, 1, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JSLT | BPF_K */
{
"JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",
diff --git a/lib/test_objpool.c b/lib/test_objpool.c
new file mode 100644
index 0000000000..bfdb815998
--- /dev/null
+++ b/lib/test_objpool.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Test module for lockless object pool
+ *
+ * Copyright: wuqiang.matt@bytedance.com
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/objpool.h>
+
+#define OT_NR_MAX_BULK (16)
+
+/* memory usage */
+struct ot_mem_stat {
+ atomic_long_t alloc;
+ atomic_long_t free;
+};
+
+/* object allocation results */
+struct ot_obj_stat {
+ unsigned long nhits;
+ unsigned long nmiss;
+};
+
+/* control & results per testcase */
+struct ot_data {
+ struct rw_semaphore start;
+ struct completion wait;
+ struct completion rcu;
+ atomic_t nthreads ____cacheline_aligned_in_smp;
+ atomic_t stop ____cacheline_aligned_in_smp;
+ struct ot_mem_stat kmalloc;
+ struct ot_mem_stat vmalloc;
+ struct ot_obj_stat objects;
+ u64 duration;
+};
+
+/* testcase */
+struct ot_test {
+ int async; /* synchronous or asynchronous */
+ int mode; /* only mode 0 supported */
+ int objsz; /* object size */
+ int duration; /* ms */
+ int delay; /* ms */
+ int bulk_normal;
+ int bulk_irq;
+ unsigned long hrtimer; /* ms */
+ const char *name;
+ struct ot_data data;
+};
+
+/* per-cpu worker */
+struct ot_item {
+ struct objpool_head *pool; /* pool head */
+ struct ot_test *test; /* test parameters */
+
+ void (*worker)(struct ot_item *item, int irq);
+
+ /* hrtimer control */
+ ktime_t hrtcycle;
+ struct hrtimer hrtimer;
+
+ int bulk[2]; /* for thread and irq */
+ int delay;
+ u32 niters;
+
+ /* summary per thread */
+ struct ot_obj_stat stat[2]; /* thread and irq */
+ u64 duration;
+};
+
+/*
+ * memory leakage checking
+ */
+
+static void *ot_kzalloc(struct ot_test *test, long size)
+{
+ void *ptr = kzalloc(size, GFP_KERNEL);
+
+ if (ptr)
+ atomic_long_add(size, &test->data.kmalloc.alloc);
+ return ptr;
+}
+
+static void ot_kfree(struct ot_test *test, void *ptr, long size)
+{
+ if (!ptr)
+ return;
+ atomic_long_add(size, &test->data.kmalloc.free);
+ kfree(ptr);
+}
+
+static void ot_mem_report(struct ot_test *test)
+{
+ long alloc, free;
+
+ pr_info("memory allocation summary for %s\n", test->name);
+
+ alloc = atomic_long_read(&test->data.kmalloc.alloc);
+ free = atomic_long_read(&test->data.kmalloc.free);
+ pr_info(" kmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free);
+
+ alloc = atomic_long_read(&test->data.vmalloc.alloc);
+ free = atomic_long_read(&test->data.vmalloc.free);
+ pr_info(" vmalloc: %lu - %lu = %lu\n", alloc, free, alloc - free);
+}
+
+/* user object instance */
+struct ot_node {
+ void *owner;
+ unsigned long data;
+ unsigned long refs;
+ unsigned long payload[32];
+};
+
+/* user objpool manager */
+struct ot_context {
+ struct objpool_head pool; /* objpool head */
+ struct ot_test *test; /* test parameters */
+ void *ptr; /* user pool buffer */
+ unsigned long size; /* buffer size */
+ struct rcu_head rcu;
+};
+
+static DEFINE_PER_CPU(struct ot_item, ot_pcup_items);
+
+static int ot_init_data(struct ot_data *data)
+{
+ memset(data, 0, sizeof(*data));
+ init_rwsem(&data->start);
+ init_completion(&data->wait);
+ init_completion(&data->rcu);
+ atomic_set(&data->nthreads, 1);
+
+ return 0;
+}
+
+static int ot_init_node(void *nod, void *context)
+{
+ struct ot_context *sop = context;
+ struct ot_node *on = nod;
+
+ on->owner = &sop->pool;
+ return 0;
+}
+
+static enum hrtimer_restart ot_hrtimer_handler(struct hrtimer *hrt)
+{
+ struct ot_item *item = container_of(hrt, struct ot_item, hrtimer);
+ struct ot_test *test = item->test;
+
+ if (atomic_read_acquire(&test->data.stop))
+ return HRTIMER_NORESTART;
+
+	/* do bulk tests of object pop/push */
+ item->worker(item, 1);
+
+ hrtimer_forward(hrt, hrt->base->get_time(), item->hrtcycle);
+ return HRTIMER_RESTART;
+}
+
+static void ot_start_hrtimer(struct ot_item *item)
+{
+ if (!item->test->hrtimer)
+ return;
+ hrtimer_start(&item->hrtimer, item->hrtcycle, HRTIMER_MODE_REL);
+}
+
+static void ot_stop_hrtimer(struct ot_item *item)
+{
+ if (!item->test->hrtimer)
+ return;
+ hrtimer_cancel(&item->hrtimer);
+}
+
+static int ot_init_hrtimer(struct ot_item *item, unsigned long hrtimer)
+{
+ struct hrtimer *hrt = &item->hrtimer;
+
+ if (!hrtimer)
+ return -ENOENT;
+
+ item->hrtcycle = ktime_set(0, hrtimer * 1000000UL);
+ hrtimer_init(hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrt->function = ot_hrtimer_handler;
+ return 0;
+}
+
+static int ot_init_cpu_item(struct ot_item *item,
+ struct ot_test *test,
+ struct objpool_head *pool,
+ void (*worker)(struct ot_item *, int))
+{
+ memset(item, 0, sizeof(*item));
+ item->pool = pool;
+ item->test = test;
+ item->worker = worker;
+
+ item->bulk[0] = test->bulk_normal;
+ item->bulk[1] = test->bulk_irq;
+ item->delay = test->delay;
+
+ /* initialize hrtimer */
+ ot_init_hrtimer(item, item->test->hrtimer);
+ return 0;
+}
+
+static int ot_thread_worker(void *arg)
+{
+ struct ot_item *item = arg;
+ struct ot_test *test = item->test;
+ ktime_t start;
+
+ atomic_inc(&test->data.nthreads);
+ down_read(&test->data.start);
+ up_read(&test->data.start);
+ start = ktime_get();
+ ot_start_hrtimer(item);
+ do {
+ if (atomic_read_acquire(&test->data.stop))
+ break;
+		/* do bulk tests of object pop/push */
+ item->worker(item, 0);
+ } while (!kthread_should_stop());
+ ot_stop_hrtimer(item);
+ item->duration = (u64) ktime_us_delta(ktime_get(), start);
+ if (atomic_dec_and_test(&test->data.nthreads))
+ complete(&test->data.wait);
+
+ return 0;
+}
+
+static void ot_perf_report(struct ot_test *test, u64 duration)
+{
+ struct ot_obj_stat total, normal = {0}, irq = {0};
+ int cpu, nthreads = 0;
+
+ pr_info("\n");
+ pr_info("Testing summary for %s\n", test->name);
+
+ for_each_possible_cpu(cpu) {
+ struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
+ if (!item->duration)
+ continue;
+ normal.nhits += item->stat[0].nhits;
+ normal.nmiss += item->stat[0].nmiss;
+ irq.nhits += item->stat[1].nhits;
+ irq.nmiss += item->stat[1].nmiss;
+ pr_info("CPU: %d duration: %lluus\n", cpu, item->duration);
+ pr_info("\tthread:\t%16lu hits \t%16lu miss\n",
+ item->stat[0].nhits, item->stat[0].nmiss);
+ pr_info("\tirq: \t%16lu hits \t%16lu miss\n",
+ item->stat[1].nhits, item->stat[1].nmiss);
+ pr_info("\ttotal: \t%16lu hits \t%16lu miss\n",
+ item->stat[0].nhits + item->stat[1].nhits,
+ item->stat[0].nmiss + item->stat[1].nmiss);
+ nthreads++;
+ }
+
+ total.nhits = normal.nhits + irq.nhits;
+ total.nmiss = normal.nmiss + irq.nmiss;
+
+ pr_info("ALL: \tnthreads: %d duration: %lluus\n", nthreads, duration);
+ pr_info("SUM: \t%16lu hits \t%16lu miss\n",
+ total.nhits, total.nmiss);
+
+ test->data.objects = total;
+ test->data.duration = duration;
+}
+
+/*
+ * synchronous test cases for objpool manipulation
+ */
+
+/* objpool manipulation for synchronous mode (percpu objpool) */
+static struct ot_context *ot_init_sync_m0(struct ot_test *test)
+{
+ struct ot_context *sop = NULL;
+ int max = num_possible_cpus() << 3;
+ gfp_t gfp = GFP_KERNEL;
+
+ sop = (struct ot_context *)ot_kzalloc(test, sizeof(*sop));
+ if (!sop)
+ return NULL;
+ sop->test = test;
+ if (test->objsz < 512)
+ gfp = GFP_ATOMIC;
+
+ if (objpool_init(&sop->pool, max, test->objsz,
+ gfp, sop, ot_init_node, NULL)) {
+ ot_kfree(test, sop, sizeof(*sop));
+ return NULL;
+ }
+ WARN_ON(max != sop->pool.nr_objs);
+
+ return sop;
+}
+
+static void ot_fini_sync(struct ot_context *sop)
+{
+ objpool_fini(&sop->pool);
+ ot_kfree(sop->test, sop, sizeof(*sop));
+}
+
+static struct {
+ struct ot_context * (*init)(struct ot_test *oc);
+ void (*fini)(struct ot_context *sop);
+} g_ot_sync_ops[] = {
+ {.init = ot_init_sync_m0, .fini = ot_fini_sync},
+};
+
+/*
+ * synchronous test cases: performance mode
+ */
+
+static void ot_bulk_sync(struct ot_item *item, int irq)
+{
+ struct ot_node *nods[OT_NR_MAX_BULK];
+ int i;
+
+ for (i = 0; i < item->bulk[irq]; i++)
+ nods[i] = objpool_pop(item->pool);
+
+ if (!irq && (item->delay || !(++(item->niters) & 0x7FFF)))
+ msleep(item->delay);
+
+ while (i-- > 0) {
+ struct ot_node *on = nods[i];
+ if (on) {
+ on->refs++;
+ objpool_push(on, item->pool);
+ item->stat[irq].nhits++;
+ } else {
+ item->stat[irq].nmiss++;
+ }
+ }
+}
+
+static int ot_start_sync(struct ot_test *test)
+{
+ struct ot_context *sop;
+ ktime_t start;
+ u64 duration;
+ unsigned long timeout;
+ int cpu;
+
+	/* initialize objpool for the synchronous testcase */
+ sop = g_ot_sync_ops[test->mode].init(test);
+ if (!sop)
+ return -ENOMEM;
+
+ /* grab rwsem to block testing threads */
+ down_write(&test->data.start);
+
+ for_each_possible_cpu(cpu) {
+ struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
+ struct task_struct *work;
+
+ ot_init_cpu_item(item, test, &sop->pool, ot_bulk_sync);
+
+ /* skip offline cpus */
+ if (!cpu_online(cpu))
+ continue;
+
+ work = kthread_create_on_node(ot_thread_worker, item,
+ cpu_to_node(cpu), "ot_worker_%d", cpu);
+ if (IS_ERR(work)) {
+ pr_err("failed to create thread for cpu %d\n", cpu);
+ } else {
+ kthread_bind(work, cpu);
+ wake_up_process(work);
+ }
+ }
+
+	/* wait a while to make sure all threads are waiting at the start line */
+ msleep(20);
+
+	/* in case no threads were created (e.g. insufficient memory) */
+ if (atomic_dec_and_test(&test->data.nthreads))
+ complete(&test->data.wait);
+
+ // sched_set_fifo_low(current);
+
+ /* start objpool testing threads */
+ start = ktime_get();
+ up_write(&test->data.start);
+
+	/* yield the cpu to worker threads for 'duration' ms */
+ timeout = msecs_to_jiffies(test->duration);
+ schedule_timeout_interruptible(timeout);
+
+	/* tell worker threads to quit */
+ atomic_set_release(&test->data.stop, 1);
+
+	/* wait for all worker threads to finish and quit */
+ wait_for_completion(&test->data.wait);
+ duration = (u64) ktime_us_delta(ktime_get(), start);
+
+ /* cleanup objpool */
+ g_ot_sync_ops[test->mode].fini(sop);
+
+ /* report testing summary and performance results */
+ ot_perf_report(test, duration);
+
+ /* report memory allocation summary */
+ ot_mem_report(test);
+
+ return 0;
+}
+
+/*
+ * asynchronous test cases: pool lifecycle controlled by refcount
+ */
+
+static void ot_fini_async_rcu(struct rcu_head *rcu)
+{
+ struct ot_context *sop = container_of(rcu, struct ot_context, rcu);
+ struct ot_test *test = sop->test;
+
+ /* here all cpus are aware of the stop event: test->data.stop = 1 */
+ WARN_ON(!atomic_read_acquire(&test->data.stop));
+
+ objpool_fini(&sop->pool);
+ complete(&test->data.rcu);
+}
+
+static void ot_fini_async(struct ot_context *sop)
+{
+ /* make sure the stop event is acknowledged by all cores */
+ call_rcu(&sop->rcu, ot_fini_async_rcu);
+}
+
+static int ot_objpool_release(struct objpool_head *head, void *context)
+{
+ struct ot_context *sop = context;
+
+ WARN_ON(!head || !sop || head != &sop->pool);
+
+ /* do context cleaning if needed */
+ if (sop)
+ ot_kfree(sop->test, sop, sizeof(*sop));
+
+ return 0;
+}
+
+static struct ot_context *ot_init_async_m0(struct ot_test *test)
+{
+ struct ot_context *sop = NULL;
+ int max = num_possible_cpus() << 3;
+ gfp_t gfp = GFP_KERNEL;
+
+ sop = (struct ot_context *)ot_kzalloc(test, sizeof(*sop));
+ if (!sop)
+ return NULL;
+ sop->test = test;
+ if (test->objsz < 512)
+ gfp = GFP_ATOMIC;
+
+ if (objpool_init(&sop->pool, max, test->objsz, gfp, sop,
+ ot_init_node, ot_objpool_release)) {
+ ot_kfree(test, sop, sizeof(*sop));
+ return NULL;
+ }
+ WARN_ON(max != sop->pool.nr_objs);
+
+ return sop;
+}
+
+static struct {
+ struct ot_context * (*init)(struct ot_test *oc);
+ void (*fini)(struct ot_context *sop);
+} g_ot_async_ops[] = {
+ {.init = ot_init_async_m0, .fini = ot_fini_async},
+};
+
+static void ot_nod_recycle(struct ot_node *on, struct objpool_head *pool,
+ int release)
+{
+ struct ot_context *sop;
+
+ on->refs++;
+
+ if (!release) {
+ /* push the object back to the objpool for reuse */
+ objpool_push(on, pool);
+ return;
+ }
+
+ sop = container_of(pool, struct ot_context, pool);
+ WARN_ON(sop != pool->context);
+
+ /* drop the node for good and unref the objpool */
+ objpool_drop(on, pool);
+}
+
+static void ot_bulk_async(struct ot_item *item, int irq)
+{
+ struct ot_test *test = item->test;
+ struct ot_node *nods[OT_NR_MAX_BULK];
+ int i, stop;
+
+ for (i = 0; i < item->bulk[irq]; i++)
+ nods[i] = objpool_pop(item->pool);
+
+ if (!irq) {
+ if (item->delay || !(++(item->niters) & 0x7FFF))
+ msleep(item->delay);
+ get_cpu();
+ }
+
+ stop = atomic_read_acquire(&test->data.stop);
+
+ /* drop all objects and deref objpool */
+ while (i-- > 0) {
+ struct ot_node *on = nods[i];
+
+ if (on) {
+ on->refs++;
+ ot_nod_recycle(on, item->pool, stop);
+ item->stat[irq].nhits++;
+ } else {
+ item->stat[irq].nmiss++;
+ }
+ }
+
+ if (!irq)
+ put_cpu();
+}
+
+static int ot_start_async(struct ot_test *test)
+{
+ struct ot_context *sop;
+ ktime_t start;
+ u64 duration;
+ unsigned long timeout;
+ int cpu;
+
+ /* initialize objpool for the asynchronous testcase */
+ sop = g_ot_async_ops[test->mode].init(test);
+ if (!sop)
+ return -ENOMEM;
+
+ /* grab rwsem to block testing threads */
+ down_write(&test->data.start);
+
+ for_each_possible_cpu(cpu) {
+ struct ot_item *item = per_cpu_ptr(&ot_pcup_items, cpu);
+ struct task_struct *work;
+
+ ot_init_cpu_item(item, test, &sop->pool, ot_bulk_async);
+
+ /* skip offline cpus */
+ if (!cpu_online(cpu))
+ continue;
+
+ work = kthread_create_on_node(ot_thread_worker, item,
+ cpu_to_node(cpu), "ot_worker_%d", cpu);
+ if (IS_ERR(work)) {
+ pr_err("failed to create thread for cpu %d\n", cpu);
+ } else {
+ kthread_bind(work, cpu);
+ wake_up_process(work);
+ }
+ }
+
+ /* wait a while to make sure all threads are waiting at the start line */
+ msleep(20);
+
+ /* in case no threads were created: insufficient memory? */
+ if (atomic_dec_and_test(&test->data.nthreads))
+ complete(&test->data.wait);
+
+ /* start objpool testing threads */
+ start = ktime_get();
+ up_write(&test->data.start);
+
+ /* yield the CPU to worker threads for 'duration' ms */
+ timeout = msecs_to_jiffies(test->duration);
+ schedule_timeout_interruptible(timeout);
+
+ /* tell worker threads to quit */
+ atomic_set_release(&test->data.stop, 1);
+
+ /* do async-finalization */
+ g_ot_async_ops[test->mode].fini(sop);
+
+ /* wait for all worker threads to finish and quit */
+ wait_for_completion(&test->data.wait);
+ duration = (u64) ktime_us_delta(ktime_get(), start);
+
+ /* make sure the RCU callback has completed */
+ wait_for_completion(&test->data.rcu);
+
+ /*
+ * now we are sure the objpool has been finalized, either by the
+ * RCU callback or by the worker threads
+ */
+
+ /* report testing summary and performance results */
+ ot_perf_report(test, duration);
+
+ /* report memory allocation summary */
+ ot_mem_report(test);
+
+ return 0;
+}
+
+/*
+ * predefined testing cases:
+ * synchronous case / overrun case / async case
+ *
+ * async: synchronous or asynchronous testing
+ * mode: only mode 0 supported
+ * objsz: object size
+ * duration: int, total test time in ms
+ * delay: int, delay (in ms) between each iteration
+ * bulk_normal: int, repeat times for thread worker
+ * bulk_irq: int, repeat times for irq consumer
+ * hrtimer: unsigned long, hrtimer interval in ms
+ * name: char *, tag for current test ot_item
+ */
+
+#define NODE_COMPACT sizeof(struct ot_node)
+#define NODE_VMALLOC (512)
+
+static struct ot_test g_testcases[] = {
+
+ /* sync & normal */
+ {0, 0, NODE_COMPACT, 1000, 0, 1, 0, 0, "sync: percpu objpool"},
+ {0, 0, NODE_VMALLOC, 1000, 0, 1, 0, 0, "sync: percpu objpool from vmalloc"},
+
+ /* sync & hrtimer */
+ {0, 0, NODE_COMPACT, 1000, 0, 1, 1, 4, "sync & hrtimer: percpu objpool"},
+ {0, 0, NODE_VMALLOC, 1000, 0, 1, 1, 4, "sync & hrtimer: percpu objpool from vmalloc"},
+
+ /* sync & overrun */
+ {0, 0, NODE_COMPACT, 1000, 0, 16, 0, 0, "sync overrun: percpu objpool"},
+ {0, 0, NODE_VMALLOC, 1000, 0, 16, 0, 0, "sync overrun: percpu objpool from vmalloc"},
+
+ /* async mode */
+ {1, 0, NODE_COMPACT, 1000, 100, 1, 0, 0, "async: percpu objpool"},
+ {1, 0, NODE_VMALLOC, 1000, 100, 1, 0, 0, "async: percpu objpool from vmalloc"},
+
+ /* async + hrtimer mode */
+ {1, 0, NODE_COMPACT, 1000, 0, 4, 4, 4, "async & hrtimer: percpu objpool"},
+ {1, 0, NODE_VMALLOC, 1000, 0, 4, 4, 4, "async & hrtimer: percpu objpool from vmalloc"},
+};
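+
+/*
+ * Note: the first entry above is equivalent to the following field-by-field
+ * assignment (an illustrative restatement only; the field names are taken
+ * from the explanatory comment before the table):
+ *
+ *   .async = 0, .mode = 0, .objsz = NODE_COMPACT, .duration = 1000,
+ *   .delay = 0, .bulk_normal = 1, .bulk_irq = 0, .hrtimer = 0,
+ *   .name = "sync: percpu objpool"
+ */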
+
+static int __init ot_mod_init(void)
+{
+ int i;
+
+ /* perform the tests */
+ for (i = 0; i < ARRAY_SIZE(g_testcases); i++) {
+ ot_init_data(&g_testcases[i].data);
+ if (g_testcases[i].async)
+ ot_start_async(&g_testcases[i]);
+ else
+ ot_start_sync(&g_testcases[i]);
+ }
+
+ /* show test summary */
+ pr_info("\n");
+ pr_info("Summary of testcases:\n");
+ for (i = 0; i < ARRAY_SIZE(g_testcases); i++) {
+ pr_info(" duration: %lluus \thits: %10lu \tmiss: %10lu \t%s\n",
+ g_testcases[i].data.duration, g_testcases[i].data.objects.nhits,
+ g_testcases[i].data.objects.nmiss, g_testcases[i].name);
+ }
+
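+ /* return an error on purpose so the test module is not kept loaded */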
+ return -EAGAIN;
+}
+
+static void __exit ot_mod_exit(void)
+{
+}
+
+module_init(ot_mod_init);
+module_exit(ot_mod_exit);
+
+MODULE_LICENSE("GPL");
\ No newline at end of file
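
For context on what the test module above exercises: the objpool API it drives follows the basic init/pop/push/fini pattern below. This is a minimal sketch based on the calls visible in the test code; the 16-object pool, struct demo_node and demo_objpool() are illustrative only, not part of the patch.

    #include <linux/objpool.h>
    #include <linux/gfp.h>

    struct demo_node {
            int val;
    };

    static int demo_objpool(void)
    {
            struct objpool_head pool;
            struct demo_node *node;
            int ret;

            /* pre-allocate 16 objects; no context, object-init or release callback */
            ret = objpool_init(&pool, 16, sizeof(struct demo_node), GFP_KERNEL,
                               NULL, NULL, NULL);
            if (ret)
                    return ret;

            node = objpool_pop(&pool);              /* NULL once the pool runs empty */
            if (node)
                    objpool_push(node, &pool);      /* hand the object back for reuse */

            objpool_fini(&pool);                    /* drop the pool's own reference */
            return 0;
    }
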
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index 0a559a4235..9308bcfb2a 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -32,6 +32,58 @@ ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength)
}
EXPORT_SYMBOL(ucs2_strsize);
+/**
+ * ucs2_strscpy() - Copy a UCS2 string into a sized buffer.
+ *
+ * @dst: Pointer to the destination buffer where to copy the string to.
+ * @src: Pointer to the source buffer where to copy the string from.
+ * @count: Size of the destination buffer, in UCS2 (16-bit) characters.
+ *
+ * Like strscpy(), only for UCS2 strings.
+ *
+ * Copy the source string @src, or as much of it as fits, into the destination
+ * buffer @dst. The behavior is undefined if the string buffers overlap. The
+ * destination buffer @dst is always NUL-terminated, unless it's zero-sized.
+ *
+ * Return: The number of characters copied into @dst (excluding the trailing
+ * %NUL terminator) or -E2BIG if @count is 0 or @src was truncated due to the
+ * destination buffer being too small.
+ */
+ssize_t ucs2_strscpy(ucs2_char_t *dst, const ucs2_char_t *src, size_t count)
+{
+ long res;
+
+ /*
+ * Ensure that we have a valid amount of space. We need to store at
+ * least one NUL-character.
+ */
+ if (count == 0 || WARN_ON_ONCE(count > INT_MAX / sizeof(*dst)))
+ return -E2BIG;
+
+ /*
+ * Copy at most 'count' characters, return early if we find a
+ * NUL-terminator.
+ */
+ for (res = 0; res < count; res++) {
+ ucs2_char_t c;
+
+ c = src[res];
+ dst[res] = c;
+
+ if (!c)
+ return res;
+ }
+
+ /*
+ * The loop above terminated without finding a NUL-terminator,
+ * exceeding the 'count': Enforce proper NUL-termination and return
+ * error.
+ */
+ dst[count - 1] = 0;
+ return -E2BIG;
+}
+EXPORT_SYMBOL(ucs2_strscpy);
+
int
ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
{
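
To illustrate the kernel-doc contract of ucs2_strscpy() above, a hypothetical caller could look like the sketch below; the 32-character buffer and the copy_ucs2_name() helper are made up for illustration and not part of the patch.

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/printk.h>
    #include <linux/ucs2_string.h>

    static void copy_ucs2_name(const ucs2_char_t *src)
    {
            ucs2_char_t name[32];
            ssize_t len;

            len = ucs2_strscpy(name, src, ARRAY_SIZE(name));
            if (len == -E2BIG)
                    /* truncated, but name[] is still NUL-terminated */
                    pr_warn("UCS2 name truncated\n");
    }
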
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 2aa408441c..552738f142 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -60,7 +60,8 @@
bool no_hash_pointers __ro_after_init;
EXPORT_SYMBOL_GPL(no_hash_pointers);
-static noinline unsigned long long simple_strntoull(const char *startp, size_t max_chars, char **endp, unsigned int base)
+noinline
+static unsigned long long simple_strntoull(const char *startp, char **endp, unsigned int base, size_t max_chars)
{
const char *cp;
unsigned long long result = 0ULL;
@@ -95,7 +96,7 @@ static noinline unsigned long long simple_strntoull(const char *startp, size_t m
noinline
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base)
{
- return simple_strntoull(cp, INT_MAX, endp, base);
+ return simple_strntoull(cp, endp, base, INT_MAX);
}
EXPORT_SYMBOL(simple_strtoull);
@@ -130,8 +131,8 @@ long simple_strtol(const char *cp, char **endp, unsigned int base)
}
EXPORT_SYMBOL(simple_strtol);
-static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
- unsigned int base)
+noinline
+static long long simple_strntoll(const char *cp, char **endp, unsigned int base, size_t max_chars)
{
/*
* simple_strntoull() safely handles receiving max_chars==0 in the
@@ -140,9 +141,9 @@ static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
* and the content of *cp is irrelevant.
*/
if (*cp == '-' && max_chars > 0)
- return -simple_strntoull(cp + 1, max_chars - 1, endp, base);
+ return -simple_strntoull(cp + 1, endp, base, max_chars - 1);
- return simple_strntoull(cp, max_chars, endp, base);
+ return simple_strntoull(cp, endp, base, max_chars);
}
/**
@@ -155,7 +156,7 @@ static long long simple_strntoll(const char *cp, size_t max_chars, char **endp,
*/
long long simple_strtoll(const char *cp, char **endp, unsigned int base)
{
- return simple_strntoll(cp, INT_MAX, endp, base);
+ return simple_strntoll(cp, endp, base, INT_MAX);
}
EXPORT_SYMBOL(simple_strtoll);
@@ -3653,13 +3654,11 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
break;
if (is_sign)
- val.s = simple_strntoll(str,
- field_width >= 0 ? field_width : INT_MAX,
- &next, base);
+ val.s = simple_strntoll(str, &next, base,
+ field_width >= 0 ? field_width : INT_MAX);
else
- val.u = simple_strntoull(str,
- field_width >= 0 ? field_width : INT_MAX,
- &next, base);
+ val.u = simple_strntoull(str, &next, base,
+ field_width >= 0 ? field_width : INT_MAX);
switch (qualifier) {
case 'H': /* that's 'hh' in format */
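
Only the static helpers change their argument order in this file (the length limit moves last, after the numeric base); the exported entry points keep their signatures. A minimal sketch of an unaffected caller, with an illustrative input string:

    #include <linux/kernel.h>

    static unsigned long long demo_parse(const char *buf)
    {
            char *end;

            /* e.g. buf = "0x2a trailing": returns 42, 'end' points at " trailing" */
            return simple_strtoull(buf, &end, 16);
    }
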
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index adce22ac18..aef086a6bf 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -19,11 +19,6 @@ config XZ_DEC_POWERPC
default y
select XZ_DEC_BCJ
-config XZ_DEC_IA64
- bool "IA-64 BCJ filter decoder" if EXPERT
- default y
- select XZ_DEC_BCJ
-
config XZ_DEC_ARM
bool "ARM BCJ filter decoder" if EXPERT
default y