author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit:    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744
tree:      a94efe259b9009378be6d90eb30d2b019d95c194 (tools/testing/selftests/kselftest)
parent:    Initial commit.
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat:

-rw-r--r-- | tools/testing/selftests/kselftest.h          |  290
-rwxr-xr-x | tools/testing/selftests/kselftest/module.sh  |   84
-rwxr-xr-x | tools/testing/selftests/kselftest/prefix.pl  |   24
-rw-r--r-- | tools/testing/selftests/kselftest/runner.sh  |  120
-rwxr-xr-x | tools/testing/selftests/kselftest_deps.sh    |  325
-rw-r--r-- | tools/testing/selftests/kselftest_harness.h  | 1065
-rwxr-xr-x | tools/testing/selftests/kselftest_install.sh |   35
-rw-r--r-- | tools/testing/selftests/kselftest_module.h   |   48

8 files changed, 1991 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
new file mode 100644
index 000000000..8d50483fe
--- /dev/null
+++ b/tools/testing/selftests/kselftest.h
@@ -0,0 +1,290 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * kselftest.h:	low-level kselftest framework to include from
 *		selftest programs. When possible, please use
 *		kselftest_harness.h instead.
 *
 * Copyright (c) 2014 Shuah Khan <shuahkh@osg.samsung.com>
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 *
 * Using this API consists of first counting how many tests your code
 * has to run, and then starting up the reporting:
 *
 *     ksft_print_header();
 *     ksft_set_plan(total_number_of_tests);
 *
 * For each test, report any progress, debugging, etc with:
 *
 *     ksft_print_msg(fmt, ...);
 *
 * and finally report the pass/fail/skip/xfail state of the test with one of:
 *
 *     ksft_test_result(condition, fmt, ...);
 *     ksft_test_result_pass(fmt, ...);
 *     ksft_test_result_fail(fmt, ...);
 *     ksft_test_result_skip(fmt, ...);
 *     ksft_test_result_xfail(fmt, ...);
 *     ksft_test_result_error(fmt, ...);
 *
 * When all tests are finished, clean up and exit the program with one of:
 *
 *     ksft_exit(condition);
 *     ksft_exit_pass();
 *     ksft_exit_fail();
 *
 * If the program wants to report details on why the entire program has
 * failed, it can instead exit with a message (this is usually done when
 * the program is aborting before finishing all tests):
 *
 *     ksft_exit_fail_msg(fmt, ...);
 *
 */
#ifndef __KSELFTEST_H
#define __KSELFTEST_H

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <stdio.h>

/* define kselftest exit codes */
#define KSFT_PASS  0
#define KSFT_FAIL  1
#define KSFT_XFAIL 2
#define KSFT_XPASS 3
#define KSFT_SKIP  4

/* counters */
struct ksft_count {
	unsigned int ksft_pass;
	unsigned int ksft_fail;
	unsigned int ksft_xfail;
	unsigned int ksft_xpass;
	unsigned int ksft_xskip;
	unsigned int ksft_error;
};

static struct ksft_count ksft_cnt;
static unsigned int ksft_plan;

static inline unsigned int ksft_test_num(void)
{
	return ksft_cnt.ksft_pass + ksft_cnt.ksft_fail +
		ksft_cnt.ksft_xfail + ksft_cnt.ksft_xpass +
		ksft_cnt.ksft_xskip + ksft_cnt.ksft_error;
}

static inline void ksft_inc_pass_cnt(void) { ksft_cnt.ksft_pass++; }
static inline void ksft_inc_fail_cnt(void) { ksft_cnt.ksft_fail++; }
static inline void ksft_inc_xfail_cnt(void) { ksft_cnt.ksft_xfail++; }
static inline void ksft_inc_xpass_cnt(void) { ksft_cnt.ksft_xpass++; }
static inline void ksft_inc_xskip_cnt(void) { ksft_cnt.ksft_xskip++; }
static inline void ksft_inc_error_cnt(void) { ksft_cnt.ksft_error++; }

static inline int ksft_get_pass_cnt(void) { return ksft_cnt.ksft_pass; }
static inline int ksft_get_fail_cnt(void) { return ksft_cnt.ksft_fail; }
static inline int ksft_get_xfail_cnt(void) { return ksft_cnt.ksft_xfail; }
static inline int ksft_get_xpass_cnt(void) { return ksft_cnt.ksft_xpass; }
static inline int ksft_get_xskip_cnt(void) { return ksft_cnt.ksft_xskip; }
static inline int ksft_get_error_cnt(void) { return ksft_cnt.ksft_error; }

static inline void ksft_print_header(void)
{
	if (!(getenv("KSFT_TAP_LEVEL")))
		printf("TAP version 13\n");
}

static inline void ksft_set_plan(unsigned int plan)
{
	ksft_plan = plan;
	printf("1..%d\n", ksft_plan);
}

static inline void ksft_print_cnts(void)
{
	if (ksft_plan != ksft_test_num())
		printf("# Planned tests != run tests (%u != %u)\n",
		       ksft_plan, ksft_test_num());
	printf("# Totals: pass:%d fail:%d xfail:%d xpass:%d skip:%d error:%d\n",
	       ksft_cnt.ksft_pass, ksft_cnt.ksft_fail,
	       ksft_cnt.ksft_xfail, ksft_cnt.ksft_xpass,
	       ksft_cnt.ksft_xskip, ksft_cnt.ksft_error);
}

static inline void ksft_print_msg(const char *msg, ...)
{
	int saved_errno = errno;
	va_list args;

	va_start(args, msg);
	printf("# ");
	errno = saved_errno;
	vprintf(msg, args);
	va_end(args);
}

static inline void ksft_test_result_pass(const char *msg, ...)
{
	int saved_errno = errno;
	va_list args;

	ksft_cnt.ksft_pass++;

	va_start(args, msg);
	printf("ok %d ", ksft_test_num());
	errno = saved_errno;
	vprintf(msg, args);
	va_end(args);
}

static inline void ksft_test_result_fail(const char *msg, ...)
{
	int saved_errno = errno;
	va_list args;

	ksft_cnt.ksft_fail++;

	va_start(args, msg);
	printf("not ok %d ", ksft_test_num());
	errno = saved_errno;
	vprintf(msg, args);
	va_end(args);
}

/**
 * ksft_test_result() - Report test success based on truth of condition
 *
 * @condition: if true, report test success, otherwise failure.
 */
#define ksft_test_result(condition, fmt, ...) do {	\
	if (!!(condition))				\
		ksft_test_result_pass(fmt, ##__VA_ARGS__);\
	else						\
		ksft_test_result_fail(fmt, ##__VA_ARGS__);\
	} while (0)

static inline void ksft_test_result_xfail(const char *msg, ...)
{
	int saved_errno = errno;
	va_list args;

	ksft_cnt.ksft_xfail++;

	va_start(args, msg);
	printf("ok %d # XFAIL ", ksft_test_num());
	errno = saved_errno;
	vprintf(msg, args);
	va_end(args);
}

static inline void ksft_test_result_skip(const char *msg, ...)
{
	int saved_errno = errno;
	va_list args;

	ksft_cnt.ksft_xskip++;

	va_start(args, msg);
	printf("ok %d # SKIP ", ksft_test_num());
	errno = saved_errno;
	vprintf(msg, args);
	va_end(args);
}

/* TODO: how does "error" differ from "fail" or "skip"? */
static inline void ksft_test_result_error(const char *msg, ...)
{
	int saved_errno = errno;
	va_list args;

	ksft_cnt.ksft_error++;

	va_start(args, msg);
	printf("not ok %d # error ", ksft_test_num());
	errno = saved_errno;
	vprintf(msg, args);
	va_end(args);
}

static inline int ksft_exit_pass(void)
{
	ksft_print_cnts();
	exit(KSFT_PASS);
}

static inline int ksft_exit_fail(void)
{
	ksft_print_cnts();
	exit(KSFT_FAIL);
}

/**
 * ksft_exit() - Exit selftest based on truth of condition
 *
 * @condition: if true, exit self test with success, otherwise fail.
 */
#define ksft_exit(condition) do {	\
	if (!!(condition))		\
		ksft_exit_pass();	\
	else				\
		ksft_exit_fail();	\
	} while (0)

static inline int ksft_exit_fail_msg(const char *msg, ...)
{
	int saved_errno = errno;
	va_list args;

	va_start(args, msg);
	printf("Bail out! ");
	errno = saved_errno;
	vprintf(msg, args);
	va_end(args);

	ksft_print_cnts();
	exit(KSFT_FAIL);
}

static inline int ksft_exit_xfail(void)
{
	ksft_print_cnts();
	exit(KSFT_XFAIL);
}

static inline int ksft_exit_xpass(void)
{
	ksft_print_cnts();
	exit(KSFT_XPASS);
}

static inline int ksft_exit_skip(const char *msg, ...)
{
	int saved_errno = errno;
	va_list args;

	va_start(args, msg);

	/*
	 * FIXME: several tests misuse ksft_exit_skip so produce
	 * something sensible if some tests have already been run
	 * or a plan has been printed.  Those tests should use
	 * ksft_test_result_skip or ksft_exit_fail_msg instead.
	 */
	if (ksft_plan || ksft_test_num()) {
		ksft_cnt.ksft_xskip++;
		printf("ok %d # SKIP ", 1 + ksft_test_num());
	} else {
		printf("1..0 # SKIP ");
	}
	if (msg) {
		errno = saved_errno;
		vprintf(msg, args);
		va_end(args);
	}
	if (ksft_test_num())
		ksft_print_cnts();
	exit(KSFT_SKIP);
}

#endif /* __KSELFTEST_H */
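To make the usage flow described in the header comment concrete, here is a minimal, hypothetical sketch of a standalone selftest built on this API. The checks, names, and the `SKIP_SECOND` environment variable are illustrative only and are not part of this patch:

```c
#include "kselftest.h"

/* Hypothetical check used only for illustration. */
static int two_plus_two_is_four(void)
{
	return 2 + 2 == 4;
}

int main(void)
{
	ksft_print_header();	/* emits "TAP version 13" */
	ksft_set_plan(2);	/* emits "1..2" */

	ksft_test_result(two_plus_two_is_four(), "arithmetic works\n");

	if (getenv("SKIP_SECOND"))
		ksft_test_result_skip("second check skipped by request\n");
	else
		ksft_test_result_pass("second check\n");

	/* Prints the totals and exits with KSFT_PASS or KSFT_FAIL. */
	ksft_exit(ksft_get_fail_cnt() == 0);
}
```

Run directly, such a program emits TAP output that the runner.sh script below can prefix and aggregate.
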
diff --git a/tools/testing/selftests/kselftest/module.sh b/tools/testing/selftests/kselftest/module.sh
new file mode 100755
index 000000000..fb4733faf
--- /dev/null
+++ b/tools/testing/selftests/kselftest/module.sh
@@ -0,0 +1,84 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+

#
# Runs an individual test module.
#
# kselftest expects a separate executable for each test; this can be
# created by adding a script like this:
#
#   #!/bin/sh
#   SPDX-License-Identifier: GPL-2.0+
#   $(dirname $0)/../kselftest/module.sh "description" module_name
#
# Example: tools/testing/selftests/lib/printf.sh

desc=""				# Output prefix.
module=""			# Filename (without the .ko).
args=""				# modprobe arguments.

modprobe="/sbin/modprobe"

main() {
    parse_args "$@"
    assert_root
    assert_have_module
    run_module
}

parse_args() {
    script=${0##*/}

    if [ $# -lt 2 ]; then
        echo "Usage: $script <description> <module_name> [FAIL]"
        exit 1
    fi

    desc="$1"
    shift || true
    module="$1"
    shift || true
    args="$@"
}

assert_root() {
    if [ ! -w /dev ]; then
        skip "please run as root"
    fi
}

assert_have_module() {
    if ! $modprobe -q -n $module; then
        skip "module $module is not found"
    fi
}

run_module() {
    if $modprobe -q $module $args; then
        $modprobe -q -r $module
        say "ok"
    else
        fail ""
    fi
}

say() {
    echo "$desc: $1"
}

fail() {
    say "$1 [FAIL]" >&2
    exit 1
}

skip() {
    say "$1 [SKIP]" >&2
    # Kselftest framework requirement - SKIP code is 4.
    exit 4
}

#
# Main script
#
main "$@"
diff --git a/tools/testing/selftests/kselftest/prefix.pl b/tools/testing/selftests/kselftest/prefix.pl
new file mode 100755
index 000000000..12a7f4ca2
--- /dev/null
+++ b/tools/testing/selftests/kselftest/prefix.pl
@@ -0,0 +1,24 @@
#!/usr/bin/env perl
# SPDX-License-Identifier: GPL-2.0
# Prefix all lines with "# ", unbuffered. Command being piped in may need
# to have unbuffering forced with "stdbuf -i0 -o0 -e0 $cmd".
use strict;
use IO::Handle;

binmode STDIN;
binmode STDOUT;

STDOUT->autoflush(1);

my $needed = 1;
while (1) {
	my $char;
	my $bytes = sysread(STDIN, $char, 1);
	exit 0 if ($bytes == 0);
	if ($needed) {
		print "# ";
		$needed = 0;
	}
	print $char;
	$needed = 1 if ($char eq "\n");
}
diff --git a/tools/testing/selftests/kselftest/runner.sh b/tools/testing/selftests/kselftest/runner.sh
new file mode 100644
index 000000000..83616f077
--- /dev/null
+++ b/tools/testing/selftests/kselftest/runner.sh
@@ -0,0 +1,120 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
# Runs a set of tests in a given subdirectory.
export skip_rc=4
export timeout_rc=124
export logfile=/dev/stdout
export per_test_logging=

# Defaults for "settings" file fields:
# "timeout" how many seconds to let each test run before failing.
export kselftest_default_timeout=45

# There isn't a shell-agnostic way to find the path of a sourced file,
# so we must rely on BASE_DIR being set to find other tools.
if [ -z "$BASE_DIR" ]; then
	echo "Error: BASE_DIR must be set before sourcing." >&2
	exit 1
fi

# If Perl is unavailable, we must fall back to line-at-a-time prefixing
# with sed instead of unbuffered output.
tap_prefix()
{
	if [ ! -x /usr/bin/perl ]; then
		sed -e 's/^/# /'
	else
		"$BASE_DIR"/kselftest/prefix.pl
	fi
}

tap_timeout()
{
	# Make sure tests will time out if utility is available.
	if [ -x /usr/bin/timeout ] ; then
		/usr/bin/timeout --foreground "$kselftest_timeout" \
			/usr/bin/timeout "$kselftest_timeout" $1
	else
		$1
	fi
}

run_one()
{
	DIR="$1"
	TEST="$2"
	NUM="$3"

	BASENAME_TEST=$(basename $TEST)

	# Reset any "settings"-file variables.
	export kselftest_timeout="$kselftest_default_timeout"
	# Load per-test-directory kselftest "settings" file.
	settings="$BASE_DIR/$DIR/settings"
	if [ -r "$settings" ] ; then
		while read line ; do
			# Skip comments.
			if echo "$line" | grep -q '^#'; then
				continue
			fi
			field=$(echo "$line" | cut -d= -f1)
			value=$(echo "$line" | cut -d= -f2-)
			eval "kselftest_$field"="$value"
		done < "$settings"
	fi

	TEST_HDR_MSG="selftests: $DIR: $BASENAME_TEST"
	echo "# $TEST_HDR_MSG"
	if [ ! -e "$TEST" ]; then
		echo "# Warning: file $TEST is missing!"
		echo "not ok $test_num $TEST_HDR_MSG"
	else
		cmd="./$BASENAME_TEST"
		if [ ! -x "$TEST" ]; then
			echo "# Warning: file $TEST is not executable"

			if [ $(head -n 1 "$TEST" | cut -c -2) = "#!" ]
			then
				interpreter=$(head -n 1 "$TEST" | cut -c 3-)
				cmd="$interpreter ./$BASENAME_TEST"
			else
				echo "not ok $test_num $TEST_HDR_MSG"
				return
			fi
		fi
		cd `dirname $TEST` > /dev/null
		((((( tap_timeout "$cmd" 2>&1; echo $? >&3) |
			tap_prefix >&4) 3>&1) |
			(read xs; exit $xs)) 4>>"$logfile" &&
		echo "ok $test_num $TEST_HDR_MSG") ||
		(rc=$?;	\
			if [ $rc -eq $skip_rc ]; then	\
				echo "ok $test_num $TEST_HDR_MSG # SKIP"
			elif [ $rc -eq $timeout_rc ]; then	\
				echo "#"
				echo "not ok $test_num $TEST_HDR_MSG # TIMEOUT $kselftest_timeout seconds"
			else
				echo "not ok $test_num $TEST_HDR_MSG # exit=$rc"
			fi)
		cd - >/dev/null
	fi
}

run_many()
{
	echo "TAP version 13"
	DIR="${PWD#${BASE_DIR}/}"
	test_num=0
	total=$(echo "$@" | wc -w)
	echo "1..$total"
	for TEST in "$@"; do
		BASENAME_TEST=$(basename $TEST)
		test_num=$(( test_num + 1 ))
		if [ -n "$per_test_logging" ]; then
			logfile="/tmp/$BASENAME_TEST"
			cat /dev/null > "$logfile"
		fi
		run_one "$DIR" "$TEST" "$test_num"
	done
}
diff --git a/tools/testing/selftests/kselftest_deps.sh b/tools/testing/selftests/kselftest_deps.sh
new file mode 100755
index 000000000..e6010de67
--- /dev/null
+++ b/tools/testing/selftests/kselftest_deps.sh
@@ -0,0 +1,325 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
# kselftest_deps.sh
#
# Checks for kselftest build dependencies on the build system.
# Copyright (c) 2020 Shuah Khan <skhan@linuxfoundation.org>
#

usage()
{

echo -e "Usage: $0 [-p] <compiler> [test_name]\n"
echo -e "\tkselftest_deps.sh [-p] gcc"
echo -e "\tkselftest_deps.sh [-p] gcc vm"
echo -e "\tkselftest_deps.sh [-p] aarch64-linux-gnu-gcc"
echo -e "\tkselftest_deps.sh [-p] aarch64-linux-gnu-gcc vm\n"
echo "- Should be run in selftests directory in the kernel repo."
echo "- Checks if Kselftests can be built/cross-built on a system."
echo "- Parses all test/sub-test Makefiles to find library dependencies."
echo "- Runs compile test on a trivial C file with LDLIBS specified"
echo "  in the test Makefiles to identify missing library dependencies."
echo "- Prints suggested target list for a system, filtering out tests"
echo "  that failed the build dependency check from the TARGETS in the"
echo "  Selftests main Makefile, when optional -p is specified."
+echo "- Prints pass/fail dependency check for each tests/sub-test." +echo "- Prints pass/fail targets and libraries." +echo "- Default: runs dependency checks on all tests." +echo "- Optional test name can be specified to check dependencies for it." +exit 1 + +} + +# Start main() +main() +{ + +base_dir=`pwd` +# Make sure we're in the selftests top-level directory. +if [ $(basename "$base_dir") != "selftests" ]; then + echo -e "\tPlease run $0 in" + echo -e "\ttools/testing/selftests directory ..." + exit 1 +fi + +print_targets=0 + +while getopts "p" arg; do + case $arg in + p) + print_targets=1 + shift;; + esac +done + +if [ $# -eq 0 ] +then + usage +fi + +# Compiler +CC=$1 + +tmp_file=$(mktemp).c +trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT +#echo $tmp_file + +pass=$(mktemp).out +trap "rm -f $pass" EXIT +#echo $pass + +fail=$(mktemp).out +trap "rm -f $fail" EXIT +#echo $fail + +# Generate tmp source fire for compile test +cat << "EOF" > $tmp_file +int main() +{ +} +EOF + +# Save results +total_cnt=0 +fail_trgts=() +fail_libs=() +fail_cnt=0 +pass_trgts=() +pass_libs=() +pass_cnt=0 + +# Get all TARGETS from selftests Makefile +targets=$(egrep "^TARGETS +|^TARGETS =" Makefile | cut -d "=" -f2) + +# Initially, in LDLIBS related lines, the dep checker needs +# to ignore lines containing the following strings: +filter="\$(VAR_LDLIBS)\|pkg-config\|PKG_CONFIG\|IOURING_EXTRA_LIBS" + +# Single test case +if [ $# -eq 2 ] +then + test=$2/Makefile + + l1_test $test + l2_test $test + l3_test $test + l4_test $test + l5_test $test + + print_results $1 $2 + exit $? +fi + +# Level 1: LDLIBS set static. +# +# Find all LDLIBS set statically for all executables built by a Makefile +# and filter out VAR_LDLIBS to discard the following: +# gpio/Makefile:LDLIBS += $(VAR_LDLIBS) +# Append space at the end of the list to append more tests. + +l1_tests=$(grep -r --include=Makefile "^LDLIBS" | \ + grep -v "$filter" | awk -F: '{print $1}' | uniq) + +# Level 2: LDLIBS set dynamically. +# +# Level 2 +# Some tests have multiple valid LDLIBS lines for individual sub-tests +# that need dependency checks. Find them and append them to the tests +# e.g: vm/Makefile:$(OUTPUT)/userfaultfd: LDLIBS += -lpthread +# Filter out VAR_LDLIBS to discard the following: +# memfd/Makefile:$(OUTPUT)/fuse_mnt: LDLIBS += $(VAR_LDLIBS) +# Append space at the end of the list to append more tests. + +l2_tests=$(grep -r --include=Makefile ": LDLIBS" | \ + grep -v "$filter" | awk -F: '{print $1}' | uniq) + +# Level 3 +# gpio, memfd and others use pkg-config to find mount and fuse libs +# respectively and save it in VAR_LDLIBS. If pkg-config doesn't find +# any, VAR_LDLIBS set to default. +# Use the default value and filter out pkg-config for dependency check. +# e.g: +# gpio/Makefile +# VAR_LDLIBS := $(shell pkg-config --libs mount) 2>/dev/null) +# memfd/Makefile +# VAR_LDLIBS := $(shell pkg-config fuse --libs 2>/dev/null) + +l3_tests=$(grep -r --include=Makefile "^VAR_LDLIBS" | \ + grep -v "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq) + +# Level 4 +# some tests may fall back to default using `|| echo -l<libname>` +# if pkg-config doesn't find the libs, instead of using VAR_LDLIBS +# as per level 3 checks. 
# e.g.:
# 	netfilter/Makefile
#		LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
l4_tests=$(grep -r --include=Makefile "^LDLIBS" | \
		grep "pkg-config\|PKG_CONFIG" | awk -F: '{print $1}' | uniq)

# Level 5
# some tests may use IOURING_EXTRA_LIBS to add extra libs to LDLIBS,
# which in turn may be defined in a sub-Makefile,
# e.g.:
#	mm/Makefile
#		$(OUTPUT)/gup_longterm: LDLIBS += $(IOURING_EXTRA_LIBS)
l5_tests=$(grep -r --include=Makefile "LDLIBS +=.*\$(IOURING_EXTRA_LIBS)" | \
	awk -F: '{print $1}' | uniq)

#echo l1_tests $l1_tests
#echo l2_tests $l2_tests
#echo l3_tests $l3_tests
#echo l4_tests $l4_tests
#echo l5_tests $l5_tests

all_tests
print_results $1 $2

exit $?
}
# end main()

all_tests()
{
	for test in $l1_tests; do
		l1_test $test
	done

	for test in $l2_tests; do
		l2_test $test
	done

	for test in $l3_tests; do
		l3_test $test
	done

	for test in $l4_tests; do
		l4_test $test
	done

	for test in $l5_tests; do
		l5_test $test
	done
}

# Use same parsing used for l1_tests and pick libraries this time.
l1_test()
{
	test_libs=$(grep --include=Makefile "^LDLIBS" $test | \
			grep -v "$filter" | \
			sed -e 's/\:/ /' | \
			sed -e 's/+/ /' | cut -d "=" -f 2)

	check_libs $test $test_libs
}

# Use same parsing used for l2_tests and pick libraries this time.
l2_test()
{
	test_libs=$(grep --include=Makefile ": LDLIBS" $test | \
			grep -v "$filter" | \
			sed -e 's/\:/ /' | sed -e 's/+/ /' | \
			cut -d "=" -f 2)

	check_libs $test $test_libs
}

l3_test()
{
	test_libs=$(grep --include=Makefile "^VAR_LDLIBS" $test | \
			grep -v "pkg-config" | sed -e 's/\:/ /' |
			sed -e 's/+/ /' | cut -d "=" -f 2)

	check_libs $test $test_libs
}

l4_test()
{
	test_libs=$(grep --include=Makefile "^VAR_LDLIBS\|^LDLIBS" $test | \
			grep "\(pkg-config\|PKG_CONFIG\).*|| echo " | \
			sed -e 's/.*|| echo //' | sed -e 's/)$//')

	check_libs $test $test_libs
}

l5_test()
{
	tests=$(find $(dirname "$test") -type f -name "*.mk")
	test_libs=$(grep "^IOURING_EXTRA_LIBS +\?=" $tests | \
			cut -d "=" -f 2)

	check_libs $test $test_libs
}

check_libs()
{

if [[ ! -z "${test_libs// }" ]]
then

	#echo $test_libs

	for lib in $test_libs; do

	let total_cnt+=1
	$CC -o $tmp_file.bin $lib $tmp_file > /dev/null 2>&1
	if [ $? -ne 0 ]; then
		echo "FAIL: $test dependency check: $lib" >> $fail
		let fail_cnt+=1
		fail_libs+="$lib "
		fail_target=$(echo "$test" | cut -d "/" -f1)
		fail_trgts+="$fail_target "
		targets=$(echo "$targets" | grep -v "$fail_target")
	else
		echo "PASS: $test dependency check passed $lib" >> $pass
		let pass_cnt+=1
		pass_libs+="$lib "
		pass_trgts+="$(echo "$test" | cut -d "/" -f1) "
	fi

	done
fi
}

print_results()
{
	echo -e "========================================================";
	echo -e "Kselftest Dependency Check for [$0 $1 $2] results..."

	if [ $print_targets -ne 0 ]
	then
		echo -e "Suggested Selftest Targets for your configuration:"
		echo -e "$targets";
	fi

	echo -e "========================================================";
	echo -e "Checked tests defining LDLIBS dependencies"
	echo -e "--------------------------------------------------------";
	echo -e "Total tests with Dependencies:"
	echo -e "$total_cnt Pass: $pass_cnt Fail: $fail_cnt";

	if [ $pass_cnt -ne 0 ]; then
		echo -e "--------------------------------------------------------";
		cat $pass
		echo -e "--------------------------------------------------------";
		echo -e "Targets passed build dependency check on system:"
		echo -e "$(echo "$pass_trgts" | xargs -n1 | sort -u | xargs)"
	fi

	if [ $fail_cnt -ne 0 ]; then
		echo -e "--------------------------------------------------------";
		cat $fail
		echo -e "--------------------------------------------------------";
		echo -e "Targets failed build dependency check on system:"
		echo -e "$(echo "$fail_trgts" | xargs -n1 | sort -u | xargs)"
		echo -e "--------------------------------------------------------";
		echo -e "Missing libraries on this system:"
		echo -e "$(echo "$fail_libs" | xargs -n1 | sort -u | xargs)"
	fi

	echo -e "--------------------------------------------------------";
	echo -e "========================================================";
}

main "$@"
diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
new file mode 100644
index 000000000..2fadc99d9
--- /dev/null
+++ b/tools/testing/selftests/kselftest_harness.h
@@ -0,0 +1,1065 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
 *
 * kselftest_harness.h: simple C unit test helper.
 *
 * See documentation in Documentation/dev-tools/kselftest.rst
 *
 * API inspired by code.google.com/p/googletest
 */

/**
 * DOC: example
 *
 * .. code-block:: c
 *
 *    #include "../kselftest_harness.h"
 *
 *    TEST(standalone_test) {
 *        do_some_stuff;
 *        EXPECT_GT(10, stuff) {
 *            stuff_state_t state;
 *            enumerate_stuff_state(&state);
 *            TH_LOG("expectation failed with state: %s", state.msg);
 *        }
 *        more_stuff;
 *        ASSERT_NE(some_stuff, NULL) TH_LOG("how did it happen?!");
 *        last_stuff;
 *        EXPECT_EQ(0, last_stuff);
 *    }
 *
 *    FIXTURE(my_fixture) {
 *        mytype_t *data;
 *        int awesomeness_level;
 *    };
 *    FIXTURE_SETUP(my_fixture) {
 *        self->data = mytype_new();
 *        ASSERT_NE(NULL, self->data);
 *    }
 *    FIXTURE_TEARDOWN(my_fixture) {
 *        mytype_free(self->data);
 *    }
 *    TEST_F(my_fixture, data_is_good) {
 *        EXPECT_EQ(1, is_my_data_good(self->data));
 *    }
 *
 *    TEST_HARNESS_MAIN
 */

#ifndef __KSELFTEST_HARNESS_H
#define __KSELFTEST_HARNESS_H

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <asm/types.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include "kselftest.h"

#define TEST_TIMEOUT_DEFAULT 30

/* Utilities exposed to the test definitions */
#ifndef TH_LOG_STREAM
# define TH_LOG_STREAM stderr
#endif

#ifndef TH_LOG_ENABLED
# define TH_LOG_ENABLED 1
#endif

/**
 * TH_LOG(fmt, ...)
 *
 * @fmt: format string
 * @...: optional arguments
 *
 * .. code-block:: c
 *
 *     TH_LOG(format, ...)
 *
 * Optional debug logging function available for use in tests.
 * Logging may be enabled or disabled by defining TH_LOG_ENABLED.
 * E.g., #define TH_LOG_ENABLED 1
 *
 * If no definition is provided, logging is enabled by default.
 *
 * If there is no way to print an error message for the process running the
 * test (e.g. not allowed to write to stderr), it is still possible to get the
 * ASSERT_* number for which the test failed.  This behavior can be enabled by
 * writing `_metadata->no_print = true;` before the check sequence that is
 * unable to print.  When an error occurs, instead of printing an error message
 * and calling `abort(3)`, the test process calls `_exit(2)` with the assert
 * number as argument, which is then printed by the parent process.
 */
#define TH_LOG(fmt, ...) do { \
	if (TH_LOG_ENABLED) \
		__TH_LOG(fmt, ##__VA_ARGS__); \
} while (0)

/* Unconditional logger for internal use. */
#define __TH_LOG(fmt, ...) \
		fprintf(TH_LOG_STREAM, "# %s:%d:%s:" fmt "\n", \
			__FILE__, __LINE__, _metadata->name, ##__VA_ARGS__)

/**
 * SKIP(statement, fmt, ...)
 *
 * @statement: statement to run after reporting SKIP
 * @fmt: format string
 * @...: optional arguments
 *
 * This forces a "pass" after reporting why something is being skipped
 * and runs "statement", which is usually "return" or "goto skip".
 */
#define SKIP(statement, fmt, ...) do { \
	snprintf(_metadata->results->reason, \
		 sizeof(_metadata->results->reason), fmt, ##__VA_ARGS__); \
	if (TH_LOG_ENABLED) { \
		fprintf(TH_LOG_STREAM, "# SKIP %s\n", \
			_metadata->results->reason); \
	} \
	_metadata->passed = 1; \
	_metadata->skip = 1; \
	_metadata->trigger = 0; \
	statement; \
} while (0)

/**
 * TEST(test_name) - Defines the test function and creates the registration
 * stub
 *
 * @test_name: test name
 *
 * .. code-block:: c
 *
 *     TEST(name) { implementation }
 *
 * Defines a test by name.
 * Names must be unique and tests must not be run in parallel.  The
 * implementation containing block is a function and scoping should be treated
 * as such.  Returning early may be performed with a bare "return;" statement.
 *
 * EXPECT_* and ASSERT_* are valid in a TEST() { } context.
 */
#define TEST(test_name) __TEST_IMPL(test_name, -1)

/**
 * TEST_SIGNAL(test_name, signal)
 *
 * @test_name: test name
 * @signal: signal number
 *
 * .. code-block:: c
 *
 *     TEST_SIGNAL(name, signal) { implementation }
 *
 * Defines a test by name and the expected term signal.
 * Names must be unique and tests must not be run in parallel.  The
 * implementation containing block is a function and scoping should be treated
 * as such.  Returning early may be performed with a bare "return;" statement.
 *
 * EXPECT_* and ASSERT_* are valid in a TEST() { } context.
 */
#define TEST_SIGNAL(test_name, signal) __TEST_IMPL(test_name, signal)

#define __TEST_IMPL(test_name, _signal) \
	static void test_name(struct __test_metadata *_metadata); \
	static inline void wrapper_##test_name( \
		struct __test_metadata *_metadata, \
		struct __fixture_variant_metadata *variant) \
	{ \
		test_name(_metadata); \
	} \
	static struct __test_metadata _##test_name##_object = \
		{ .name = #test_name, \
		  .fn = &wrapper_##test_name, \
		  .fixture = &_fixture_global, \
		  .termsig = _signal, \
		  .timeout = TEST_TIMEOUT_DEFAULT, }; \
	static void __attribute__((constructor)) _register_##test_name(void) \
	{ \
		__register_test(&_##test_name##_object); \
	} \
	static void test_name( \
		struct __test_metadata __attribute__((unused)) *_metadata)

/**
 * FIXTURE_DATA(datatype_name) - Wraps the struct name so we have one less
 * argument to pass around
 *
 * @datatype_name: datatype name
 *
 * .. code-block:: c
 *
 *     FIXTURE_DATA(datatype_name)
 *
 * Almost always, you want just FIXTURE() instead (see below).
 * This call may be used when the type of the fixture data
 * is needed.  In general, this should not be needed unless
 * the *self* is being passed to a helper directly.
 */
#define FIXTURE_DATA(datatype_name) struct _test_data_##datatype_name

/**
 * FIXTURE(fixture_name) - Called once per fixture to setup the data and
 * register
 *
 * @fixture_name: fixture name
 *
 * .. code-block:: c
 *
 *     FIXTURE(fixture_name) {
 *       type property1;
 *       ...
 *     };
 *
 * Defines the data provided to TEST_F()-defined tests as *self*.  It should be
 * populated and cleaned up using FIXTURE_SETUP() and FIXTURE_TEARDOWN().
 */
#define FIXTURE(fixture_name) \
	FIXTURE_VARIANT(fixture_name); \
	static struct __fixture_metadata _##fixture_name##_fixture_object = \
		{ .name = #fixture_name, }; \
	static void __attribute__((constructor)) \
	_register_##fixture_name##_data(void) \
	{ \
		__register_fixture(&_##fixture_name##_fixture_object); \
	} \
	FIXTURE_DATA(fixture_name)

/**
 * FIXTURE_SETUP(fixture_name) - Prepares the setup function for the fixture.
 * *_metadata* is included so that EXPECT_* and ASSERT_* work correctly.
 *
 * @fixture_name: fixture name
 *
 * .. code-block:: c
 *
 *     FIXTURE_SETUP(fixture_name) { implementation }
 *
 * Populates the required "setup" function for a fixture.  An instance of the
 * datatype defined with FIXTURE_DATA() will be exposed as *self* for the
 * implementation.
 *
 * ASSERT_* are valid for use in this context and will preempt the execution
 * of any dependent fixture tests.
 *
 * A bare "return;" statement may be used to return early.
 */
#define FIXTURE_SETUP(fixture_name) \
	void fixture_name##_setup( \
		struct __test_metadata __attribute__((unused)) *_metadata, \
		FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \
		const FIXTURE_VARIANT(fixture_name) \
			__attribute__((unused)) *variant)

/**
 * FIXTURE_TEARDOWN(fixture_name)
 * *_metadata* is included so that EXPECT_* and ASSERT_* work correctly.
 *
 * @fixture_name: fixture name
 *
 * .. code-block:: c
 *
 *     FIXTURE_TEARDOWN(fixture_name) { implementation }
 *
 * Populates the required "teardown" function for a fixture.  An instance of the
 * datatype defined with FIXTURE_DATA() will be exposed as *self* for the
 * implementation to clean up.
 *
 * A bare "return;" statement may be used to return early.
 */
#define FIXTURE_TEARDOWN(fixture_name) \
	void fixture_name##_teardown( \
		struct __test_metadata __attribute__((unused)) *_metadata, \
		FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)

/**
 * FIXTURE_VARIANT(fixture_name) - Optionally called once per fixture
 * to declare fixture variant
 *
 * @fixture_name: fixture name
 *
 * .. code-block:: c
 *
 *     FIXTURE_VARIANT(fixture_name) {
 *       type property1;
 *       ...
 *     };
 *
 * Defines type of constant parameters provided to FIXTURE_SETUP() and TEST_F()
 * as *variant*.  Variants allow the same tests to be run with different
 * arguments.
 */
#define FIXTURE_VARIANT(fixture_name) struct _fixture_variant_##fixture_name

/**
 * FIXTURE_VARIANT_ADD(fixture_name, variant_name) - Called once per fixture
 * variant to setup and register the data
 *
 * @fixture_name: fixture name
 * @variant_name: name of the parameter set
 *
 * .. code-block:: c
 *
 *     FIXTURE_VARIANT_ADD(fixture_name, variant_name) {
 *       .property1 = val1,
 *       ...
 *     };
 *
 * Defines a variant of the test fixture, provided to FIXTURE_SETUP() and
 * TEST_F() as *variant*.  Tests of each fixture will be run once for each
 * variant.
 */
#define FIXTURE_VARIANT_ADD(fixture_name, variant_name) \
	extern FIXTURE_VARIANT(fixture_name) \
		_##fixture_name##_##variant_name##_variant; \
	static struct __fixture_variant_metadata \
		_##fixture_name##_##variant_name##_object = \
		{ .name = #variant_name, \
		  .data = &_##fixture_name##_##variant_name##_variant}; \
	static void __attribute__((constructor)) \
	_register_##fixture_name##_##variant_name(void) \
	{ \
		__register_fixture_variant(&_##fixture_name##_fixture_object, \
			&_##fixture_name##_##variant_name##_object); \
	} \
	FIXTURE_VARIANT(fixture_name) \
		_##fixture_name##_##variant_name##_variant =

/**
 * TEST_F(fixture_name, test_name) - Emits test registration and helpers for
 * fixture-based test cases
 *
 * @fixture_name: fixture name
 * @test_name: test name
 *
 * .. code-block:: c
 *
 *     TEST_F(fixture, name) { implementation }
 *
 * Defines a test that depends on a fixture (e.g., is part of a test case).
 * Very similar to TEST() except that *self* is the setup instance of fixture's
 * datatype exposed for use by the implementation.
 *
 * Warning: use of ASSERT_* here will skip TEARDOWN.
 */
/* TODO(wad) register fixtures on dedicated test lists. */
#define TEST_F(fixture_name, test_name) \
	__TEST_F_IMPL(fixture_name, test_name, -1, TEST_TIMEOUT_DEFAULT)

#define TEST_F_SIGNAL(fixture_name, test_name, signal) \
	__TEST_F_IMPL(fixture_name, test_name, signal, TEST_TIMEOUT_DEFAULT)

#define TEST_F_TIMEOUT(fixture_name, test_name, timeout) \
	__TEST_F_IMPL(fixture_name, test_name, -1, timeout)

#define __TEST_F_IMPL(fixture_name, test_name, signal, tmout) \
	static void fixture_name##_##test_name( \
		struct __test_metadata *_metadata, \
		FIXTURE_DATA(fixture_name) *self, \
		const FIXTURE_VARIANT(fixture_name) *variant); \
	static inline void wrapper_##fixture_name##_##test_name( \
		struct __test_metadata *_metadata, \
		struct __fixture_variant_metadata *variant) \
	{ \
		/* fixture data is alloced, setup, and torn down per call. */ \
		FIXTURE_DATA(fixture_name) self; \
		memset(&self, 0, sizeof(FIXTURE_DATA(fixture_name))); \
		fixture_name##_setup(_metadata, &self, variant->data); \
		/* Let setup failure terminate early. */ \
		if (!_metadata->passed) \
			return; \
		fixture_name##_##test_name(_metadata, &self, variant->data); \
		fixture_name##_teardown(_metadata, &self); \
	} \
	static struct __test_metadata \
		      _##fixture_name##_##test_name##_object = { \
		.name = #test_name, \
		.fn = &wrapper_##fixture_name##_##test_name, \
		.fixture = &_##fixture_name##_fixture_object, \
		.termsig = signal, \
		.timeout = tmout, \
	 }; \
	static void __attribute__((constructor)) \
	_register_##fixture_name##_##test_name(void) \
	{ \
		__register_test(&_##fixture_name##_##test_name##_object); \
	} \
	static void fixture_name##_##test_name( \
		struct __test_metadata __attribute__((unused)) *_metadata, \
		FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \
		const FIXTURE_VARIANT(fixture_name) \
			__attribute__((unused)) *variant)

/**
 * TEST_HARNESS_MAIN - Simple wrapper to run the test harness
 *
 * .. code-block:: c
 *
 *     TEST_HARNESS_MAIN
 *
 * Use once to append a main() to the test file.
 */
#define TEST_HARNESS_MAIN \
	static void __attribute__((constructor)) \
	__constructor_order_last(void) \
	{ \
		if (!__constructor_order) \
			__constructor_order = _CONSTRUCTOR_ORDER_BACKWARD; \
	} \
	int main(int argc, char **argv) { \
		return test_harness_run(argc, argv); \
	}

/**
 * DOC: operators
 *
 * Operators for use in TEST() and TEST_F().
 * ASSERT_* calls will stop test execution immediately.
 * EXPECT_* calls will emit a failure warning, note it, and continue.
 */

/**
 * ASSERT_EQ()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * ASSERT_EQ(expected, measured): expected == measured
 */
#define ASSERT_EQ(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, ==, 1)

/**
 * ASSERT_NE()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * ASSERT_NE(expected, measured): expected != measured
 */
#define ASSERT_NE(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, !=, 1)

/**
 * ASSERT_LT()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * ASSERT_LT(expected, measured): expected < measured
 */
#define ASSERT_LT(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, <, 1)

/**
 * ASSERT_LE()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * ASSERT_LE(expected, measured): expected <= measured
 */
#define ASSERT_LE(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, <=, 1)

/**
 * ASSERT_GT()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * ASSERT_GT(expected, measured): expected > measured
 */
#define ASSERT_GT(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, >, 1)

/**
 * ASSERT_GE()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * ASSERT_GE(expected, measured): expected >= measured
 */
#define ASSERT_GE(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, >=, 1)

/**
 * ASSERT_NULL()
 *
 * @seen: measured value
 *
 * ASSERT_NULL(measured): NULL == measured
 */
#define ASSERT_NULL(seen) \
	__EXPECT(NULL, "NULL", seen, #seen, ==, 1)

/**
 * ASSERT_TRUE()
 *
 * @seen: measured value
 *
 * ASSERT_TRUE(measured): measured != 0
 */
#define ASSERT_TRUE(seen) \
	__EXPECT(0, "0", seen, #seen, !=, 1)

/**
 * ASSERT_FALSE()
 *
 * @seen: measured value
 *
 * ASSERT_FALSE(measured): measured == 0
 */
#define ASSERT_FALSE(seen) \
	__EXPECT(0, "0", seen, #seen, ==, 1)

/**
 * ASSERT_STREQ()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * ASSERT_STREQ(expected, measured): !strcmp(expected, measured)
 */
#define ASSERT_STREQ(expected, seen) \
	__EXPECT_STR(expected, seen, ==, 1)

/**
 * ASSERT_STRNE()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * ASSERT_STRNE(expected, measured): strcmp(expected, measured)
 */
#define ASSERT_STRNE(expected, seen) \
	__EXPECT_STR(expected, seen, !=, 1)

/**
 * EXPECT_EQ()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * EXPECT_EQ(expected, measured): expected == measured
 */
#define EXPECT_EQ(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, ==, 0)

/**
 * EXPECT_NE()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * EXPECT_NE(expected, measured): expected != measured
 */
#define EXPECT_NE(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, !=, 0)

/**
 * EXPECT_LT()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * EXPECT_LT(expected, measured): expected < measured
 */
#define EXPECT_LT(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, <, 0)

/**
 * EXPECT_LE()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * EXPECT_LE(expected, measured): expected <= measured
 */
#define EXPECT_LE(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, <=, 0)

/**
 * EXPECT_GT()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * EXPECT_GT(expected, measured): expected > measured
 */
#define EXPECT_GT(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, >, 0)

/**
 * EXPECT_GE()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * EXPECT_GE(expected, measured): expected >= measured
 */
#define EXPECT_GE(expected, seen) \
	__EXPECT(expected, #expected, seen, #seen, >=, 0)

/**
 * EXPECT_NULL()
 *
 * @seen: measured value
 *
 * EXPECT_NULL(measured): NULL == measured
 */
#define EXPECT_NULL(seen) \
	__EXPECT(NULL, "NULL", seen, #seen, ==, 0)

/**
 * EXPECT_TRUE()
 *
 * @seen: measured value
 *
 * EXPECT_TRUE(measured): 0 != measured
 */
#define EXPECT_TRUE(seen) \
	__EXPECT(0, "0", seen, #seen, !=, 0)

/**
 * EXPECT_FALSE()
 *
 * @seen: measured value
 *
 * EXPECT_FALSE(measured): 0 == measured
 */
#define EXPECT_FALSE(seen) \
	__EXPECT(0, "0", seen, #seen, ==, 0)

/**
 * EXPECT_STREQ()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * EXPECT_STREQ(expected, measured): !strcmp(expected, measured)
 */
#define EXPECT_STREQ(expected, seen) \
	__EXPECT_STR(expected, seen, ==, 0)

/**
 * EXPECT_STRNE()
 *
 * @expected: expected value
 * @seen: measured value
 *
 * EXPECT_STRNE(expected, measured): strcmp(expected, measured)
 */
#define EXPECT_STRNE(expected, seen) \
	__EXPECT_STR(expected, seen, !=, 0)

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof(a[0]))

/* Support an optional handler after an ASSERT_* or EXPECT_*.  The approach is
 * not thread-safe, but it should be fine in most sane test scenarios.
 *
 * Using __bail(), which optionally abort()s, is the easiest way to early
 * return while still providing an optional block to the API consumer.
 */
#define OPTIONAL_HANDLER(_assert) \
	for (; _metadata->trigger; _metadata->trigger = \
			__bail(_assert, _metadata->no_print, _metadata->step))

#define __INC_STEP(_metadata) \
	/* Keep "step" below 255 (which is used for "SKIP" reporting). */ \
	if (_metadata->passed && _metadata->step < 253) \
		_metadata->step++;

#define is_signed_type(var)	(!!(((__typeof__(var))(-1)) < (__typeof__(var))1))

#define __EXPECT(_expected, _expected_str, _seen, _seen_str, _t, _assert) do { \
	/* Avoid multiple evaluation of the cases */ \
	__typeof__(_expected) __exp = (_expected); \
	__typeof__(_seen) __seen = (_seen); \
	if (_assert) __INC_STEP(_metadata); \
	if (!(__exp _t __seen)) { \
		/* Report with actual signedness to avoid weird output. */ \
		switch (is_signed_type(__exp) * 2 + is_signed_type(__seen)) { \
		case 0: { \
			unsigned long long __exp_print = (uintptr_t)__exp; \
			unsigned long long __seen_print = (uintptr_t)__seen; \
			__TH_LOG("Expected %s (%llu) %s %s (%llu)", \
				 _expected_str, __exp_print, #_t, \
				 _seen_str, __seen_print); \
			break; \
		} \
		case 1: { \
			unsigned long long __exp_print = (uintptr_t)__exp; \
			long long __seen_print = (intptr_t)__seen; \
			__TH_LOG("Expected %s (%llu) %s %s (%lld)", \
				 _expected_str, __exp_print, #_t, \
				 _seen_str, __seen_print); \
			break; \
		} \
		case 2: { \
			long long __exp_print = (intptr_t)__exp; \
			unsigned long long __seen_print = (uintptr_t)__seen; \
			__TH_LOG("Expected %s (%lld) %s %s (%llu)", \
				 _expected_str, __exp_print, #_t, \
				 _seen_str, __seen_print); \
			break; \
		} \
		case 3: { \
			long long __exp_print = (intptr_t)__exp; \
			long long __seen_print = (intptr_t)__seen; \
			__TH_LOG("Expected %s (%lld) %s %s (%lld)", \
				 _expected_str, __exp_print, #_t, \
				 _seen_str, __seen_print); \
			break; \
		} \
		} \
		_metadata->passed = 0; \
		/* Ensure the optional handler is triggered */ \
		_metadata->trigger = 1; \
	} \
} while (0); OPTIONAL_HANDLER(_assert)

#define __EXPECT_STR(_expected, _seen, _t, _assert) do { \
	const char *__exp = (_expected); \
	const char *__seen = (_seen); \
	if (_assert) __INC_STEP(_metadata); \
	if (!(strcmp(__exp, __seen) _t 0))  { \
		__TH_LOG("Expected '%s' %s '%s'.", __exp, #_t, __seen); \
		_metadata->passed = 0; \
		_metadata->trigger = 1; \
	} \
} while (0); OPTIONAL_HANDLER(_assert)

/* List helpers */
#define __LIST_APPEND(head, item) \
{ \
	/* Circular linked list where only prev is circular. */ \
	if (head == NULL) { \
		head = item; \
		item->next = NULL; \
		item->prev = item; \
		return;	\
	} \
	if (__constructor_order == _CONSTRUCTOR_ORDER_FORWARD) { \
		item->next = NULL; \
		item->prev = head->prev; \
		item->prev->next = item; \
		head->prev = item; \
	} else { \
		item->next = head; \
		item->next->prev = item; \
		item->prev = item; \
		head = item; \
	} \
}

struct __test_results {
	char reason[1024];	/* Reason for test result */
};

struct __test_metadata;
struct __fixture_variant_metadata;

/* Contains all the information about a fixture. */
struct __fixture_metadata {
	const char *name;
	struct __test_metadata *tests;
	struct __fixture_variant_metadata *variant;
	struct __fixture_metadata *prev, *next;
} _fixture_global __attribute__((unused)) = {
	.name = "global",
	.prev = &_fixture_global,
};

static struct __fixture_metadata *__fixture_list = &_fixture_global;
static int __constructor_order;

#define _CONSTRUCTOR_ORDER_FORWARD   1
#define _CONSTRUCTOR_ORDER_BACKWARD -1

static inline void __register_fixture(struct __fixture_metadata *f)
{
	__LIST_APPEND(__fixture_list, f);
}

struct __fixture_variant_metadata {
	const char *name;
	const void *data;
	struct __fixture_variant_metadata *prev, *next;
};

static inline void
__register_fixture_variant(struct __fixture_metadata *f,
			   struct __fixture_variant_metadata *variant)
{
	__LIST_APPEND(f->variant, variant);
}

/* Contains all the information for test execution and status checking. */
struct __test_metadata {
	const char *name;
	void (*fn)(struct __test_metadata *,
		   struct __fixture_variant_metadata *);
	pid_t pid;	/* pid of test when being run */
	struct __fixture_metadata *fixture;
	int termsig;
	int passed;
	int skip;	/* did SKIP get used? */
	int trigger;	/* extra handler after the evaluation */
	int timeout;	/* seconds to wait for test timeout */
	bool timed_out;	/* did this test timeout instead of exiting? */
	__u8 step;
	bool no_print;	/* manual trigger when TH_LOG_STREAM is not available */
	struct __test_results *results;
	struct __test_metadata *prev, *next;
};

/*
 * Since constructors are called in reverse order, reverse the test
 * list so tests are run in source declaration order.
 * https://gcc.gnu.org/onlinedocs/gccint/Initialization.html
 * However, it seems not all toolchains do this correctly, so use
 * __constructor_order to detect which direction is called first
 * and adjust list building logic to get things running in the right
 * direction.
 */
static inline void __register_test(struct __test_metadata *t)
{
	__LIST_APPEND(t->fixture->tests, t);
}

static inline int __bail(int for_realz, bool no_print, __u8 step)
{
	if (for_realz) {
		if (no_print)
			_exit(step);
		abort();
	}
	return 0;
}

struct __test_metadata *__active_test;
static void __timeout_handler(int sig, siginfo_t *info, void *ucontext)
{
	struct __test_metadata *t = __active_test;

	/* Sanity check handler execution environment. */
	if (!t) {
		fprintf(TH_LOG_STREAM,
			"# no active test in SIGALRM handler!?\n");
		abort();
	}
	if (sig != SIGALRM || sig != info->si_signo) {
		fprintf(TH_LOG_STREAM,
			"# %s: SIGALRM handler caught signal %d!?\n",
			t->name, sig != SIGALRM ?
				sig : info->si_signo);
		abort();
	}

	t->timed_out = true;
	// signal process group
	kill(-(t->pid), SIGKILL);
}

void __wait_for_test(struct __test_metadata *t)
{
	struct sigaction action = {
		.sa_sigaction = __timeout_handler,
		.sa_flags = SA_SIGINFO,
	};
	struct sigaction saved_action;
	int status;

	if (sigaction(SIGALRM, &action, &saved_action)) {
		t->passed = 0;
		fprintf(TH_LOG_STREAM,
			"# %s: unable to install SIGALRM handler\n",
			t->name);
		return;
	}
	__active_test = t;
	t->timed_out = false;
	alarm(t->timeout);
	waitpid(t->pid, &status, 0);
	alarm(0);
	if (sigaction(SIGALRM, &saved_action, NULL)) {
		t->passed = 0;
		fprintf(TH_LOG_STREAM,
			"# %s: unable to uninstall SIGALRM handler\n",
			t->name);
		return;
	}
	__active_test = NULL;

	if (t->timed_out) {
		t->passed = 0;
		fprintf(TH_LOG_STREAM,
			"# %s: Test terminated by timeout\n", t->name);
	} else if (WIFEXITED(status)) {
		if (WEXITSTATUS(status) == 255) {
			/* SKIP */
			t->passed = 1;
			t->skip = 1;
		} else if (t->termsig != -1) {
			t->passed = 0;
			fprintf(TH_LOG_STREAM,
				"# %s: Test exited normally instead of by signal (code: %d)\n",
				t->name,
				WEXITSTATUS(status));
		} else {
			switch (WEXITSTATUS(status)) {
			/* Success */
			case 0:
				t->passed = 1;
				break;
			/* Other failure, assume step report. */
			default:
				t->passed = 0;
				fprintf(TH_LOG_STREAM,
					"# %s: Test failed at step #%d\n",
					t->name,
					WEXITSTATUS(status));
			}
		}
	} else if (WIFSIGNALED(status)) {
		t->passed = 0;
		if (WTERMSIG(status) == SIGABRT) {
			fprintf(TH_LOG_STREAM,
				"# %s: Test terminated by assertion\n",
				t->name);
		} else if (WTERMSIG(status) == t->termsig) {
			t->passed = 1;
		} else {
			fprintf(TH_LOG_STREAM,
				"# %s: Test terminated unexpectedly by signal %d\n",
				t->name,
				WTERMSIG(status));
		}
	} else {
		fprintf(TH_LOG_STREAM,
			"# %s: Test ended in some other way [%u]\n",
			t->name,
			status);
	}
}

void __run_test(struct __fixture_metadata *f,
		struct __fixture_variant_metadata *variant,
		struct __test_metadata *t)
{
	/* reset test struct */
	t->passed = 1;
	t->skip = 0;
	t->trigger = 0;
	t->step = 1;
	t->no_print = 0;
	memset(t->results->reason, 0, sizeof(t->results->reason));

	ksft_print_msg(" RUN %s%s%s.%s ...\n",
	       f->name, variant->name[0] ? "." : "", variant->name, t->name);

	/* Make sure output buffers are flushed before fork */
	fflush(stdout);
	fflush(stderr);

	t->pid = fork();
	if (t->pid < 0) {
		ksft_print_msg("ERROR SPAWNING TEST CHILD\n");
		t->passed = 0;
	} else if (t->pid == 0) {
		setpgrp();
		t->fn(t, variant);
		if (t->skip)
			_exit(255);
		/* Pass is exit 0 */
		if (t->passed)
			_exit(0);
		/* Something else happened, report the step. */
		_exit(t->step);
	} else {
		__wait_for_test(t);
	}
	ksft_print_msg(" %4s %s%s%s.%s\n", t->passed ? "OK" : "FAIL",
	       f->name, variant->name[0] ? "." : "", variant->name, t->name);

	if (t->skip)
		ksft_test_result_skip("%s\n", t->results->reason[0] ?
					t->results->reason : "unknown");
	else
		ksft_test_result(t->passed, "%s%s%s.%s\n",
: "", variant->name, t->name); +} + +static int test_harness_run(int __attribute__((unused)) argc, + char __attribute__((unused)) **argv) +{ + struct __fixture_variant_metadata no_variant = { .name = "", }; + struct __fixture_variant_metadata *v; + struct __fixture_metadata *f; + struct __test_results *results; + struct __test_metadata *t; + int ret = 0; + unsigned int case_count = 0, test_count = 0; + unsigned int count = 0; + unsigned int pass_count = 0; + + for (f = __fixture_list; f; f = f->next) { + for (v = f->variant ?: &no_variant; v; v = v->next) { + case_count++; + for (t = f->tests; t; t = t->next) + test_count++; + } + } + + results = mmap(NULL, sizeof(*results), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + + ksft_print_header(); + ksft_set_plan(test_count); + ksft_print_msg("Starting %u tests from %u test cases.\n", + test_count, case_count); + for (f = __fixture_list; f; f = f->next) { + for (v = f->variant ?: &no_variant; v; v = v->next) { + for (t = f->tests; t; t = t->next) { + count++; + t->results = results; + __run_test(f, v, t); + t->results = NULL; + if (t->passed) + pass_count++; + else + ret = 1; + } + } + } + munmap(results, sizeof(*results)); + + ksft_print_msg("%s: %u / %u tests passed.\n", ret ? "FAILED" : "PASSED", + pass_count, count); + ksft_exit(ret == 0); + + /* unreachable */ + return KSFT_FAIL; +} + +static void __attribute__((constructor)) __constructor_order_first(void) +{ + if (!__constructor_order) + __constructor_order = _CONSTRUCTOR_ORDER_FORWARD; +} + +#endif /* __KSELFTEST_HARNESS_H */ diff --git a/tools/testing/selftests/kselftest_install.sh b/tools/testing/selftests/kselftest_install.sh new file mode 100755 index 000000000..407af7da7 --- /dev/null +++ b/tools/testing/selftests/kselftest_install.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Kselftest Install +# Install kselftest tests +# Author: Shuah Khan <shuahkh@osg.samsung.com> +# Copyright (C) 2015 Samsung Electronics Co., Ltd. + +main() +{ + base_dir=`pwd` + install_dir="$base_dir"/kselftest_install + + # Make sure we're in the selftests top-level directory. + if [ $(basename "$base_dir") != "selftests" ]; then + echo "$0: Please run it in selftests directory ..." + exit 1; + fi + + # Only allow installation into an existing location. + if [ "$#" -eq 0 ]; then + echo "$0: Installing in default location - $install_dir ..." + elif [ ! -d "$1" ]; then + echo "$0: $1 doesn't exist!!" + exit 1; + else + install_dir="$1" + echo "$0: Installing in specified location - $install_dir ..." + fi + + # Build tests + KSFT_INSTALL_PATH="$install_dir" make install +} + +main "$@" diff --git a/tools/testing/selftests/kselftest_module.h b/tools/testing/selftests/kselftest_module.h new file mode 100644 index 000000000..e8eafaf09 --- /dev/null +++ b/tools/testing/selftests/kselftest_module.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef __KSELFTEST_MODULE_H +#define __KSELFTEST_MODULE_H + +#include <linux/module.h> + +/* + * Test framework for writing test modules to be loaded by kselftest. + * See Documentation/dev-tools/kselftest.rst for an example test module. 
diff --git a/tools/testing/selftests/kselftest_install.sh b/tools/testing/selftests/kselftest_install.sh
new file mode 100755
index 000000000..407af7da7
--- /dev/null
+++ b/tools/testing/selftests/kselftest_install.sh
@@ -0,0 +1,35 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Kselftest Install
# Install kselftest tests
# Author: Shuah Khan <shuahkh@osg.samsung.com>
# Copyright (C) 2015 Samsung Electronics Co., Ltd.

main()
{
	base_dir=`pwd`
	install_dir="$base_dir"/kselftest_install

	# Make sure we're in the selftests top-level directory.
	if [ $(basename "$base_dir") != "selftests" ]; then
		echo "$0: Please run it in selftests directory ..."
		exit 1;
	fi

	# Only allow installation into an existing location.
	if [ "$#" -eq 0 ]; then
		echo "$0: Installing in default location - $install_dir ..."
	elif [ ! -d "$1" ]; then
		echo "$0: $1 doesn't exist!!"
		exit 1;
	else
		install_dir="$1"
		echo "$0: Installing in specified location - $install_dir ..."
	fi

	# Build tests
	KSFT_INSTALL_PATH="$install_dir" make install
}

main "$@"
diff --git a/tools/testing/selftests/kselftest_module.h b/tools/testing/selftests/kselftest_module.h
new file mode 100644
index 000000000..e8eafaf09
--- /dev/null
+++ b/tools/testing/selftests/kselftest_module.h
@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __KSELFTEST_MODULE_H
#define __KSELFTEST_MODULE_H

#include <linux/module.h>

/*
 * Test framework for writing test modules to be loaded by kselftest.
 * See Documentation/dev-tools/kselftest.rst for an example test module.
 */

#define KSTM_MODULE_GLOBALS()			\
static unsigned int total_tests __initdata;	\
static unsigned int failed_tests __initdata

#define KSTM_CHECK_ZERO(x) do {						\
	total_tests++;							\
	if (x) {							\
		pr_warn("TC failed at %s:%d\n", __func__, __LINE__);	\
		failed_tests++;						\
	}								\
} while (0)

static inline int kstm_report(unsigned int total_tests, unsigned int failed_tests)
{
	if (failed_tests == 0)
		pr_info("all %u tests passed\n", total_tests);
	else
		pr_warn("failed %u out of %u tests\n", failed_tests, total_tests);

	return failed_tests ? -EINVAL : 0;
}

#define KSTM_MODULE_LOADERS(__module)			\
static int __init __module##_init(void)		\
{							\
	pr_info("loaded.\n");				\
	selftest();					\
	return kstm_report(total_tests, failed_tests);	\
}							\
static void __exit __module##_exit(void)		\
{							\
	pr_info("unloaded.\n");				\
}							\
module_init(__module##_init);				\
module_exit(__module##_exit)

#endif /* __KSELFTEST_MODULE_H */
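For context, an in-kernel test module built on this header looks roughly like the sketch below. It assumes the module's responsibility is only to define selftest(); KSTM_MODULE_LOADERS() supplies the init/exit boilerplate. The module name test_demo and the two checks are invented for illustration; Documentation/dev-tools/kselftest.rst has the canonical example:

```c
// SPDX-License-Identifier: GPL-2.0+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>

#include "../tools/testing/selftests/kselftest_module.h"

KSTM_MODULE_GLOBALS();

/* Each KSTM_CHECK_ZERO() bumps total_tests and logs a failure if nonzero. */
static void __init selftest(void)
{
	KSTM_CHECK_ZERO(strcmp("kselftest", "kselftest"));
	KSTM_CHECK_ZERO(1 + 1 - 2);
}

KSTM_MODULE_LOADERS(test_demo);
MODULE_LICENSE("GPL");
```

Loaded via the module.sh wrapper above, a failing check makes the init function return -EINVAL, which modprobe reports and the wrapper turns into a kselftest FAIL.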