path: root/tools/perf/tests/shell
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
commit     b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree       42f079ff82e701ebcb76829974b4caca3e5b6798 /tools/perf/tests/shell
parent     Adding upstream version 6.8.12. (diff)
Adding upstream version 6.9.7. (tag: upstream/6.9.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/perf/tests/shell')
-rw-r--r--   tools/perf/tests/shell/base_probe/settings.sh                48
-rwxr-xr-x   tools/perf/tests/shell/base_probe/test_adding_kernel.sh     278
-rwxr-xr-x   tools/perf/tests/shell/common/check_all_lines_matched.pl     39
-rwxr-xr-x   tools/perf/tests/shell/common/check_all_patterns_found.pl    34
-rwxr-xr-x   tools/perf/tests/shell/common/check_no_patterns_found.pl     34
-rw-r--r--   tools/perf/tests/shell/common/init.sh                       117
-rw-r--r--   tools/perf/tests/shell/common/patterns.sh                   268
-rw-r--r--   tools/perf/tests/shell/common/settings.sh                    79
-rw-r--r--   tools/perf/tests/shell/lib/perf_has_symbol.sh                 2
-rw-r--r--   tools/perf/tests/shell/lib/perf_json_output_lint.py           4
-rw-r--r--   tools/perf/tests/shell/lib/perf_metric_validation.py        231
-rw-r--r--   tools/perf/tests/shell/lib/stat_output.sh                    12
-rwxr-xr-x   tools/perf/tests/shell/perftool-testsuite_probe.sh           23
-rwxr-xr-x   tools/perf/tests/shell/stat+csv_output.sh                     2
-rwxr-xr-x   tools/perf/tests/shell/stat+json_output.sh                   13
-rwxr-xr-x   tools/perf/tests/shell/stat+std_output.sh                     4
-rwxr-xr-x   tools/perf/tests/shell/stat_bpf_counters.sh                  12
-rwxr-xr-x   tools/perf/tests/shell/stat_metrics_values.sh                 4
-rwxr-xr-x   tools/perf/tests/shell/test_arm_callgraph_fp.sh               6
-rwxr-xr-x   tools/perf/tests/shell/test_arm_coresight.sh                  2
20 files changed, 1094 insertions, 118 deletions
diff --git a/tools/perf/tests/shell/base_probe/settings.sh b/tools/perf/tests/shell/base_probe/settings.sh
new file mode 100644
index 0000000000..123621c7f9
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/settings.sh
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# settings.sh of perf_probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+#
+
+export TEST_NAME="perf_probe"
+
+export MY_ARCH=`arch`
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
+ # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
+ # dirs will be used for that
+ export PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`
+ export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
+ export MAKE_TARGET_DIR="$CURRENT_TEST_DIR/examples"
+ test -d "$MAKE_TARGET_DIR" || mkdir -p "$MAKE_TARGET_DIR"
+ export LOGS_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME/logs"
+ test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
+else
+ # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
+ export CURRENT_TEST_DIR="."
+ export LOGS_DIR="."
+fi
+
+check_kprobes_available()
+{
+ test -e /sys/kernel/debug/tracing/kprobe_events
+}
+
+check_uprobes_available()
+{
+ test -e /sys/kernel/debug/tracing/uprobe_events
+}
+
+clear_all_probes()
+{
+ echo 0 > /sys/kernel/debug/tracing/events/enable
+ check_kprobes_available && echo > /sys/kernel/debug/tracing/kprobe_events
+ check_uprobes_available && echo > /sys/kernel/debug/tracing/uprobe_events
+}
+
+check_sdt_support()
+{
+ $CMD_PERF list sdt | grep sdt > /dev/null 2> /dev/null
+}
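For orientation, a minimal sketch (not part of the patch) of how a testcase consumes these helpers; the skip message is hypothetical:

  #!/bin/bash
  . ./settings.sh                      # pulls in LOGS_DIR and the helpers above

  if ! check_kprobes_available; then   # kprobe_events not present in tracefs
      echo "kprobes not available, skipping"
      exit 0
  fi

  clear_all_probes                     # start from a clean kprobe/uprobe state
  echo "logs will be written to $LOGS_DIR"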
diff --git a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
new file mode 100755
index 0000000000..a5d707efad
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
@@ -0,0 +1,278 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_adding_kernel of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests the adding of probes, their correct listing,
+# and their removal.
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+# shellcheck disable=SC2034 # the variable is later used after the working environment is included
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+TEST_PROBE=${TEST_PROBE:-"inode_permission"}
+
+check_kprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+
+### basic probe adding
+
+for opt in "" "-a" "--add"; do
+ clear_all_probes
+ $CMD_PERF probe $opt $TEST_PROBE 2> $LOGS_DIR/adding_kernel_add$opt.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_add$opt.err
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding probe $TEST_PROBE :: $opt"
+ (( TEST_RESULT += $? ))
+done
+
+
+### listing added probe :: perf list
+
+# any added probes should appear in perf-list output
+$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "probe:${TEST_PROBE}(?:_\d+)?\s+\[Tracepoint event\]" "Metric Groups:" < $LOGS_DIR/adding_kernel_list.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf list"
+(( TEST_RESULT += $? ))
+
+
+### listing added probe :: perf probe -l
+
+# '-l' should list all the added probes as well
+$CMD_PERF probe -l > $LOGS_DIR/adding_kernel_list-l.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "\s*probe:${TEST_PROBE}(?:_\d+)?\s+\(on ${TEST_PROBE}(?:[:\+]$RE_NUMBER_HEX)?@.+\)" < $LOGS_DIR/adding_kernel_list-l.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf probe -l"
+(( TEST_RESULT += $? ))
+
+
+### using added probe
+
+$CMD_PERF stat -e probe:$TEST_PROBE\* -o $LOGS_DIR/adding_kernel_using_probe.log -- cat /proc/uptime > /dev/null
+PERF_EXIT_CODE=$?
+
+REGEX_STAT_HEADER="\s*Performance counter stats for \'cat /proc/uptime\':"
+REGEX_STAT_VALUES="\s*\d+\s+probe:$TEST_PROBE"
+# the value should be at least 1 (nonzero)
+REGEX_STAT_VALUE_NONZERO="\s*[1-9][0-9]*\s+probe:$TEST_PROBE"
+REGEX_STAT_TIME="\s*$RE_NUMBER\s+seconds (?:time elapsed|user|sys)"
+../common/check_all_lines_matched.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUES" "$REGEX_STAT_TIME" "$RE_LINE_COMMENT" "$RE_LINE_EMPTY" < $LOGS_DIR/adding_kernel_using_probe.log
+CHECK_EXIT_CODE=$?
+../common/check_all_patterns_found.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUE_NONZERO" "$REGEX_STAT_TIME" < $LOGS_DIR/adding_kernel_using_probe.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using added probe"
+(( TEST_RESULT += $? ))
+
+
+### removing added probe
+
+# '-d' should remove the probe
+$CMD_PERF probe -d $TEST_PROBE\* 2> $LOGS_DIR/adding_kernel_removing.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" < $LOGS_DIR/adding_kernel_removing.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "deleting added probe"
+(( TEST_RESULT += $? ))
+
+
+### listing removed probe
+
+# removed probes should NOT appear in perf-list output
+$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list_removed.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "Metric Groups:" < $LOGS_DIR/adding_kernel_list_removed.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing removed probe (should NOT be listed)"
+(( TEST_RESULT += $? ))
+
+
+### dry run
+
+# the '-n' switch should run it in dry mode
+$CMD_PERF probe -n --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_dryrun.err
+PERF_EXIT_CODE=$?
+
+# check for the output (should be the same as usual)
+../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_dryrun.err
+CHECK_EXIT_CODE=$?
+
+# check that no probe was actually added
+! ( $CMD_PERF probe -l | grep "probe:$TEST_PROBE" )
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "dry run :: adding probe"
+(( TEST_RESULT += $? ))
+
+
+### force-adding probes
+
+# when using '--force' a probe should be added even if it is already there
+$CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_01.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_01.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: first probe adding"
+(( TEST_RESULT += $? ))
+
+# adding existing probe without '--force' should fail
+! $CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_02.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Error: event \"$TEST_PROBE\" already exists." "Error: Failed to add events." < $LOGS_DIR/adding_kernel_forceadd_02.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (without force)"
+(( TEST_RESULT += $? ))
+
+# adding existing probe with '--force' should pass
+NO_OF_PROBES=`$CMD_PERF probe -l | wc -l`
+$CMD_PERF probe --force --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_03.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new events?:" "probe:${TEST_PROBE}_${NO_OF_PROBES}" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_03.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (with force)"
+(( TEST_RESULT += $? ))
+
+
+### using doubled probe
+
+# since they are the same, they should produce the same results
+$CMD_PERF stat -e probe:$TEST_PROBE -e probe:${TEST_PROBE}_${NO_OF_PROBES} -x';' -o $LOGS_DIR/adding_kernel_using_two.log -- bash -c 'cat /proc/cpuinfo > /dev/null'
+PERF_EXIT_CODE=$?
+
+REGEX_LINE="$RE_NUMBER;+probe:${TEST_PROBE}_?(?:$NO_OF_PROBES)?;$RE_NUMBER;$RE_NUMBER"
+../common/check_all_lines_matched.pl "$REGEX_LINE" "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/adding_kernel_using_two.log
+CHECK_EXIT_CODE=$?
+
+VALUE_1=`grep "$TEST_PROBE;" $LOGS_DIR/adding_kernel_using_two.log | awk -F';' '{print $1}'`
+VALUE_2=`grep "${TEST_PROBE}_${NO_OF_PROBES};" $LOGS_DIR/adding_kernel_using_two.log | awk -F';' '{print $1}'`
+
+test $VALUE_1 -eq $VALUE_2
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using doubled probe"
+
+
+### removing multiple probes
+
+# using wildcards should remove all matching probes
+$CMD_PERF probe --del \* 2> $LOGS_DIR/adding_kernel_removing_wildcard.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "removing multiple probes"
+(( TEST_RESULT += $? ))
+
+
+### wildcard adding support
+
+$CMD_PERF probe -nf --max-probes=512 -a 'vfs_* $params' 2> $LOGS_DIR/adding_kernel_adding_wildcard.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "probe:vfs_mknod" "probe:vfs_create" "probe:vfs_rmdir" "probe:vfs_link" "probe:vfs_write" < $LOGS_DIR/adding_kernel_adding_wildcard.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "wildcard adding support"
+(( TEST_RESULT += $? ))
+
+
+### non-existing variable
+
+# perf probe should survive a non-existing variable probing attempt
+{ $CMD_PERF probe 'vfs_read somenonexistingrandomstuffwhichisalsoprettylongorevenlongertoexceed64' ; } 2> $LOGS_DIR/adding_kernel_nonexisting.err
+PERF_EXIT_CODE=$?
+
+# the exit code should be neither 0 nor 139 (segfault)
+test $PERF_EXIT_CODE -ne 139 -a $PERF_EXIT_CODE -ne 0
+PERF_EXIT_CODE=$?
+
+# check that the error message is reasonable
+../common/check_all_patterns_found.pl "Failed to find" "somenonexistingrandomstuffwhichisalsoprettylongorevenlongertoexceed64" < $LOGS_DIR/adding_kernel_nonexisting.err
+CHECK_EXIT_CODE=$?
+../common/check_all_patterns_found.pl "in this function|at this address" "Error" "Failed to add events" < $LOGS_DIR/adding_kernel_nonexisting.err
+(( CHECK_EXIT_CODE += $? ))
+../common/check_all_lines_matched.pl "Failed to find" "Error" "Probe point .+ not found" "optimized out" "Use.+\-\-range option to show.+location range" < $LOGS_DIR/adding_kernel_nonexisting.err
+(( CHECK_EXIT_CODE += $? ))
+../common/check_no_patterns_found.pl "$RE_SEGFAULT" < $LOGS_DIR/adding_kernel_nonexisting.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-existing variable"
+(( TEST_RESULT += $? ))
+
+
+### function with return value
+
+# adding probe with return value
+$CMD_PERF probe --add "$TEST_PROBE%return \$retval" 2> $LOGS_DIR/adding_kernel_func_retval_add.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE%return with \\\$retval" < $LOGS_DIR/adding_kernel_func_retval_add.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: add"
+(( TEST_RESULT += $? ))
+
+# recording some data
+$CMD_PERF record -e probe:$TEST_PROBE\* -o $CURRENT_TEST_DIR/perf.data -- cat /proc/cpuinfo > /dev/null 2> $LOGS_DIR/adding_kernel_func_retval_record.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/adding_kernel_func_retval_record.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: record"
+(( TEST_RESULT += $? ))
+
+# perf script should report the function calls with the correct arg values
+$CMD_PERF script -i $CURRENT_TEST_DIR/perf.data > $LOGS_DIR/adding_kernel_func_retval_script.log
+PERF_EXIT_CODE=$?
+
+REGEX_SCRIPT_LINE="\s*cat\s+$RE_NUMBER\s+\[$RE_NUMBER\]\s+$RE_NUMBER:\s+probe:$TEST_PROBE\w*:\s+\($RE_NUMBER_HEX\s+<\-\s+$RE_NUMBER_HEX\)\s+arg1=$RE_NUMBER_HEX"
+../common/check_all_lines_matched.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log
+CHECK_EXIT_CODE=$?
+../common/check_all_patterns_found.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function argument probing :: script"
+(( TEST_RESULT += $? ))
+
+
+clear_all_probes
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
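The testcase is normally driven by perftool-testsuite_probe.sh (added further below), but it can also be run in place; a sketch, assuming an in-tree checkout and root privileges for writing to tracefs:

  cd tools/perf/tests/shell/base_probe
  PERFSUITE_RUN_DIR=$(mktemp -d) TESTLOG_VERBOSITY=2 ./test_adding_kernel.sh
  echo "exit code: $?"    # 0 means every subtest passed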
diff --git a/tools/perf/tests/shell/common/check_all_lines_matched.pl b/tools/perf/tests/shell/common/check_all_lines_matched.pl
new file mode 100755
index 0000000000..fded48959a
--- /dev/null
+++ b/tools/perf/tests/shell/common/check_all_lines_matched.pl
@@ -0,0 +1,39 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
+
+@regexps = @ARGV;
+
+$max_printed_lines = 20;
+$max_printed_lines = $ENV{TESTLOG_ERR_MSG_MAX_LINES} if (defined $ENV{TESTLOG_ERR_MSG_MAX_LINES});
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+$passed = 1;
+$lines_printed = 0;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ $line_matched = 0;
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $line_matched = 1;
+ last;
+ }
+ }
+
+ unless ($line_matched)
+ {
+ if ($lines_printed++ < $max_printed_lines)
+ {
+ print "Line did not match any pattern: \"$_\"\n" unless $quiet;
+ }
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
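The checker reads a log on stdin and exits 0 only if every line matches at least one of the regexps given as arguments; a sketch with a hypothetical log file:

  # every line of the removal log must be either empty or a "Removed event" line
  ./check_all_lines_matched.pl "^\s*$" "^Removed event" < removing.err && echo "log is clean"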
diff --git a/tools/perf/tests/shell/common/check_all_patterns_found.pl b/tools/perf/tests/shell/common/check_all_patterns_found.pl
new file mode 100755
index 0000000000..11bdf1d346
--- /dev/null
+++ b/tools/perf/tests/shell/common/check_all_patterns_found.pl
@@ -0,0 +1,34 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
+
+@regexps = @ARGV;
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+%found = ();
+$passed = 1;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $found{$r} = 1; # FIXME: maybe add counters -- how many times was the regexp matched
+ }
+ }
+}
+
+for $r (@regexps)
+{
+ unless (exists $found{$r})
+ {
+ print "Regexp not found: \"$r\"\n" unless $quiet;
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
diff --git a/tools/perf/tests/shell/common/check_no_patterns_found.pl b/tools/perf/tests/shell/common/check_no_patterns_found.pl
new file mode 100755
index 0000000000..770999e87a
--- /dev/null
+++ b/tools/perf/tests/shell/common/check_no_patterns_found.pl
@@ -0,0 +1,34 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
+
+@regexps = @ARGV;
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+%found = ();
+$passed = 1;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $found{$r} = 1;
+ }
+ }
+}
+
+for $r (@regexps)
+{
+ if (exists $found{$r})
+ {
+ print "Regexp found: \"$r\"\n" unless $quiet;
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
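check_all_patterns_found.pl and check_no_patterns_found.pl are duals: the former fails unless every regexp matches somewhere in the input, the latter fails as soon as any regexp matches anywhere. A sketch with a hypothetical log file:

  ./check_all_patterns_found.pl "Added new events?:" < add.err || echo "expected output missing"
  ./check_no_patterns_found.pl "Segmentation fault" < add.err || echo "perf crashed"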
diff --git a/tools/perf/tests/shell/common/init.sh b/tools/perf/tests/shell/common/init.sh
new file mode 100644
index 0000000000..aadeaf782e
--- /dev/null
+++ b/tools/perf/tests/shell/common/init.sh
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# init.sh
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This file should be used for initialization of basic functions
+# for checking, reporting results etc.
+#
+#
+
+
+. ../common/settings.sh
+. ../common/patterns.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+
+_echo()
+{
+ test "$TESTLOG_VERBOSITY" -ne 0 && echo -e "$@"
+}
+
+print_results()
+{
+ PERF_RETVAL="$1"; shift
+ CHECK_RETVAL="$1"; shift
+ FAILURE_REASON=""
+ TASK_COMMENT="$@"
+ if [ $PERF_RETVAL -eq 0 -a $CHECK_RETVAL -eq 0 ]; then
+ _echo "$MPASS-- [ PASS ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT"
+ return 0
+ else
+ if [ $PERF_RETVAL -ne 0 ]; then
+ FAILURE_REASON="command exitcode"
+ fi
+ if [ $CHECK_RETVAL -ne 0 ]; then
+ test -n "$FAILURE_REASON" && FAILURE_REASON="$FAILURE_REASON + "
+ FAILURE_REASON="$FAILURE_REASON""output regexp parsing"
+ fi
+ _echo "$MFAIL-- [ FAIL ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT ($FAILURE_REASON)"
+ return 1
+ fi
+}
+
+print_overall_results()
+{
+ RETVAL="$1"; shift
+ if [ $RETVAL -eq 0 ]; then
+ _echo "$MALLPASS## [ PASS ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY"
+ else
+ _echo "$MALLFAIL## [ FAIL ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY :: $RETVAL failures found"
+ fi
+ return $RETVAL
+}
+
+print_testcase_skipped()
+{
+ TASK_COMMENT="$@"
+ _echo "$MSKIP-- [ SKIP ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT :: testcase skipped"
+ return 0
+}
+
+print_overall_skipped()
+{
+ _echo "$MSKIP## [ SKIP ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME :: testcase skipped"
+ return 0
+}
+
+print_warning()
+{
+ WARN_COMMENT="$@"
+ _echo "$MWARN-- [ WARN ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $WARN_COMMENT"
+ return 0
+}
+
+# this function should skip a testcase if the testsuite is not run in
+# a runmode that fits the testcase --> if the suite runs in BASIC mode
+# all STANDARD and EXPERIMENTAL testcases will be skipped; if the suite
+# runs in STANDARD mode, all EXPERIMENTAL testcases will be skipped and
+# if the suite runs in EXPERIMENTAL mode, nothing is skipped
+consider_skipping()
+{
+ TESTCASE_RUNMODE="$1"
+ # the runmode of a testcase needs to be at least the current suite's runmode
+ if [ $PERFTOOL_TESTSUITE_RUNMODE -lt $TESTCASE_RUNMODE ]; then
+ print_overall_skipped
+ exit 0
+ fi
+}
+
+detect_baremetal()
+{
+ # return values:
+ # 0 = bare metal
+ # 1 = virtualization detected
+ # 2 = unknown state
+ VIRT=`systemd-detect-virt 2>/dev/null`
+ test $? -eq 127 && return 2
+ test "$VIRT" = "none"
+}
+
+detect_intel()
+{
+ # return values:
+ # 0 = is Intel
+ # 1 = is not Intel or unknown
+ grep "vendor_id" < /proc/cpuinfo | grep -q "GenuineIntel"
+}
+
+detect_amd()
+{
+ # return values:
+ # 0 = is AMD
+ # 1 = is not AMD or unknown
+ grep "vendor_id" < /proc/cpuinfo | grep -q "AMD"
+}
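Every subtest in the suite follows the same reporting shape: run perf, run a checker over its output, then feed both exit codes to print_results and accumulate its return value. A condensed sketch:

  $CMD_PERF probe --add inode_permission 2> add.err
  PERF_EXIT_CODE=$?
  ../common/check_all_patterns_found.pl "Added new events?:" < add.err
  CHECK_EXIT_CODE=$?
  print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding probe"
  (( TEST_RESULT += $? ))    # print_results returns 0 on PASS, 1 on FAIL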
diff --git a/tools/perf/tests/shell/common/patterns.sh b/tools/perf/tests/shell/common/patterns.sh
new file mode 100644
index 0000000000..21dab25c7b
--- /dev/null
+++ b/tools/perf/tests/shell/common/patterns.sh
@@ -0,0 +1,268 @@
+# SPDX-License-Identifier: GPL-2.0
+
+export RE_NUMBER="[0-9\.]+"
+# Number
+# Examples:
+# 123.456
+
+
+export RE_NUMBER_HEX="[0-9A-Fa-f]+"
+# Hexadecimal number
+# Examples:
+# 1234
+# a58d
+# aBcD
+# deadbeef
+
+
+export RE_DATE_YYYYMMDD="[0-9]{4}-(?:(?:01|03|05|07|08|10|12)-(?:[0-2][0-9]|3[0-1])|02-[0-2][0-9]|(?:(?:04|06|09|11)-(?:[0-2][0-9]|30)))"
+# Date in YYYY-MM-DD form
+# Examples:
+# 1990-02-29
+# 0015-07-31
+# 2456-12-31
+#! 2012-13-01
+#! 1963-09-31
+
+
+export RE_TIME="(?:[0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]"
+# Time
+# Examples:
+# 15:12:27
+# 23:59:59
+#! 24:00:00
+#! 11:25:60
+#! 17:60:15
+
+
+export RE_DATE_TIME="\w+\s+\w+\s+$RE_NUMBER\s+$RE_TIME\s+$RE_NUMBER"
+# Time and date
+# Examples:
+# Wed Feb 12 10:46:26 2020
+# Mon Mar 2 13:27:06 2020
+#! St úno 12 10:57:21 CET 2020
+#! Po úno 14 15:17:32 2010
+
+
+export RE_ADDRESS="0x$RE_NUMBER_HEX"
+# Memory address
+# Examples:
+# 0x123abc
+# 0xffffffff9abe8ae8
+# 0x0
+
+
+export RE_ADDRESS_NOT_NULL="0x[0-9A-Fa-f]*[1-9A-Fa-f]+[0-9A-Fa-f]*"
+# Memory address (not NULL)
+# Examples:
+# 0xffffffff9abe8ae8
+#! 0x0
+#! 0x0000000000000000
+
+export RE_PROCESS_PID="[^\/]+\/\d+"
+# A process with PID
+# Example:
+# sleep/4102
+# test_overhead./866185
+# in:imjournal/1096
+# random#$& test/866607
+
+export RE_EVENT_ANY="[\w\-\:\/_=,]+"
+# Name of any event (universal)
+# Examples:
+# cpu-cycles
+# cpu/event=12,umask=34/
+# r41e1
+# nfs:nfs_getattr_enter
+
+
+export RE_EVENT="[\w\-:_]+"
+# Name of a usual event
+# Examples:
+# cpu-cycles
+
+
+export RE_EVENT_RAW="r$RE_NUMBER_HEX"
+# Specification of a raw event
+# Examples:
+# r41e1
+# r1a
+
+
+export RE_EVENT_CPU="cpu/(\w+=$RE_NUMBER_HEX,?)+/p*"
+# Specification of a CPU event
+# Examples:
+# cpu/event=12,umask=34/pp
+
+
+export RE_EVENT_UNCORE="uncore/[\w_]+/"
+# Specification of an uncore event
+# Examples:
+# uncore/qhl_request_local_reads/
+
+
+export RE_EVENT_SUBSYSTEM="[\w\-]+:[\w\-]+"
+# Name of an event from subsystem
+# Examples:
+# ext4:ext4_ordered_write_end
+# sched:sched_switch
+
+
+export RE_FILE_NAME="[\w\+\.-]+"
+# A filename
+# Examples:
+# libstdc++.so.6
+#! some/path
+
+
+export RE_PATH_ABSOLUTE="(?:\/$RE_FILE_NAME)+"
+# A full filepath
+# Examples:
+# /usr/lib64/somelib.so.5.4.0
+# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko
+# /usr/bin/mv
+#! some/relative/path
+#! ./some/relative/path
+
+
+export RE_PATH="(?:$RE_FILE_NAME)?$RE_PATH_ABSOLUTE"
+# A filepath
+# Examples:
+# /usr/lib64/somelib.so.5.4.0
+# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko
+# ./.emacs
+# src/fs/file.c
+
+
+export RE_DSO="(?:$RE_PATH_ABSOLUTE(?: \(deleted\))?|\[kernel\.kallsyms\]|\[unknown\]|\[vdso\]|\[kernel\.vmlinux\][\.\w]*)"
+# A DSO name in various result tables
+# Examples:
+# /usr/lib64/somelib.so.5.4.0
+# /usr/bin/somebinart (deleted)
+# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko
+# [kernel.kallsyms]
+# [kernel.vmlinux]
+# [vdso]
+# [unknown]
+
+
+export RE_LINE_COMMENT="^#.*"
+# A comment line
+# Examples:
+# # Started on Thu Sep 10 11:43:00 2015
+
+
+export RE_LINE_EMPTY="^\s*$"
+# An empty line with possible whitespaces
+# Examples:
+#
+
+
+export RE_LINE_RECORD1="^\[\s+perf\s+record:\s+Woken up $RE_NUMBER times? to write data\s+\].*$"
+# The first line of perf-record "OK" output
+# Examples:
+# [ perf record: Woken up 1 times to write data ]
+
+
+export RE_LINE_RECORD2="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\.data(?:\.\d+)?\s*\(~?$RE_NUMBER samples\)\s+\].*$"
+# The second line of perf-record "OK" output
+# Examples:
+# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf.data (~109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB /some/temp/dir/perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data.3 (109 samples) ]
+
+
+export RE_LINE_RECORD2_TOLERANT="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\.data(?:\.\d+)?\s*(?:\(~?$RE_NUMBER samples\))?\s+\].*$"
+# The second line of perf-record "OK" output; a missing sample count is OK here too
+# Examples:
+# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf.data (~109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB /some/temp/dir/perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data.3 (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf.data ]
+
+
+export RE_LINE_RECORD2_TOLERANT_FILENAME="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\w*\.data(?:\.\d+)?\s*\(~?$RE_NUMBER samples\)\s+\].*$"
+# The second line of perf-record "OK" output
+# Examples:
+# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf_ls.data (~109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf_aNyCaSe.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perfdata.data.3 (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB /some/temp/dir/my_own.data (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB ./UPPERCASE.data (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB ./aNyKiNDoF.data.3 (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB perf.data ]
+
+
+export RE_LINE_TRACE_FULL="^\s*$RE_NUMBER\s*\(\s*$RE_NUMBER\s*ms\s*\):\s*$RE_PROCESS_PID\s+.*\)\s+=\s+(?:\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.115 ( 0.005 ms): sleep/4102 open(filename: 0xd09e2ab2, flags: CLOEXEC ) = 3
+# 0.157 ( 0.005 ms): sleep/4102 mmap(len: 3932736, prot: EXEC|READ, flags: PRIVATE|DENYWRITE, fd: 3 ) = 0x7f89d0605000
+#! 0.115 ( 0.005 ms): sleep/4102 open(filename: 0xd09e2ab2, flags: CLOEXEC ) =
+
+export RE_LINE_TRACE_ONE_PROC="^\s*$RE_NUMBER\s*\(\s*$RE_NUMBER\s*ms\s*\):\s*\w+\(.*\)\s+=\s+(?:\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.115 ( 0.005 ms): open(filename: 0xd09e2ab2, flags: CLOEXEC ) = 3
+# 0.157 ( 0.005 ms): mmap(len: 3932736, prot: EXEC|READ, flags: PRIVATE|DENYWRITE, fd: 3 ) = 0x7f89d0605000
+#! 0.115 ( 0.005 ms): open(filename: 0xd09e2ab2, flags: CLOEXEC ) =
+
+export RE_LINE_TRACE_CONTINUED="^\s*(?:$RE_NUMBER|\?)\s*\(\s*($RE_NUMBER\s*ms\s*)?\):\s*($RE_PROCESS_PID\s*)?\.\.\.\s*\[continued\]:\s+\w+\(\).*\s+=\s+(?:\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.000 ( 0.000 ms): ... [continued]: nanosleep()) = 0
+# 0.000 ( 0.000 ms): ... [continued]: nanosleep()) = 0x00000000
+# ? ( ): packagekitd/94838 ... [continued]: poll()) = 0 (Timeout)
+#! 0.000 ( 0.000 ms): ... [continued]: nanosleep()) =
+
+export RE_LINE_TRACE_UNFINISHED="^\s*$RE_NUMBER\s*\(\s*\):\s*$RE_PROCESS_PID\s+.*\)\s+\.\.\.\s*$"
+# A line of perf-trace output
+# Examples:
+# 901.040 ( ): in:imjournal/1096 ppoll(ufds: 0x7f701a5adb70, nfds: 1, tsp: 0x7f701a5adaf0, sigsetsize: 8) ...
+# 613.727 ( ): gmain/1099 poll(ufds: 0x56248f6b64b0, nfds: 2, timeout_msecs: 3996) ...
+
+export RE_LINE_TRACE_SUMMARY_HEADER="\s*syscall\s+calls\s+(?:errors\s+)?total\s+min\s+avg\s+max\s+stddev"
+# A header of a perf-trace summary table
+# Example:
+# syscall calls total min avg max stddev
+# syscall calls errors total min avg max stddev
+
+
+export RE_LINE_TRACE_SUMMARY_CONTENT="^\s*\w+\s+(?:$RE_NUMBER\s+){5,6}$RE_NUMBER%"
+# A line of a perf-trace summary table
+# Example:
+# open 3 0.017 0.005 0.006 0.007 10.90%
+# openat 2 0 0.017 0.008 0.009 0.010 12.29%
+
+
+export RE_LINE_REPORT_CONTENT="^\s+$RE_NUMBER%\s+\w+\s+\S+\s+\S+\s+\S+" # FIXME
+# A line from typical perf report --stdio output
+# Example:
+# 100.00% sleep [kernel.vmlinux] [k] syscall_return_slowpath
+
+
+export RE_TASK="\s+[\w~\/ \.\+:#-]+(?:\[-1(?:\/\d+)?\]|\[\d+(?:\/\d+)?\])"
+# A name of a task used for perf sched timehist -s
+# Example:
+# sleep[62755]
+# runtest.sh[62762]
+# gmain[705/682]
+# xfsaild/dm-0[495]
+# kworker/u8:1-ev[62714]
+# :-1[-1/62756]
+# :-1[-1]
+# :-1[62756]
+
+
+export RE_SEGFAULT=".*(?:Segmentation\sfault|SIGSEGV|\score\s|dumped|segfault).*"
+# Possible variations of the segfault message
+# Example:
+# /bin/bash: line 1: 32 Segmentation fault timeout 15s
+# Segmentation fault (core dumped)
+# Program terminated with signal SIGSEGV
+#! WARNING: 12323431 isn't a 'cpu_core', please use a CPU list in the 'cpu_core' range (0-15)
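The RE_* variables are Perl-compatible regexps meant to be interpolated into the checkers above; for example, validating a perf-record log (a sketch, file names hypothetical):

  . ../common/patterns.sh
  perf record -o perf.data -- true 2> record.err
  ../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < record.err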
diff --git a/tools/perf/tests/shell/common/settings.sh b/tools/perf/tests/shell/common/settings.sh
new file mode 100644
index 0000000000..361641dbaa
--- /dev/null
+++ b/tools/perf/tests/shell/common/settings.sh
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# settings.sh
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This file contains global settings for the whole testsuite.
+# Its purpose is to make changes easier when they become necessary,
+# e.g. changing the usual sample command that is used across many
+# test files.
+#
+# This file is intended to be sourced in the tests.
+#
+
+#### which perf to use in the testing
+export CMD_PERF=${CMD_PERF:-`which perf`}
+
+#### basic programs examined by perf
+export CMD_BASIC_SLEEP="sleep 0.1"
+export CMD_QUICK_SLEEP="sleep 0.01"
+export CMD_LONGER_SLEEP="sleep 2"
+export CMD_DOUBLE_LONGER_SLEEP="sleep 4"
+export CMD_VERY_LONG_SLEEP="sleep 30"
+export CMD_SIMPLE="true"
+
+#### testsuite run mode
+# define constants:
+export RUNMODE_BASIC=0
+export RUNMODE_STANDARD=1
+export RUNMODE_EXPERIMENTAL=2
+# default runmode is STANDARD
+export PERFTOOL_TESTSUITE_RUNMODE=${PERFTOOL_TESTSUITE_RUNMODE:-$RUNMODE_STANDARD}
+
+#### common settings
+export TESTLOG_VERBOSITY=${TESTLOG_VERBOSITY:-2}
+export TESTLOG_FORCE_COLOR=${TESTLOG_FORCE_COLOR:-n}
+export TESTLOG_ERR_MSG_MAX_LINES=${TESTLOG_ERR_MSG_MAX_LINES:-20}
+export TESTLOG_CLEAN=${TESTLOG_CLEAN:-y}
+
+#### other environment-related settings
+export TEST_IGNORE_MISSING_PMU=${TEST_IGNORE_MISSING_PMU:-n}
+
+#### force a neutral locale
+export LC_ALL=C
+
+#### colors
+if [ -t 1 -o "$TESTLOG_FORCE_COLOR" = "yes" ]; then
+ export MPASS="\e[32m"
+ export MALLPASS="\e[1;32m"
+ export MFAIL="\e[31m"
+ export MALLFAIL="\e[1;31m"
+ export MWARN="\e[1;35m"
+ export MSKIP="\e[33m"
+ export MHIGH="\e[1;33m"
+ export MEND="\e[m"
+else
+ export MPASS=""
+ export MALLPASS=""
+ export MFAIL=""
+ export MALLFAIL=""
+ export MWARN=""
+ export MSKIP=""
+ export MHIGH=""
+ export MEND=""
+fi
+
+
+#### test parametrization
+if [ ! -d ./common ]; then
+ # set parameters based on runmode
+ if [ -f ../common/parametrization.$PERFTOOL_TESTSUITE_RUNMODE.sh ]; then
+ . ../common/parametrization.$PERFTOOL_TESTSUITE_RUNMODE.sh
+ fi
+ # if some parameters haven't been set until now, set them to default
+ if [ -f ../common/parametrization.sh ]; then
+ . ../common/parametrization.sh
+ fi
+fi
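All of these knobs are picked up from the environment, so a run can be tuned without editing any file; a sketch (the alternative perf path is hypothetical):

  export CMD_PERF=/usr/local/bin/perf    # test a locally built perf binary
  export PERFTOOL_TESTSUITE_RUNMODE=0    # 0=BASIC, 1=STANDARD, 2=EXPERIMENTAL
  export TESTLOG_VERBOSITY=0             # silence per-test output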
diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh
index 5d59c32ae3..561c93b75d 100644
--- a/tools/perf/tests/shell/lib/perf_has_symbol.sh
+++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh
@@ -3,7 +3,7 @@
perf_has_symbol()
{
- if perf test -vv "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
+ if perf test -vv -F "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
echo "perf does have symbol '$1'"
return 0
fi
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index ea55d5ea1c..abc1fd7377 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -15,6 +15,7 @@ ap.add_argument('--event', action='store_true')
ap.add_argument('--per-core', action='store_true')
ap.add_argument('--per-thread', action='store_true')
ap.add_argument('--per-cache', action='store_true')
+ap.add_argument('--per-cluster', action='store_true')
ap.add_argument('--per-die', action='store_true')
ap.add_argument('--per-node', action='store_true')
ap.add_argument('--per-socket', action='store_true')
@@ -49,6 +50,7 @@ def check_json_output(expected_items):
'cgroup': lambda x: True,
'cpu': lambda x: isint(x),
'cache': lambda x: True,
+ 'cluster': lambda x: True,
'die': lambda x: True,
'event': lambda x: True,
'event-runtime': lambda x: isfloat(x),
@@ -88,7 +90,7 @@ try:
expected_items = 7
elif args.interval or args.per_thread or args.system_wide_no_aggr:
expected_items = 8
- elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cache:
+ elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cluster or args.per_cache:
expected_items = 9
else:
# If no option is specified, don't check the number of items.
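Mirroring the call stat+json_output.sh makes further below, the new aggregation mode is exercised roughly like this (a sketch; the test itself goes through its $PYTHON and $pythonchecker variables):

  perf stat -j --per-cluster -a true 2>&1 | python3 perf_json_output_lint.py --per-cluster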
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 50a34a9cc0..a2d2352521 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0
import re
import csv
import json
@@ -6,36 +6,61 @@ import argparse
from pathlib import Path
import subprocess
+
+class TestError:
+ def __init__(self, metric: list[str], wl: str, value: list[float], low: float, up=float('nan'), description=str()):
+ self.metric: list = metric # multiple metrics in relationship type tests
+ self.workloads = [wl] # multiple workloads possible
+ self.collectedValue: list = value
+ self.valueLowBound = low
+ self.valueUpBound = up
+ self.description = description
+
+ def __repr__(self) -> str:
+ if len(self.metric) > 1:
+ return "\nMetric Relationship Error: \tThe collected value of metric {0}\n\
+ \tis {1} in workload(s): {2} \n\
+ \tbut expected value range is [{3}, {4}]\n\
+ \tRelationship rule description: \'{5}\'".format(self.metric, self.collectedValue, self.workloads,
+ self.valueLowBound, self.valueUpBound, self.description)
+ elif len(self.collectedValue) == 0:
+ return "\nNo Metric Value Error: \tMetric {0} returns with no value \n\
+ \tworkload(s): {1}".format(self.metric, self.workloads)
+ else:
+ return "\nWrong Metric Value Error: \tThe collected value of metric {0}\n\
+ \tis {1} in workload(s): {2}\n\
+ \tbut expected value range is [{3}, {4}]"\
+ .format(self.metric, self.collectedValue, self.workloads,
+ self.valueLowBound, self.valueUpBound)
+
+
class Validator:
def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
self.rulefname = rulefname
self.reportfname = reportfname
self.rules = None
- self.collectlist:str = metrics
+ self.collectlist: str = metrics
self.metrics = self.__set_metrics(metrics)
self.skiplist = set()
self.tolerance = t
self.workloads = [x for x in workload.split(",") if x]
- self.wlidx = 0 # idx of current workloads
- self.allresults = dict() # metric results of all workload
- self.allignoremetrics = dict() # metrics with no results or negative results
- self.allfailtests = dict()
+ self.wlidx = 0 # idx of current workloads
+ self.allresults = dict() # metric results of all workload
self.alltotalcnt = dict()
self.allpassedcnt = dict()
- self.allerrlist = dict()
- self.results = dict() # metric results of current workload
+ self.results = dict() # metric results of current workload
# vars for test pass/failure statistics
- self.ignoremetrics= set() # metrics with no results or negative results, neg result counts as a failed test
- self.failtests = dict()
+ # metrics with no results or negative results; a negative result counts as a failed test
+ self.ignoremetrics = set()
self.totalcnt = 0
self.passedcnt = 0
# vars for errors
self.errlist = list()
# vars for Rule Generator
- self.pctgmetrics = set() # Percentage rule
+ self.pctgmetrics = set() # Percentage rule
# vars for debug
self.datafname = datafname
@@ -69,10 +94,10 @@ class Validator:
ensure_ascii=True,
indent=4)
- def get_results(self, idx:int = 0):
+ def get_results(self, idx: int = 0):
return self.results[idx]
- def get_bounds(self, lb, ub, error, alias={}, ridx:int = 0) -> list:
+ def get_bounds(self, lb, ub, error, alias={}, ridx: int = 0) -> list:
"""
Get bounds and tolerance from lb, ub, and error.
If lb is missing, use 0.0; if ub is missing, use float('inf'); if error is missing, use self.tolerance.
@@ -85,7 +110,7 @@ class Validator:
tolerance, denormalized based on the upper bound value
"""
# init ubv and lbv to invalid values
- def get_bound_value (bound, initval, ridx):
+ def get_bound_value(bound, initval, ridx):
val = initval
if isinstance(bound, int) or isinstance(bound, float):
val = bound
@@ -113,10 +138,10 @@ class Validator:
return lbv, ubv, denormerr
- def get_value(self, name:str, ridx:int = 0) -> list:
+ def get_value(self, name: str, ridx: int = 0) -> list:
"""
Get value of the metric from self.results.
- If result of this metric is not provided, the metric name will be added into self.ignoremetics and self.errlist.
+ If the result of this metric is not provided, the metric name will be added into self.ignoremetrics.
All future test(s) on this metric will fail.
@param name: name of the metric
@@ -142,7 +167,7 @@ class Validator:
Check if metrics value are non-negative.
One metric is counted as one test.
Failure: when metric value is negative or not provided.
- Metrics with negative value will be added into the self.failtests['PositiveValueTest'] and self.ignoremetrics.
+ Metrics with negative value will be added into self.ignoremetrics.
"""
negmetric = dict()
pcnt = 0
@@ -155,25 +180,27 @@ class Validator:
else:
pcnt += 1
tcnt += 1
+ # The first collect_perf() round runs these metrics with the simple workload
+ # "true". We give the metrics a second chance with a longer workload if
+ # fewer than 20 metrics failed the positive-value test.
if len(rerun) > 0 and len(rerun) < 20:
second_results = dict()
self.second_test(rerun, second_results)
for name, val in second_results.items():
- if name not in negmetric: continue
+ if name not in negmetric:
+ continue
if val >= 0:
del negmetric[name]
pcnt += 1
- self.failtests['PositiveValueTest']['Total Tests'] = tcnt
- self.failtests['PositiveValueTest']['Passed Tests'] = pcnt
if len(negmetric.keys()):
self.ignoremetrics.update(negmetric.keys())
- negmessage = ["{0}(={1:.4f})".format(name, val) for name, val in negmetric.items()]
- self.failtests['PositiveValueTest']['Failed Tests'].append({'NegativeValue': negmessage})
+ self.errlist.extend(
+ [TestError([m], self.workloads[self.wlidx], negmetric[m], 0) for m in negmetric.keys()])
return
- def evaluate_formula(self, formula:str, alias:dict, ridx:int = 0):
+ def evaluate_formula(self, formula: str, alias: dict, ridx: int = 0):
"""
Evaluate the value of formula.
@@ -187,10 +214,11 @@ class Validator:
sign = "+"
f = str()
- #TODO: support parenthesis?
+ # TODO: support parenthesis?
for i in range(len(formula)):
if i+1 == len(formula) or formula[i] in ('+', '-', '*', '/'):
- s = alias[formula[b:i]] if i+1 < len(formula) else alias[formula[b:]]
+ s = alias[formula[b:i]] if i + \
+ 1 < len(formula) else alias[formula[b:]]
v = self.get_value(s, ridx)
if not v:
errs.append(s)
@@ -228,49 +256,49 @@ class Validator:
alias = dict()
for m in rule['Metrics']:
alias[m['Alias']] = m['Name']
- lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex'])
- val, f = self.evaluate_formula(rule['Formula'], alias, ridx=rule['RuleIndex'])
+ lbv, ubv, t = self.get_bounds(
+ rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex'])
+ val, f = self.evaluate_formula(
+ rule['Formula'], alias, ridx=rule['RuleIndex'])
+
+ lb = rule['RangeLower']
+ ub = rule['RangeUpper']
+ if isinstance(lb, str):
+ if lb in alias:
+ lb = alias[lb]
+ if isinstance(ub, str):
+ if ub in alias:
+ ub = alias[ub]
+
if val == -1:
- self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Description':f})
+ self.errlist.append(TestError([m['Name'] for m in rule['Metrics']], self.workloads[self.wlidx], [],
+ lb, ub, rule['Description']))
elif not self.check_bound(val, lbv, ubv, t):
- lb = rule['RangeLower']
- ub = rule['RangeUpper']
- if isinstance(lb, str):
- if lb in alias:
- lb = alias[lb]
- if isinstance(ub, str):
- if ub in alias:
- ub = alias[ub]
- self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Formula':f,
- 'RangeLower': lb, 'LowerBoundValue': self.get_value(lb),
- 'RangeUpper': ub, 'UpperBoundValue':self.get_value(ub),
- 'ErrorThreshold': t, 'CollectedValue': val})
+ self.errlist.append(TestError([m['Name'] for m in rule['Metrics']], self.workloads[self.wlidx], [val],
+ lb, ub, rule['Description']))
else:
self.passedcnt += 1
- self.failtests['RelationshipTest']['Passed Tests'] += 1
self.totalcnt += 1
- self.failtests['RelationshipTest']['Total Tests'] += 1
return
-
# Single Metric Test
- def single_test(self, rule:dict):
+ def single_test(self, rule: dict):
"""
Validate if the metrics are in the required value range.
eg. lower_bound <= metrics_value <= upper_bound
One metric is counted as one test in this type of test.
One rule may include one or more metrics.
Failure: when the metric value not provided or the value is outside the bounds.
- This test updates self.total_cnt and records failed tests in self.failtest['SingleMetricTest'].
+ This test updates self.total_cnt.
@param rule: dict with metrics to validate and the value range requirement
"""
- lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'])
+ lbv, ubv, t = self.get_bounds(
+ rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'])
metrics = rule['Metrics']
passcnt = 0
totalcnt = 0
- faillist = list()
failures = dict()
rerun = list()
for m in metrics:
@@ -286,25 +314,20 @@ class Validator:
second_results = dict()
self.second_test(rerun, second_results)
for name, val in second_results.items():
- if name not in failures: continue
+ if name not in failures:
+ continue
if self.check_bound(val, lbv, ubv, t):
passcnt += 1
del failures[name]
else:
- failures[name] = val
+ failures[name] = [val]
self.results[0][name] = val
self.totalcnt += totalcnt
self.passedcnt += passcnt
- self.failtests['SingleMetricTest']['Total Tests'] += totalcnt
- self.failtests['SingleMetricTest']['Passed Tests'] += passcnt
if len(failures.keys()) != 0:
- faillist = [{'MetricName':name, 'CollectedValue':val} for name, val in failures.items()]
- self.failtests['SingleMetricTest']['Failed Tests'].append({'RuleIndex':rule['RuleIndex'],
- 'RangeLower': rule['RangeLower'],
- 'RangeUpper': rule['RangeUpper'],
- 'ErrorThreshold':rule['ErrorThreshold'],
- 'Failure':faillist})
+ self.errlist.extend([TestError([name], self.workloads[self.wlidx], val,
+ rule['RangeLower'], rule['RangeUpper']) for name, val in failures.items()])
return
@@ -312,19 +335,11 @@ class Validator:
"""
Create final report and write into a JSON file.
"""
- alldata = list()
- for i in range(0, len(self.workloads)):
- reportstas = {"Total Rule Count": self.alltotalcnt[i], "Passed Rule Count": self.allpassedcnt[i]}
- data = {"Metric Validation Statistics": reportstas, "Tests in Category": self.allfailtests[i],
- "Errors":self.allerrlist[i]}
- alldata.append({"Workload": self.workloads[i], "Report": data})
-
- json_str = json.dumps(alldata, indent=4)
- print("Test validation finished. Final report: ")
- print(json_str)
+ print(self.errlist)
if self.debug:
- allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]} for i in range(0, len(self.workloads))]
+ allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]}
+ for i in range(0, len(self.workloads))]
self.json_dump(allres, self.datafname)
def check_rule(self, testtype, metric_list):
@@ -342,13 +357,13 @@ class Validator:
return True
# Start of Collector and Converter
- def convert(self, data: list, metricvalues:dict):
+ def convert(self, data: list, metricvalues: dict):
"""
Convert collected metric data from the -j output to dict of {metric_name:value}.
"""
for json_string in data:
try:
- result =json.loads(json_string)
+ result = json.loads(json_string)
if "metric-unit" in result and result["metric-unit"] != "(null)" and result["metric-unit"] != "":
name = result["metric-unit"].split(" ")[1] if len(result["metric-unit"].split(" ")) > 1 \
else result["metric-unit"]
@@ -365,9 +380,10 @@ class Validator:
print(" ".join(command))
cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
data = [x+'}' for x in cmd.stderr.split('}\n') if x]
+ if data[0][0] != '{':
+ data[0] = data[0][data[0].find('{'):]
return data
-
def collect_perf(self, workload: str):
"""
Collect metric data with "perf stat -M" on given workload with -a and -j.
@@ -385,14 +401,18 @@ class Validator:
if rule["TestType"] == "RelationshipTest":
metrics = [m["Name"] for m in rule["Metrics"]]
if not any(m not in collectlist[0] for m in metrics):
- collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))]
+ collectlist[rule["RuleIndex"]] = [
+ ",".join(list(set(metrics)))]
for idx, metrics in collectlist.items():
- if idx == 0: wl = "true"
- else: wl = workload
+ if idx == 0:
+ wl = "true"
+ else:
+ wl = workload
for metric in metrics:
data = self._run_perf(metric, wl)
- if idx not in self.results: self.results[idx] = dict()
+ if idx not in self.results:
+ self.results[idx] = dict()
self.convert(data, self.results[idx])
return
@@ -412,7 +432,8 @@ class Validator:
2) create metric name list
"""
command = ['perf', 'list', '-j', '--details', 'metrics']
- cmd = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
+ cmd = subprocess.run(command, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, encoding='utf-8')
try:
data = json.loads(cmd.stdout)
for m in data:
@@ -453,12 +474,12 @@ class Validator:
rules = data['RelationshipRules']
self.skiplist = set([name.lower() for name in data['SkipList']])
self.rules = self.remove_unsupported_rules(rules)
- pctgrule = {'RuleIndex':0,
- 'TestType':'SingleMetricTest',
- 'RangeLower':'0',
+ pctgrule = {'RuleIndex': 0,
+ 'TestType': 'SingleMetricTest',
+ 'RangeLower': '0',
'RangeUpper': '100',
'ErrorThreshold': self.tolerance,
- 'Description':'Metrics in percent unit have value with in [0, 100]',
+ 'Description': 'Metrics in percent unit have values within [0, 100]',
'Metrics': [{'Name': m.lower()} for m in self.pctgmetrics]}
self.rules.append(pctgrule)
@@ -469,8 +490,9 @@ class Validator:
idx += 1
if self.debug:
- #TODO: need to test and generate file name correctly
- data = {'RelationshipRules':self.rules, 'SupportedMetrics': [{"MetricName": name} for name in self.metrics]}
+ # TODO: need to test and generate file name correctly
+ data = {'RelationshipRules': self.rules, 'SupportedMetrics': [
+ {"MetricName": name} for name in self.metrics]}
self.json_dump(data, self.fullrulefname)
return
@@ -482,20 +504,17 @@ class Validator:
@param key: key to the dictionaries (index of self.workloads).
'''
self.allresults[key] = self.results
- self.allignoremetrics[key] = self.ignoremetrics
- self.allfailtests[key] = self.failtests
self.alltotalcnt[key] = self.totalcnt
self.allpassedcnt[key] = self.passedcnt
- self.allerrlist[key] = self.errlist
- #Initialize data structures before data validation of each workload
+ # Initialize data structures before data validation of each workload
def _init_data(self):
- testtypes = ['PositiveValueTest', 'RelationshipTest', 'SingleMetricTest']
+ testtypes = ['PositiveValueTest',
+ 'RelationshipTest', 'SingleMetricTest']
self.results = dict()
- self.ignoremetrics= set()
+ self.ignoremetrics = set()
self.errlist = list()
- self.failtests = {k:{'Total Tests':0, 'Passed Tests':0, 'Failed Tests':[]} for k in testtypes}
self.totalcnt = 0
self.passedcnt = 0
@@ -525,32 +544,33 @@ class Validator:
testtype = r['TestType']
if not self.check_rule(testtype, r['Metrics']):
continue
- if testtype == 'RelationshipTest':
+ if testtype == 'RelationshipTest':
self.relationship_test(r)
elif testtype == 'SingleMetricTest':
self.single_test(r)
else:
print("Unsupported Test Type: ", testtype)
- self.errlist.append("Unsupported Test Type from rule: " + r['RuleIndex'])
- self._storewldata(i)
print("Workload: ", self.workloads[i])
- print("Total metrics collected: ", self.failtests['PositiveValueTest']['Total Tests'])
- print("Non-negative metric count: ", self.failtests['PositiveValueTest']['Passed Tests'])
print("Total Test Count: ", self.totalcnt)
print("Passed Test Count: ", self.passedcnt)
-
+ self._storewldata(i)
self.create_report()
- return sum(self.alltotalcnt.values()) != sum(self.allpassedcnt.values())
+ return len(self.errlist) > 0
# End of Class Validator
def main() -> None:
- parser = argparse.ArgumentParser(description="Launch metric value validation")
-
- parser.add_argument("-rule", help="Base validation rule file", required=True)
- parser.add_argument("-output_dir", help="Path for validator output file, report file", required=True)
- parser.add_argument("-debug", help="Debug run, save intermediate data to files", action="store_true", default=False)
- parser.add_argument("-wl", help="Workload to run while data collection", default="true")
+ parser = argparse.ArgumentParser(
+ description="Launch metric value validation")
+
+ parser.add_argument(
+ "-rule", help="Base validation rule file", required=True)
+ parser.add_argument(
+ "-output_dir", help="Path for validator output file, report file", required=True)
+ parser.add_argument("-debug", help="Debug run, save intermediate data to files",
+ action="store_true", default=False)
+ parser.add_argument(
+ "-wl", help="Workload to run while data collection", default="true")
parser.add_argument("-m", help="Metric list to validate", default="")
args = parser.parse_args()
outpath = Path(args.output_dir)
@@ -559,8 +579,8 @@ def main() -> None:
datafile = Path.joinpath(outpath, 'perf_data.json')
validator = Validator(args.rule, reportf, debug=args.debug,
- datafname=datafile, fullrulefname=fullrule, workload=args.wl,
- metrics=args.m)
+ datafname=datafile, fullrulefname=fullrule, workload=args.wl,
+ metrics=args.m)
ret = validator.test()
return ret
@@ -569,6 +589,3 @@ def main() -> None:
if __name__ == "__main__":
import sys
sys.exit(main())
-
-
-
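The validator's command line is unchanged by this refactor; a direct invocation looks like this (a sketch, the rule-file name is hypothetical):

  python3 perf_metric_validation.py -rule metric_rules.json -output_dir /tmp/val -wl "sleep 1"
  echo $?    # now non-zero exactly when the error list is non-empty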
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
index 3cc158a643..c81d6a9f79 100644
--- a/tools/perf/tests/shell/lib/stat_output.sh
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -97,6 +97,18 @@ check_per_cache_instance()
echo "[Success]"
}
+check_per_cluster()
+{
+ echo -n "Checking $1 output: per cluster "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-cluster -a $2 true
+ echo "[Success]"
+}
+
check_per_die()
{
echo -n "Checking $1 output: per die "
diff --git a/tools/perf/tests/shell/perftool-testsuite_probe.sh b/tools/perf/tests/shell/perftool-testsuite_probe.sh
new file mode 100755
index 0000000000..a0fec33a03
--- /dev/null
+++ b/tools/perf/tests/shell/perftool-testsuite_probe.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# perftool-testsuite_probe
+# SPDX-License-Identifier: GPL-2.0
+
+test -d "$(dirname "$0")/base_probe" || exit 2
+cd "$(dirname "$0")/base_probe" || exit 2
+status=0
+
+PERFSUITE_RUN_DIR=$(mktemp -d /tmp/"$(basename "$0" .sh)".XXX)
+export PERFSUITE_RUN_DIR
+
+for testcase in setup.sh test_*; do # skip setup.sh if not present or not executable
+ test -x "$testcase" || continue
+ ./"$testcase"
+ (( status += $? ))
+done
+
+if ! [ "$PERFTEST_KEEP_LOGS" = "y" ]; then
+ rm -rf "$PERFSUITE_RUN_DIR"
+fi
+
+test $status -ne 0 && exit 1
+exit 0
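A sketch of a manual run that preserves the logs for inspection (the temp directory name follows the mktemp template above):

  PERFTEST_KEEP_LOGS=y ./perftool-testsuite_probe.sh
  ls /tmp/perftool-testsuite_probe.*/perf_probe/logs/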
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index f1818fa6d9..fc2d8cc6e5 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -42,6 +42,7 @@ function commachecker()
;; "--per-socket") exp=8
;; "--per-node") exp=8
;; "--per-die") exp=8
+ ;; "--per-cluster") exp=8
;; "--per-cache") exp=8
esac
@@ -79,6 +80,7 @@ then
check_system_wide_no_aggr "CSV" "$perf_cmd"
check_per_core "CSV" "$perf_cmd"
check_per_cache_instance "CSV" "$perf_cmd"
+ check_per_cluster "CSV" "$perf_cmd"
check_per_die "CSV" "$perf_cmd"
check_per_socket "CSV" "$perf_cmd"
else
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index 3bc900533a..2b9c6212df 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -122,6 +122,18 @@ check_per_cache_instance()
echo "[Success]"
}
+check_per_cluster()
+{
+ echo -n "Checking json output: per cluster "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -j --per-cluster -a true 2>&1 | $PYTHON $pythonchecker --per-cluster
+ echo "[Success]"
+}
+
check_per_die()
{
echo -n "Checking json output: per die "
@@ -200,6 +212,7 @@ then
check_system_wide_no_aggr
check_per_core
check_per_cache_instance
+ check_per_cluster
check_per_die
check_per_socket
else
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index 4fcdd1a914..cbf2894b2c 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -13,7 +13,7 @@ stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses)
event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches")
-skip_metric=("stalled cycles per insn" "tma_")
+skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound")
cleanup() {
rm -f "${stat_output}"
@@ -40,6 +40,7 @@ function commachecker()
;; "--per-node") prefix=3
;; "--per-die") prefix=3
;; "--per-cache") prefix=3
+ ;; "--per-cluster") prefix=3
esac
while read line
@@ -99,6 +100,7 @@ then
check_system_wide_no_aggr "STD" "$perf_cmd"
check_per_core "STD" "$perf_cmd"
check_per_cache_instance "STD" "$perf_cmd"
+ check_per_cluster "STD" "$perf_cmd"
check_per_die "STD" "$perf_cmd"
check_per_socket "STD" "$perf_cmd"
else
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index a87bb2814b..2d92098747 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -4,19 +4,19 @@
set -e
-# check whether $2 is within +/- 10% of $1
+# check whether $2 is within +/- 20% of $1
compare_number()
{
first_num=$1
second_num=$2
- # upper bound is first_num * 110%
- upper=$(expr $first_num + $first_num / 10 )
- # lower bound is first_num * 90%
- lower=$(expr $first_num - $first_num / 10 )
+ # upper bound is first_num * 120%
+ upper=$(expr $first_num + $first_num / 5 )
+ # lower bound is first_num * 80%
+ lower=$(expr $first_num - $first_num / 5 )
if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
- echo "The difference between $first_num and $second_num are greater than 10%."
+ echo "The difference between $first_num and $second_num is greater than 20%."
exit 1
fi
}
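Worked through with the new bounds: for first_num=100 the limits become upper=100+100/5=120 and lower=100-100/5=80, so a second value of 119 passes while 121 trips the error path. A sketch:

  compare_number 100 119    # within +/- 20%: returns silently
  compare_number 100 121    # prints the error message and exits 1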
diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh
index 7ca172599a..279f19c591 100755
--- a/tools/perf/tests/shell/stat_metrics_values.sh
+++ b/tools/perf/tests/shell/stat_metrics_values.sh
@@ -19,6 +19,8 @@ echo "Output will be stored in: $tmpdir"
$PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}"
ret=$?
rm -rf $tmpdir
-
+if [ $ret -ne 0 ]; then
+ echo "Metric validation returned with errors. Please check the metrics reported with errors."
+fi
exit $ret
diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
index e342e6c8aa..83b53591b1 100755
--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
@@ -8,6 +8,12 @@ shelldir=$(dirname "$0")
lscpu | grep -q "aarch64" || exit 2
+if perf version --build-options | grep HAVE_DWARF_UNWIND_SUPPORT | grep -q OFF
+then
+ echo "Skipping, no dwarf unwind support"
+ exit 2
+fi
+
skip_test_missing_symbol leafloop
PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
index 65dd852071..3302ea0b96 100755
--- a/tools/perf/tests/shell/test_arm_coresight.sh
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -188,7 +188,7 @@ arm_cs_etm_snapshot_test() {
arm_cs_etm_basic_test() {
echo "Recording trace with '$*'"
- perf record -o ${perfdata} "$@" -- ls > /dev/null 2>&1
+ perf record -o ${perfdata} "$@" -m,8M -- ls > /dev/null 2>&1
perf_script_branch_samples ls &&
perf_report_branch_samples ls &&