Diffstat
-rw-r--r--  tools/perf/tests/shell/lib/coresight.sh             | 132
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py |  96
-rw-r--r--  tools/perf/tests/shell/lib/probe.sh                 |  12
-rw-r--r--  tools/perf/tests/shell/lib/probe_vfs_getname.sh     |  24
-rw-r--r--  tools/perf/tests/shell/lib/waiting.sh               |  77
5 files changed, 341 insertions, 0 deletions
diff --git a/tools/perf/tests/shell/lib/coresight.sh b/tools/perf/tests/shell/lib/coresight.sh
new file mode 100644
index 000000000..45a147725
--- /dev/null
+++ b/tools/perf/tests/shell/lib/coresight.sh
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+# This is sourced from a driver script so no need for #!/bin... etc. at the
+# top - the assumption below is that it runs as part of sourcing after the
+# test sets up some basic env vars to say what it is.
+
+# This currently works with ETMv4 / ETF, not any other packet types, at this
+# point. This will need changes if that changes.
+
+# perf record options for the perf tests to use
+PERFRECMEM="-m ,16M"
+PERFRECOPT="$PERFRECMEM -e cs_etm//u"
+
+TOOLS=$(dirname $0)
+DIR="$TOOLS/$TEST"
+BIN="$DIR/$TEST"
+# If the test tool/binary does not exist or is not executable then skip the test
+if ! test -x "$BIN"; then exit 2; fi
+DATD="."
+# If the data dir env is set then make the data dir use that instead of ./
+if test -n "$PERF_TEST_CORESIGHT_DATADIR"; then
+	DATD="$PERF_TEST_CORESIGHT_DATADIR";
+fi
+# If the stat dir env is set then make the stat dir use that instead of ./
+STATD="."
+if test -n "$PERF_TEST_CORESIGHT_STATDIR"; then
+	STATD="$PERF_TEST_CORESIGHT_STATDIR";
+fi
+
+# Called if the test fails - error code 1
+err() {
+	echo "$1"
+	exit 1
+}
+
+# Check that a count from our perf output is at least the given minimum
+check_val_min() {
+	STATF="$4"
+	if test "$2" -lt "$3"; then
+		echo ", FAILED" >> "$STATF"
+		err "Sanity check number of $1 is too low ($2 < $3)"
+	fi
+}
+
+perf_dump_aux_verify() {
+	# Some basic checking that the AUX chunk contains some sensible data
+	# to see that we are recording something and at least a minimum
+	# amount of it. We should almost always see Fn packets in just about
+	# anything but certainly we will see some trace info and async
+	# packets
+	DUMP="$DATD/perf-tmp-aux-dump.txt"
+	perf report --stdio --dump -i "$1" | \
+		grep -o -e I_ATOM_F -e I_ASYNC -e I_TRACE_INFO > "$DUMP"
+	# Simply count how many of these packets we find to see that we are
+	# producing a reasonable amount of data - exact checks are not sane
+	# as this is a lossy process where we may lose some blocks and the
+	# compiler may produce different code depending on the compiler and
+	# optimization options, so this is rough just to see if we're
+	# either missing almost all the data or all of it
+	ATOM_FX_NUM=`grep I_ATOM_F "$DUMP" | wc -l`
+	ASYNC_NUM=`grep I_ASYNC "$DUMP" | wc -l`
+	TRACE_INFO_NUM=`grep I_TRACE_INFO "$DUMP" | wc -l`
+	rm -f "$DUMP"
+
+	# Arguments provide minimums for a pass
+	CHECK_FX_MIN="$2"
+	CHECK_ASYNC_MIN="$3"
+	CHECK_TRACE_INFO_MIN="$4"
+
+	# Write out statistics, so over time you can track results to see if
+	# there is a pattern - for example whether we have less "noisy" results
+	# that produce more consistent amounts of data each run, and whether
+	# over time any techniques to minimize data loss are having an effect
+	# or not
+	STATF="$STATD/stats-$TEST-$DATV.csv"
+	if ! test -f "$STATF"; then
+		echo "ATOM Fx Count, Minimum, ASYNC Count, Minimum, TRACE INFO Count, Minimum" > "$STATF"
+	fi
+	echo -n "$ATOM_FX_NUM, $CHECK_FX_MIN, $ASYNC_NUM, $CHECK_ASYNC_MIN, $TRACE_INFO_NUM, $CHECK_TRACE_INFO_MIN" >> "$STATF"
+
+	# Actually check to see if we passed or failed
+	check_val_min "ATOM_FX" "$ATOM_FX_NUM" "$CHECK_FX_MIN" "$STATF"
+	check_val_min "ASYNC" "$ASYNC_NUM" "$CHECK_ASYNC_MIN" "$STATF"
+	check_val_min "TRACE_INFO" "$TRACE_INFO_NUM" "$CHECK_TRACE_INFO_MIN" "$STATF"
+	echo ", Ok" >> "$STATF"
+}
+
+perf_dump_aux_tid_verify() {
+	# A specifically crafted test will produce a list of Thread IDs to
+	# stdout that need to be checked to see that they have had trace
+	# info collected in AUX blocks in the perf data. This will go
+	# through all the TIDs that are listed as CID=0xabcdef and see
+	# that all the Thread IDs the test tool reports are in the perf
+	# data AUX chunks
+
+	# The TID test tools will print one TID per stdout line to be
+	# tested
+	TIDS=`cat "$2"`
+	# Scan the perf report to find the TIDs that are actually CID in hex
+	# and build a list of the ones found
+	FOUND_TIDS=`perf report --stdio --dump -i "$1" | \
+			grep -o "CID=0x[0-9a-z]\+" | sed 's/CID=//g' | \
+			uniq | sort | uniq`
+	# No CID=xxx found - maybe your kernel is reporting these as
+	# VMID=xxx so look there
+	if test -z "$FOUND_TIDS"; then
+		FOUND_TIDS=`perf report --stdio --dump -i "$1" | \
+				grep -o "VMID=0x[0-9a-z]\+" | sed 's/VMID=//g' | \
+				uniq | sort | uniq`
+	fi
+
+	# Iterate over the list of TIDs that the test says it has and find
+	# them in the TIDs found in the perf report
+	MISSING=""
+	for TID2 in $TIDS; do
+		FOUND=""
+		for TIDHEX in $FOUND_TIDS; do
+			TID=`printf "%i" $TIDHEX`
+			if test "$TID" -eq "$TID2"; then
+				FOUND="y"
+				break
+			fi
+		done
+		if test -z "$FOUND"; then
+			MISSING="$MISSING $TID2"
+		fi
+	done
+	if test -n "$MISSING"; then
+		err "Thread IDs $MISSING not found in perf AUX data"
+	fi
+}
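Usage note: the library expects the sourcing driver script to have set TEST (the per-test tool directory and binary name) and DATV (a variant tag used in the stats CSV file name) before sourcing it. A minimal sketch of such a driver follows; the test name and the minimum packet counts are illustrative assumptions, not values taken from this patch:

    # Hypothetical driver script living next to lib/ - name and minimums
    # are assumptions for illustration only
    TEST="my_loop_tool"   # binary expected at $(dirname $0)/my_loop_tool/my_loop_tool
    DATV="default"        # tag used in the stats-$TEST-$DATV.csv file name
    . "$(dirname $0)/lib/coresight.sh"

    # Record a CoreSight trace of the test binary into the data dir
    perf record $PERFRECOPT -o "$DATD/perf-$TEST.data" "$BIN"
    # Require at least 10 ATOM_FX, 10 ASYNC and 1 TRACE_INFO packets
    # (illustrative minimums)
    perf_dump_aux_verify "$DATD/perf-$TEST.data" 10 10 1

perf_dump_aux_verify exits via err() on failure, so the driver only runs past it when the sanity checks pass.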
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
new file mode 100644
index 000000000..d90f8d102
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Basic sanity check of perf JSON output as specified in the man page.
+
+import argparse
+import sys
+import json
+
+ap = argparse.ArgumentParser()
+ap.add_argument('--no-args', action='store_true')
+ap.add_argument('--interval', action='store_true')
+ap.add_argument('--system-wide-no-aggr', action='store_true')
+ap.add_argument('--system-wide', action='store_true')
+ap.add_argument('--event', action='store_true')
+ap.add_argument('--per-core', action='store_true')
+ap.add_argument('--per-thread', action='store_true')
+ap.add_argument('--per-die', action='store_true')
+ap.add_argument('--per-node', action='store_true')
+ap.add_argument('--per-socket', action='store_true')
+args = ap.parse_args()
+
+Lines = sys.stdin.readlines()
+
+def isfloat(num):
+  try:
+    float(num)
+    return True
+  except ValueError:
+    return False
+
+
+def isint(num):
+  try:
+    int(num)
+    return True
+  except ValueError:
+    return False
+
+def is_counter_value(num):
+  return isfloat(num) or num == '<not counted>' or num == '<not supported>'
+
+def check_json_output(expected_items):
+  if expected_items != -1:
+    for line in Lines:
+      if 'failed' not in line:
+        count = line.count(',')
+        if count != expected_items and count >= 1 and count <= 3 and 'metric-value' in line:
+          # Events that generate >1 metric may have isolated metric
+          # values and possibly other prefixes like interval, core and
+          # aggregate-number.
+          continue
+        if count != expected_items:
+          raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
+                             f' in \'{line}\'')
+  checks = {
+    'aggregate-number': lambda x: isfloat(x),
+    'core': lambda x: True,
+    'counter-value': lambda x: is_counter_value(x),
+    'cgroup': lambda x: True,
+    'cpu': lambda x: isint(x),
+    'die': lambda x: True,
+    'event': lambda x: True,
+    'event-runtime': lambda x: isfloat(x),
+    'interval': lambda x: isfloat(x),
+    'metric-unit': lambda x: True,
+    'metric-value': lambda x: isfloat(x),
+    'node': lambda x: True,
+    'pcnt-running': lambda x: isfloat(x),
+    'socket': lambda x: True,
+    'thread': lambda x: True,
+    'unit': lambda x: True,
+  }
+  input = '[\n' + ','.join(Lines) + '\n]'
+  for item in json.loads(input):
+    for key, value in item.items():
+      if key not in checks:
+        raise RuntimeError(f'Unexpected key: key={key} value={value}')
+      if not checks[key](value):
+        raise RuntimeError(f'Check failed for: key={key} value={value}')
+
+
+try:
+  if args.no_args or args.system_wide or args.event:
+    expected_items = 6
+  elif args.interval or args.per_thread or args.system_wide_no_aggr:
+    expected_items = 7
+  elif args.per_core or args.per_socket or args.per_node or args.per_die:
+    expected_items = 8
+  else:
+    # If no option is specified, don't check the number of items.
+    expected_items = -1
+  check_json_output(expected_items)
+except:
+  print('Test failed for input:\n' + '\n'.join(Lines))
+  raise
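Usage note: the linter reads perf's JSON lines on stdin and derives the expected comma-separated field count from the mode flag. The exact invocation by the calling shell test is not part of this patch, but a plausible sketch, assuming perf stat's -j (JSON output) option and that perf stat prints its output to stderr by default:

    # Hypothetical caller sketch - flags on the perf side are assumptions
    perf stat -j true 2>&1 | \
        python3 tools/perf/tests/shell/lib/perf_json_output_lint.py --no-args
    perf stat -j -I 1000 sleep 2 2>&1 | \
        python3 tools/perf/tests/shell/lib/perf_json_output_lint.py --interval

Each stdin line is treated as one JSON object: the lines are joined into a JSON array, parsed, and every key is validated against the checks table, so a nonzero exit plus a traceback means malformed output.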
diff --git a/tools/perf/tests/shell/lib/probe.sh b/tools/perf/tests/shell/lib/probe.sh
new file mode 100644
index 000000000..51e3f60ba
--- /dev/null
+++ b/tools/perf/tests/shell/lib/probe.sh
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+skip_if_no_perf_probe() {
+	perf probe 2>&1 | grep -q 'is not a perf-command' && return 2
+	return 0
+}
+
+skip_if_no_perf_trace() {
+	perf trace -h 2>&1 | grep -q -e 'is not a perf-command' -e 'trace command not available' && return 2
+	return 0
+}
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
new file mode 100644
index 000000000..b616d42bd
--- /dev/null
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -0,0 +1,24 @@
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+perf probe -l 2>&1 | grep -q probe:vfs_getname
+had_vfs_getname=$?
+
+cleanup_probe_vfs_getname() {
+	if [ $had_vfs_getname -eq 1 ] ; then
+		perf probe -q -d probe:vfs_getname*
+	fi
+}
+
+add_probe_vfs_getname() {
+	local verbose=$1
+	if [ $had_vfs_getname -eq 1 ] ; then
+		line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
+		perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
+		perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
+	fi
+}
+
+skip_if_no_debuginfo() {
+	add_probe_vfs_getname -v 2>&1 | egrep -q "^(Failed to find the path for the kernel|Debuginfo-analysis is not supported)|(file has no debug information)" && return 2
+	return 1
+}
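Usage note: the two probe helpers are meant to be sourced together by a test that needs the vfs_getname probe. A sketch of the usual caller shape, assuming the caller follows perf's skip-code convention (exit 2 means skip):

    # Hypothetical test sketch
    . "$(dirname $0)/lib/probe.sh"
    . "$(dirname $0)/lib/probe_vfs_getname.sh"

    skip_if_no_perf_probe || exit 2

    # Add the probe; turn a missing-debuginfo failure into a skip
    add_probe_vfs_getname || skip_if_no_debuginfo
    err=$?
    if [ $err -ne 0 ] ; then
        exit $err
    fi
    # ... record or trace against probe:vfs_getname here ...
    cleanup_probe_vfs_getname

Because probe_vfs_getname.sh checks for a pre-existing probe:vfs_getname at source time (had_vfs_getname), cleanup_probe_vfs_getname only removes a probe the test itself added.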
diff --git a/tools/perf/tests/shell/lib/waiting.sh b/tools/perf/tests/shell/lib/waiting.sh
new file mode 100644
index 000000000..e7a39134a
--- /dev/null
+++ b/tools/perf/tests/shell/lib/waiting.sh
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0
+
+tenths=date\ +%s%1N
+
+# Wait for PID $1 to have $2 threads started
+# Time out after $3 tenths of a second, or 5 seconds if $3 is ""
+wait_for_threads()
+{
+	tm_out=$3 ; [ -n "${tm_out}" ] || tm_out=50
+	start_time=$($tenths)
+	while [ -e "/proc/$1/task" ] ; do
+		th_cnt=$(find "/proc/$1/task" -mindepth 1 -maxdepth 1 -printf x | wc -c)
+		if [ "${th_cnt}" -ge "$2" ] ; then
+			return 0
+		fi
+		# Wait at most tm_out tenths of a second
+		if [ $(($($tenths) - start_time)) -ge $tm_out ] ; then
+			echo "PID $1 does not have $2 threads"
+			return 1
+		fi
+	done
+	return 1
+}
+
+# Wait for perf record -vvv 2>$2 with PID $1 to start by looking at file $2
+# It depends on capturing the perf record debug message "perf record has started"
+# Time out after $3 tenths of a second, or 5 seconds if $3 is ""
+wait_for_perf_to_start()
+{
+	tm_out=$3 ; [ -n "${tm_out}" ] || tm_out=50
+	echo "Waiting for \"perf record has started\" message"
+	start_time=$($tenths)
+	while [ -e "/proc/$1" ] ; do
+		if grep -q "perf record has started" "$2" ; then
+			echo OK
+			break
+		fi
+		# Wait at most tm_out tenths of a second
+		if [ $(($($tenths) - start_time)) -ge $tm_out ] ; then
+			echo "perf recording did not start"
+			return 1
+		fi
+	done
+	return 0
+}
+
+# Wait for process PID $1 to exit
+# Time out after $2 tenths of a second, or 5 seconds if $2 is ""
+wait_for_process_to_exit()
+{
+	tm_out=$2 ; [ -n "${tm_out}" ] || tm_out=50
+	start_time=$($tenths)
+	while [ -e "/proc/$1" ] ; do
+		# Wait at most tm_out tenths of a second
+		if [ $(($($tenths) - start_time)) -ge $tm_out ] ; then
+			echo "PID $1 did not exit as expected"
+			return 1
+		fi
+	done
+	return 0
+}
+
+# Check if PID $1 is still running after $2 tenths of a second,
+# or 0.3 seconds if $2 is ""
+is_running()
+{
+	tm_out=$2 ; [ -n "${tm_out}" ] || tm_out=3
+	start_time=$($tenths)
+	while [ -e "/proc/$1" ] ; do
+		# Check for at least tm_out tenths of a second
+		if [ $(($($tenths) - start_time)) -gt $tm_out ] ; then
+			return 0
+		fi
+	done
+	echo "PID $1 exited prematurely"
+	return 1
+}
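Usage note: these helpers poll /proc against a deadline measured in tenths of a second via $tenths (i.e. date +%s%1N). A sketch of how a test might drive them; the file names and timeout values here are chosen only for illustration:

    # Hypothetical caller sketch
    . "$(dirname $0)/lib/waiting.sh"

    perf_log=$(mktemp /tmp/perf.log.XXXXX)
    # Start perf in the background with -vvv so the debug message lands in the log
    perf record -vvv -o /tmp/perf.data -- sleep 10 2> "$perf_log" &
    perf_pid=$!

    # Default timeout of 50 tenths (5 seconds) for startup
    wait_for_perf_to_start "$perf_pid" "$perf_log" || exit 1
    # ... exercise the workload ...
    kill "$perf_pid"
    # Allow up to 100 tenths (10 seconds) for perf to flush and exit
    wait_for_process_to_exit "$perf_pid" 100 || exit 1

Note that the loops poll /proc without sleeping, so they are busy-waits bounded only by the tenths-of-a-second deadline.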