path: root/tools/perf/tests/shell
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /tools/perf/tests/shell
parent     Initial commit. (diff)
download   linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip

Adding upstream version 6.6.15. (tag: upstream/6.6.15)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/perf/tests/shell')
-rwxr-xr-x  tools/perf/tests/shell/buildid.sh | 175
-rw-r--r--  tools/perf/tests/shell/coresight/Makefile | 29
-rw-r--r--  tools/perf/tests/shell/coresight/Makefile.miniconfig | 14
-rwxr-xr-x  tools/perf/tests/shell/coresight/asm_pure_loop.sh | 18
-rw-r--r--  tools/perf/tests/shell/coresight/asm_pure_loop/.gitignore | 1
-rw-r--r--  tools/perf/tests/shell/coresight/asm_pure_loop/Makefile | 34
-rw-r--r--  tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S | 28
-rw-r--r--  tools/perf/tests/shell/coresight/memcpy_thread/.gitignore | 1
-rw-r--r--  tools/perf/tests/shell/coresight/memcpy_thread/Makefile | 33
-rw-r--r--  tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c | 79
-rwxr-xr-x  tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh | 18
-rw-r--r--  tools/perf/tests/shell/coresight/thread_loop/.gitignore | 1
-rw-r--r--  tools/perf/tests/shell/coresight/thread_loop/Makefile | 33
-rw-r--r--  tools/perf/tests/shell/coresight/thread_loop/thread_loop.c | 86
-rwxr-xr-x  tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh | 19
-rwxr-xr-x  tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh | 19
-rw-r--r--  tools/perf/tests/shell/coresight/unroll_loop_thread/.gitignore | 1
-rw-r--r--  tools/perf/tests/shell/coresight/unroll_loop_thread/Makefile | 33
-rw-r--r--  tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c | 74
-rwxr-xr-x  tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh | 18
-rwxr-xr-x  tools/perf/tests/shell/daemon.sh | 524
-rw-r--r--  tools/perf/tests/shell/lib/coresight.sh | 132
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py | 99
-rw-r--r--  tools/perf/tests/shell/lib/perf_metric_validation.py | 574
-rw-r--r--  tools/perf/tests/shell/lib/perf_metric_validation_rules.json | 398
-rw-r--r--  tools/perf/tests/shell/lib/probe.sh | 13
-rw-r--r--  tools/perf/tests/shell/lib/probe_vfs_getname.sh | 33
-rw-r--r--  tools/perf/tests/shell/lib/stat_output.sh | 170
-rw-r--r--  tools/perf/tests/shell/lib/waiting.sh | 78
-rwxr-xr-x  tools/perf/tests/shell/lock_contention.sh | 284
-rwxr-xr-x  tools/perf/tests/shell/pipe_test.sh | 34
-rwxr-xr-x  tools/perf/tests/shell/probe_vfs_getname.sh | 16
-rwxr-xr-x  tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 105
-rwxr-xr-x  tools/perf/tests/shell/record+script_probe_vfs_getname.sh | 46
-rwxr-xr-x  tools/perf/tests/shell/record+zstd_comp_decomp.sh | 37
-rwxr-xr-x  tools/perf/tests/shell/record.sh | 163
-rwxr-xr-x  tools/perf/tests/shell/record_bpf_filter.sh | 134
-rwxr-xr-x  tools/perf/tests/shell/record_offcpu.sh | 103
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_output.sh | 87
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_summary.sh | 31
-rwxr-xr-x  tools/perf/tests/shell/stat+json_output.sh | 219
-rwxr-xr-x  tools/perf/tests/shell/stat+shadow_stat.sh | 81
-rwxr-xr-x  tools/perf/tests/shell/stat+std_output.sh | 107
-rwxr-xr-x  tools/perf/tests/shell/stat.sh | 156
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metricgroups.sh | 12
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metrics.sh | 43
-rwxr-xr-x  tools/perf/tests/shell/stat_all_pfm.sh | 51
-rwxr-xr-x  tools/perf/tests/shell/stat_all_pmu.sh | 23
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters.sh | 45
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters_cgrp.sh | 79
-rwxr-xr-x  tools/perf/tests/shell/stat_metrics_values.sh | 30
-rwxr-xr-x  tools/perf/tests/shell/test_arm_callgraph_fp.sh | 41
-rwxr-xr-x  tools/perf/tests/shell/test_arm_coresight.sh | 212
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe.sh | 113
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe_fork.sh | 50
-rwxr-xr-x  tools/perf/tests/shell/test_brstack.sh | 76
-rwxr-xr-x  tools/perf/tests/shell/test_data_symbol.sh | 66
-rwxr-xr-x  tools/perf/tests/shell/test_intel_pt.sh | 687
-rwxr-xr-x  tools/perf/tests/shell/test_java_symbol.sh | 75
-rwxr-xr-x  tools/perf/tests/shell/test_perf_data_converter_json.sh | 72
-rwxr-xr-x  tools/perf/tests/shell/test_task_analyzer.sh | 175
-rwxr-xr-x  tools/perf/tests/shell/test_uprobe_from_different_cu.sh | 83
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_vfs_getname.sh | 43
63 files changed, 6314 insertions, 0 deletions
diff --git a/tools/perf/tests/shell/buildid.sh b/tools/perf/tests/shell/buildid.sh
new file mode 100755
index 0000000000..3383ca3399
--- /dev/null
+++ b/tools/perf/tests/shell/buildid.sh
@@ -0,0 +1,175 @@
+#!/bin/sh
+# build id cache operations
+# SPDX-License-Identifier: GPL-2.0
+
+# skip if there's no readelf
+if ! [ -x "$(command -v readelf)" ]; then
+ echo "failed: no readelf, install binutils"
+ exit 2
+fi
+
+# skip if there's no compiler
+if ! [ -x "$(command -v cc)" ]; then
+ echo "failed: no compiler, install gcc"
+ exit 2
+fi
+
+# check what we need to test windows binaries
+add_pe=1
+run_pe=1
+if ! perf version --build-options | grep -q 'libbfd: .* on '; then
+ echo "WARNING: perf not built with libbfd. PE binaries will not be tested."
+ add_pe=0
+ run_pe=0
+fi
+if ! which wine > /dev/null; then
+ echo "WARNING: wine not found. PE binaries will not be run."
+ run_pe=0
+fi
+
+# set up wine
+if [ ${run_pe} -eq 1 ]; then
+ wineprefix=$(mktemp -d /tmp/perf.wineprefix.XXX)
+ export WINEPREFIX=${wineprefix}
+ # clear display variables to prevent wine from popping up dialogs
+ unset DISPLAY
+ unset WAYLAND_DISPLAY
+fi
+
+ex_md5=$(mktemp /tmp/perf.ex.MD5.XXX)
+ex_sha1=$(mktemp /tmp/perf.ex.SHA1.XXX)
+ex_pe=$(dirname $0)/../pe-file.exe
+
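+# The --build-id linker flag picks the hash used for the ELF note, so the
+# cache gets exercised with both a 20-byte SHA1 and a 16-byte MD5 build-id.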
+echo 'int main(void) { return 0; }' | cc -Wl,--build-id=sha1 -o ${ex_sha1} -x c -
+echo 'int main(void) { return 0; }' | cc -Wl,--build-id=md5 -o ${ex_md5} -x c -
+
+echo "test binaries: ${ex_sha1} ${ex_md5} ${ex_pe}"
+
+check()
+{
+ case $1 in
+ *.exe)
+ # We don't have a tool that can pull a nicely formatted build-id out of
+ # a PE file, but we can extract the whole section with objcopy and
+ # format it ourselves. The .buildid section is a Debug Directory
+ # containing a CodeView entry:
+ # https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#debug-directory-image-only
+ # https://github.com/dotnet/runtime/blob/da94c022576a5c3bbc0e896f006565905eb137f9/docs/design/specs/PE-COFF.md
+ # The build-id starts at byte 33 and must be rearranged into a GUID.
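+	# For example (illustrative bytes only), raw bytes
+	#   01 02 03 04 05 06 07 08 a1 a2 ...
+	# come out of the sed expression as 04 03 02 01 06 05 08 07 a1 a2 ...,
+	# i.e. the first four bytes are reversed and the next two pairs swapped.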
+ id=`objcopy -O binary --only-section=.buildid $1 /dev/stdout | \
+ cut -c 33-48 | hexdump -ve '/1 "%02x"' | \
+ sed 's@^\(..\)\(..\)\(..\)\(..\)\(..\)\(..\)\(..\)\(..\)\(.*\)0a$@\4\3\2\1\6\5\8\7\9@'`
+ ;;
+ *)
+ id=`readelf -n ${1} 2>/dev/null | grep 'Build ID' | awk '{print $3}'`
+ ;;
+ esac
+ echo "build id: ${id}"
+
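+	# The cache stores a build-id as a 2-character directory name plus the
+	# remaining characters as the link name, i.e. .build-id/ab/cdef...,
+	# which is what the two substitutions below split out.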
+ id_file=${id#??}
+ id_dir=${id%$id_file}
+ link=$build_id_dir/.build-id/$id_dir/$id_file
+ echo "link: ${link}"
+
+ if [ ! -h $link ]; then
+ echo "failed: link ${link} does not exist"
+ exit 1
+ fi
+
+ file=${build_id_dir}/.build-id/$id_dir/`readlink ${link}`/elf
+ echo "file: ${file}"
+
+ # Check for file permission of original file
+ # in case of pe-file.exe file
+ echo $1 | grep ".exe"
+ if [ $? -eq 0 ]; then
+ if [ -x $1 ] && [ ! -x $file ]; then
+			echo "failed: file ${file} is not executable"
+ exit 1
+ fi
+
+ if [ ! -x $file ] && [ ! -e $file ]; then
+ echo "failed: file ${file} does not exist"
+ exit 1
+ fi
+ elif [ ! -x $file ]; then
+ echo "failed: file ${file} does not exist"
+ exit 1
+ fi
+
+ diff ${file} ${1}
+ if [ $? -ne 0 ]; then
+		echo "failed: ${file} does not match"
+ exit 1
+ fi
+
+ ${perf} buildid-cache -l | grep ${id}
+ if [ $? -ne 0 ]; then
+ echo "failed: ${id} is not reported by \"perf buildid-cache -l\""
+ exit 1
+ fi
+
+ echo "OK for ${1}"
+}
+
+test_add()
+{
+ build_id_dir=$(mktemp -d /tmp/perf.debug.XXX)
+ perf="perf --buildid-dir ${build_id_dir}"
+
+ ${perf} buildid-cache -v -a ${1}
+ if [ $? -ne 0 ]; then
+ echo "failed: add ${1} to build id cache"
+ exit 1
+ fi
+
+ check ${1}
+
+ rm -rf ${build_id_dir}
+}
+
+test_record()
+{
+ data=$(mktemp /tmp/perf.data.XXX)
+ build_id_dir=$(mktemp -d /tmp/perf.debug.XXX)
+ log_out=$(mktemp /tmp/perf.log.out.XXX)
+ log_err=$(mktemp /tmp/perf.log.err.XXX)
+ perf="perf --buildid-dir ${build_id_dir}"
+
+ echo "running: perf record $*"
+ ${perf} record --buildid-all -o ${data} "$@" 1>${log_out} 2>${log_err}
+ if [ $? -ne 0 ]; then
+ echo "failed: record $*"
+ echo "see log: ${log_err}"
+ exit 1
+ fi
+
+ args="$*"
+ check ${args##* }
+
+ rm -f ${log_out} ${log_err}
+ rm -rf ${build_id_dir}
+ rm -rf ${data}
+}
+
+# add binaries manually via perf buildid-cache -a
+test_add ${ex_sha1}
+test_add ${ex_md5}
+if [ ${add_pe} -eq 1 ]; then
+ test_add ${ex_pe}
+fi
+
+# add binaries via perf record post processing
+test_record ${ex_sha1}
+test_record ${ex_md5}
+if [ ${run_pe} -eq 1 ]; then
+ test_record wine ${ex_pe}
+fi
+
+# cleanup
+rm ${ex_sha1} ${ex_md5}
+if [ ${run_pe} -eq 1 ]; then
+ rm -r ${wineprefix}
+fi
+
+exit 0
diff --git a/tools/perf/tests/shell/coresight/Makefile b/tools/perf/tests/shell/coresight/Makefile
new file mode 100644
index 0000000000..b070e77970
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+include ../../../../../tools/scripts/Makefile.include
+include ../../../../../tools/scripts/Makefile.arch
+include ../../../../../tools/scripts/utilities.mak
+
+SUBDIRS = \
+ asm_pure_loop \
+ memcpy_thread \
+ thread_loop \
+ unroll_loop_thread
+
+all: $(SUBDIRS)
+$(SUBDIRS):
+ @$(MAKE) -C $@ >/dev/null
+
+INSTALLDIRS = $(SUBDIRS:%=install-%)
+
+install-tests: $(INSTALLDIRS)
+$(INSTALLDIRS):
+ @$(MAKE) -C $(@:install-%=%) install-tests >/dev/null
+
+CLEANDIRS = $(SUBDIRS:%=clean-%)
+
+clean: $(CLEANDIRS)
+$(CLEANDIRS):
+ $(call QUIET_CLEAN, test-$(@:clean-%=%)) $(Q)$(MAKE) -C $(@:clean-%=%) clean >/dev/null
+
+.PHONY: all clean $(SUBDIRS) $(CLEANDIRS) $(INSTALLDIRS)
diff --git a/tools/perf/tests/shell/coresight/Makefile.miniconfig b/tools/perf/tests/shell/coresight/Makefile.miniconfig
new file mode 100644
index 0000000000..5f72a9cb43
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/Makefile.miniconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+ifndef DESTDIR
+prefix ?= $(HOME)
+endif
+
+DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
+INSTALL = install
+INSTDIR_SUB = tests/shell/coresight
+
+include ../../../../../scripts/Makefile.include
+include ../../../../../scripts/Makefile.arch
+include ../../../../../scripts/utilities.mak
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop.sh b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
new file mode 100755
index 0000000000..779bc8608e
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
@@ -0,0 +1,18 @@
+#!/bin/sh -e
+# CoreSight / ASM Pure Loop
+
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+TEST="asm_pure_loop"
+. "$(dirname $0)"/../lib/coresight.sh
+ARGS=""
+DATV="out"
+DATA="$DATD/perf-$TEST-$DATV.data"
+
+perf record $PERFRECOPT -o "$DATA" "$BIN" $ARGS
+
+perf_dump_aux_verify "$DATA" 10 10 10
+
+err=$?
+exit $err
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/.gitignore b/tools/perf/tests/shell/coresight/asm_pure_loop/.gitignore
new file mode 100644
index 0000000000..468673ac32
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop/.gitignore
@@ -0,0 +1 @@
+asm_pure_loop
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/Makefile b/tools/perf/tests/shell/coresight/asm_pure_loop/Makefile
new file mode 100644
index 0000000000..206849e92b
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop/Makefile
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+include ../Makefile.miniconfig
+
+# Binary to produce
+BIN=asm_pure_loop
+# Any linking/libraries needed for the binary - empty if none needed
+LIB=
+
+all: $(BIN)
+
+$(BIN): $(BIN).S
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Build line - this is raw asm with no libc to have an always exact binary
+ $(Q)$(CC) $(BIN).S -nostdlib -static -o $(BIN) $(LIB)
+endif
+endif
+
+install-tests: all
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Install the test tool in the right place
+ $(call QUIET_INSTALL, tests) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)'; \
+ $(INSTALL) $(BIN) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)/$(BIN)'
+endif
+endif
+
+clean:
+ $(Q)$(RM) -f $(BIN)
+
+.PHONY: all clean install-tests
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
new file mode 100644
index 0000000000..75cf084a92
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Tamas Zsoldos <tamas.zsoldos@arm.com>, 2021 */
+
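+/*
+ * Minimal no-libc loop: x0 counts down from 0xffff while x1 stays zero, so
+ * the cbnz below never takes the "noskip" path and every iteration reaches
+ * "skip" through the adrp/br indirect branch before decrementing x0.
+ * The program then exits via the exit syscall (93).
+ */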
+.globl _start
+_start:
+ mov x0, 0x0000ffff
+ mov x1, xzr
+loop:
+ nop
+ nop
+ cbnz x1, noskip
+ nop
+ nop
+ adrp x2, skip
+ add x2, x2, :lo12:skip
+ br x2
+ nop
+ nop
+noskip:
+ nop
+ nop
+skip:
+ sub x0, x0, 1
+ cbnz x0, loop
+
+ mov x0, #0
+ mov x8, #93 // __NR_exit syscall
+ svc #0
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread/.gitignore b/tools/perf/tests/shell/coresight/memcpy_thread/.gitignore
new file mode 100644
index 0000000000..f8217e5609
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/memcpy_thread/.gitignore
@@ -0,0 +1 @@
+memcpy_thread
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread/Makefile b/tools/perf/tests/shell/coresight/memcpy_thread/Makefile
new file mode 100644
index 0000000000..2db637eb2c
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/memcpy_thread/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+include ../Makefile.miniconfig
+
+# Binary to produce
+BIN=memcpy_thread
+# Any linking/libraries needed for the binary - empty if none needed
+LIB=-pthread
+
+all: $(BIN)
+
+$(BIN): $(BIN).c
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Build line
+ $(Q)$(CC) $(BIN).c -o $(BIN) $(LIB)
+endif
+endif
+
+install-tests: all
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Install the test tool in the right place
+ $(call QUIET_INSTALL, tests) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)'; \
+ $(INSTALL) $(BIN) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)/$(BIN)'
+endif
+endif
+
+clean:
+ $(Q)$(RM) -f $(BIN)
+
+.PHONY: all clean install-tests
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c b/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c
new file mode 100644
index 0000000000..a7e169d1bf
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <pthread.h>
+
+struct args {
+ unsigned long loops;
+ unsigned long size;
+ pthread_t th;
+ void *ret;
+};
+
+static void *thrfn(void *arg)
+{
+ struct args *a = arg;
+ unsigned long i, len = a->loops;
+ unsigned char *src, *dst;
+
+ src = malloc(a->size * 1024);
+ dst = malloc(a->size * 1024);
+ if ((!src) || (!dst)) {
+ printf("ERR: Can't allocate memory\n");
+ exit(1);
+ }
+ for (i = 0; i < len; i++)
+ memcpy(dst, src, a->size * 1024);
+	/* The return value is never examined by the joining thread. */
+	return NULL;
+}
+
+static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
+{
+ pthread_t t;
+ pthread_attr_t attr;
+
+ pthread_attr_init(&attr);
+ pthread_create(&t, &attr, fn, arg);
+ return t;
+}
+
+int main(int argc, char **argv)
+{
+ unsigned long i, len, size, thr;
+ pthread_t threads[256];
+ struct args args[256];
+ long long v;
+
+ if (argc < 4) {
+ printf("ERR: %s [copysize Kb] [numthreads] [numloops (hundreds)]\n", argv[0]);
+ exit(1);
+ }
+
+ v = atoll(argv[1]);
+ if ((v < 1) || (v > (1024 * 1024))) {
+ printf("ERR: max memory 1GB (1048576 KB)\n");
+ exit(1);
+ }
+ size = v;
+ thr = atol(argv[2]);
+ if ((thr < 1) || (thr > 256)) {
+ printf("ERR: threads 1-256\n");
+ exit(1);
+ }
+ v = atoll(argv[3]);
+ if ((v < 1) || (v > 40000000000ll)) {
+ printf("ERR: loops 1-40000000000 (hundreds)\n");
+ exit(1);
+ }
+ len = v * 100;
+ for (i = 0; i < thr; i++) {
+ args[i].loops = len;
+ args[i].size = size;
+ args[i].th = new_thr(thrfn, &(args[i]));
+ }
+ for (i = 0; i < thr; i++)
+ pthread_join(args[i].th, &(args[i].ret));
+ return 0;
+}
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
new file mode 100755
index 0000000000..08a44e52ce
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
@@ -0,0 +1,18 @@
+#!/bin/sh -e
+# CoreSight / Memcpy 16k 10 Threads
+
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+TEST="memcpy_thread"
+. "$(dirname $0)"/../lib/coresight.sh
+ARGS="16 10 1"
+DATV="16k_10"
+DATA="$DATD/perf-$TEST-$DATV.data"
+
+perf record $PERFRECOPT -o "$DATA" "$BIN" $ARGS
+
+perf_dump_aux_verify "$DATA" 10 10 10
+
+err=$?
+exit $err
diff --git a/tools/perf/tests/shell/coresight/thread_loop/.gitignore b/tools/perf/tests/shell/coresight/thread_loop/.gitignore
new file mode 100644
index 0000000000..6d4c33eaa9
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/thread_loop/.gitignore
@@ -0,0 +1 @@
+thread_loop
diff --git a/tools/perf/tests/shell/coresight/thread_loop/Makefile b/tools/perf/tests/shell/coresight/thread_loop/Makefile
new file mode 100644
index 0000000000..ea846c038e
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/thread_loop/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+include ../Makefile.miniconfig
+
+# Binary to produce
+BIN=thread_loop
+# Any linking/libraries needed for the binary - empty if none needed
+LIB=-pthread
+
+all: $(BIN)
+
+$(BIN): $(BIN).c
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Build line
+ $(Q)$(CC) $(BIN).c -o $(BIN) $(LIB)
+endif
+endif
+
+install-tests: all
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Install the test tool in the right place
+ $(call QUIET_INSTALL, tests) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)'; \
+ $(INSTALL) $(BIN) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)/$(BIN)'
+endif
+endif
+
+clean:
+ $(Q)$(RM) -f $(BIN)
+
+.PHONY: all clean install-tests
diff --git a/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c b/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c
new file mode 100644
index 0000000000..c0158fac7d
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+// Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+// define this for gettid()
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <pthread.h>
+#include <sys/syscall.h>
+#ifndef SYS_gettid
+// gettid is 178 on arm64
+# define SYS_gettid 178
+#endif
+#define gettid() syscall(SYS_gettid)
+
+struct args {
+ unsigned int loops;
+ pthread_t th;
+ void *ret;
+};
+
+static void *thrfn(void *arg)
+{
+ struct args *a = arg;
+ int i = 0, len = a->loops;
+
+ if (getenv("SHOW_TID")) {
+ unsigned long long tid = gettid();
+
+ printf("%llu\n", tid);
+ }
+ asm volatile(
+ "loop:\n"
+ "add %[i], %[i], #1\n"
+ "cmp %[i], %[len]\n"
+ "blt loop\n"
+	: /* out */ [i] "+r" (i)
+	: /* in */ [len] "r" (len)
+	: /* clobber */ "cc"
+	);
+ return (void *)(long)i;
+}
+
+static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
+{
+ pthread_t t;
+ pthread_attr_t attr;
+
+ pthread_attr_init(&attr);
+ pthread_create(&t, &attr, fn, arg);
+ return t;
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int i, len, thr;
+ pthread_t threads[256];
+ struct args args[256];
+
+ if (argc < 3) {
+ printf("ERR: %s [numthreads] [numloops (millions)]\n", argv[0]);
+ exit(1);
+ }
+
+ thr = atoi(argv[1]);
+ if ((thr < 1) || (thr > 256)) {
+ printf("ERR: threads 1-256\n");
+ exit(1);
+ }
+ len = atoi(argv[2]);
+ if ((len < 1) || (len > 4000)) {
+ printf("ERR: max loops 4000 (millions)\n");
+ exit(1);
+ }
+ len *= 1000000;
+ for (i = 0; i < thr; i++) {
+ args[i].loops = len;
+ args[i].th = new_thr(thrfn, &(args[i]));
+ }
+ for (i = 0; i < thr; i++)
+ pthread_join(args[i].th, &(args[i].ret));
+ return 0;
+}
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
new file mode 100755
index 0000000000..c83a200ded
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
@@ -0,0 +1,19 @@
+#!/bin/sh -e
+# CoreSight / Thread Loop 10 Threads - Check TID
+
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+TEST="thread_loop"
+. "$(dirname $0)"/../lib/coresight.sh
+ARGS="10 1"
+DATV="check-tid-10th"
+DATA="$DATD/perf-$TEST-$DATV.data"
+STDO="$DATD/perf-$TEST-$DATV.stdout"
+
+SHOW_TID=1 perf record -s $PERFRECOPT -o "$DATA" "$BIN" $ARGS > $STDO
+
+perf_dump_aux_tid_verify "$DATA" "$STDO"
+
+err=$?
+exit $err
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
new file mode 100755
index 0000000000..6346fd5e87
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
@@ -0,0 +1,19 @@
+#!/bin/sh -e
+# CoreSight / Thread Loop 2 Threads - Check TID
+
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+TEST="thread_loop"
+. "$(dirname $0)"/../lib/coresight.sh
+ARGS="2 20"
+DATV="check-tid-2th"
+DATA="$DATD/perf-$TEST-$DATV.data"
+STDO="$DATD/perf-$TEST-$DATV.stdout"
+
+SHOW_TID=1 perf record -s $PERFRECOPT -o "$DATA" "$BIN" $ARGS > $STDO
+
+perf_dump_aux_tid_verify "$DATA" "$STDO"
+
+err=$?
+exit $err
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread/.gitignore b/tools/perf/tests/shell/coresight/unroll_loop_thread/.gitignore
new file mode 100644
index 0000000000..2cb4e996db
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread/.gitignore
@@ -0,0 +1 @@
+unroll_loop_thread
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread/Makefile b/tools/perf/tests/shell/coresight/unroll_loop_thread/Makefile
new file mode 100644
index 0000000000..6264c4e3ab
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+include ../Makefile.miniconfig
+
+# Binary to produce
+BIN=unroll_loop_thread
+# Any linking/libraries needed for the binary - empty if none needed
+LIB=-pthread
+
+all: $(BIN)
+
+$(BIN): $(BIN).c
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Build line
+ $(Q)$(CC) $(BIN).c -o $(BIN) $(LIB)
+endif
+endif
+
+install-tests: all
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Install the test tool in the right place
+ $(call QUIET_INSTALL, tests) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)'; \
+ $(INSTALL) $(BIN) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)/$(BIN)'
+endif
+endif
+
+clean:
+ $(Q)$(RM) -f $(BIN)
+
+.PHONY: all clean install-tests
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c b/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c
new file mode 100644
index 0000000000..8f6d384208
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+// Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <pthread.h>
+
+struct args {
+ pthread_t th;
+ unsigned int in;
+ void *ret;
+};
+
+static void *thrfn(void *arg)
+{
+ struct args *a = arg;
+ unsigned int i, in = a->in;
+
+ for (i = 0; i < 10000; i++) {
+ asm volatile (
+// force an unroll of this add instruction so we can test long runs of code
+#define SNIP1 "add %[in], %[in], #1\n"
+// 10
+#define SNIP2 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1
+// 100
+#define SNIP3 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2
+// 1000
+#define SNIP4 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3
+// 10000
+#define SNIP5 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4
+// 100000
+ SNIP5 SNIP5 SNIP5 SNIP5 SNIP5 SNIP5 SNIP5 SNIP5 SNIP5 SNIP5
+		: /* out */ [in] "+r" (in)
+		: /* in */
+		: /* clobber */
+ );
+ }
+	return NULL;
+}
+
+static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
+{
+ pthread_t t;
+ pthread_attr_t attr;
+
+ pthread_attr_init(&attr);
+ pthread_create(&t, &attr, fn, arg);
+ return t;
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int i, thr;
+ pthread_t threads[256];
+ struct args args[256];
+
+ if (argc < 2) {
+ printf("ERR: %s [numthreads]\n", argv[0]);
+ exit(1);
+ }
+
+ thr = atoi(argv[1]);
+ if ((thr > 256) || (thr < 1)) {
+ printf("ERR: threads 1-256\n");
+ exit(1);
+ }
+ for (i = 0; i < thr; i++) {
+ args[i].in = rand();
+ args[i].th = new_thr(thrfn, &(args[i]));
+ }
+ for (i = 0; i < thr; i++)
+ pthread_join(args[i].th, &(args[i].ret));
+ return 0;
+}
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
new file mode 100755
index 0000000000..7304e3d3a6
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
@@ -0,0 +1,18 @@
+#!/bin/sh -e
+# CoreSight / Unroll Loop Thread 10
+
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+TEST="unroll_loop_thread"
+. "$(dirname $0)"/../lib/coresight.sh
+ARGS="10"
+DATV="10"
+DATA="$DATD/perf-$TEST-$DATV.data"
+
+perf record $PERFRECOPT -o "$DATA" "$BIN" $ARGS
+
+perf_dump_aux_verify "$DATA" 10 10 10
+
+err=$?
+exit $err
diff --git a/tools/perf/tests/shell/daemon.sh b/tools/perf/tests/shell/daemon.sh
new file mode 100755
index 0000000000..4c598cfc5a
--- /dev/null
+++ b/tools/perf/tests/shell/daemon.sh
@@ -0,0 +1,524 @@
+#!/bin/bash
+# daemon operations
+# SPDX-License-Identifier: GPL-2.0
+
+check_line_first()
+{
+ local line=$1
+ local name=$2
+ local base=$3
+ local output=$4
+ local lock=$5
+ local up=$6
+
+ local line_name
+ line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'`
+ local line_base
+ line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'`
+ local line_output
+ line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'`
+ local line_lock
+ line_lock=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'`
+ local line_up
+ line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'`
+
+ if [ "${name}" != "${line_name}" ]; then
+ echo "FAILED: wrong name"
+ error=1
+ fi
+
+ if [ "${base}" != "${line_base}" ]; then
+ echo "FAILED: wrong base"
+ error=1
+ fi
+
+ if [ "${output}" != "${line_output}" ]; then
+ echo "FAILED: wrong output"
+ error=1
+ fi
+
+ if [ "${lock}" != "${line_lock}" ]; then
+ echo "FAILED: wrong lock"
+ error=1
+ fi
+
+ if [ "${up}" != "${line_up}" ]; then
+ echo "FAILED: wrong up"
+ error=1
+ fi
+}
+
+check_line_other()
+{
+ local line=$1
+ local name=$2
+ local run=$3
+ local base=$4
+ local output=$5
+ local control=$6
+ local ack=$7
+ local up=$8
+
+ local line_name
+ line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'`
+ local line_run
+ line_run=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'`
+ local line_base
+ line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'`
+ local line_output
+ line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'`
+ local line_control
+ line_control=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'`
+ local line_ack
+ line_ack=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $7 }'`
+ local line_up
+ line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $8 }'`
+
+ if [ "${name}" != "${line_name}" ]; then
+ echo "FAILED: wrong name"
+ error=1
+ fi
+
+ if [ "${run}" != "${line_run}" ]; then
+ echo "FAILED: wrong run"
+ error=1
+ fi
+
+ if [ "${base}" != "${line_base}" ]; then
+ echo "FAILED: wrong base"
+ error=1
+ fi
+
+ if [ "${output}" != "${line_output}" ]; then
+ echo "FAILED: wrong output"
+ error=1
+ fi
+
+ if [ "${control}" != "${line_control}" ]; then
+ echo "FAILED: wrong control"
+ error=1
+ fi
+
+ if [ "${ack}" != "${line_ack}" ]; then
+ echo "FAILED: wrong ack"
+ error=1
+ fi
+
+ if [ "${up}" != "${line_up}" ]; then
+ echo "FAILED: wrong up"
+ error=1
+ fi
+}
+
+daemon_exit()
+{
+ local config=$1
+
+ local line
+ line=`perf daemon --config ${config} -x: | head -1`
+ local pid
+ pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`
+
+ # Reset trap handler.
+ trap - SIGINT SIGTERM
+
+ # stop daemon
+ perf daemon stop --config ${config}
+
+ # ... and wait for the pid to go away
+ tail --pid=${pid} -f /dev/null
+}
+
+daemon_start()
+{
+ local config=$1
+ local session=$2
+
+ perf daemon start --config ${config}
+
+ # Clean up daemon if interrupted.
+ trap 'echo "FAILED: Signal caught"; daemon_exit "${config}"; exit 1' SIGINT SIGTERM
+
+ # wait for the session to ping
+ local state="FAIL"
+ local retries=0
+ while [ "${state}" != "OK" ]; do
+ state=`perf daemon ping --config ${config} --session ${session} | awk '{ print $1 }'`
+ sleep 0.05
+ retries=$((${retries} +1))
+ if [ ${retries} -ge 600 ]; then
+ echo "FAILED: Timeout waiting for daemon to ping"
+ daemon_exit ${config}
+ exit 1
+ fi
+ done
+}
+
+test_list()
+{
+ echo "test daemon list"
+
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+
+[session-time]
+run = -e task-clock -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} size
+
+ # check first line
+ # pid:daemon:base:base/output:base/lock
+ local line
+ line=`perf daemon --config ${config} -x: | head -1`
+ check_line_first ${line} daemon ${base} ${base}/output ${base}/lock "0"
+
+ # check 1st session
+ # pid:size:-e cpu-clock:base/size:base/size/output:base/size/control:base/size/ack:0
+ local line
+ line=`perf daemon --config ${config} -x: | head -2 | tail -1`
+ check_line_other "${line}" size "-e cpu-clock -m 1 sleep 10" ${base}/session-size \
+ ${base}/session-size/output ${base}/session-size/control \
+ ${base}/session-size/ack "0"
+
+ # check 2nd session
+ # pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0
+ local line
+ line=`perf daemon --config ${config} -x: | head -3 | tail -1`
+ check_line_other "${line}" time "-e task-clock -m 1 sleep 10" ${base}/session-time \
+ ${base}/session-time/output ${base}/session-time/control \
+ ${base}/session-time/ack "0"
+
+ # stop daemon
+ daemon_exit ${config}
+
+ rm -rf ${base}
+ rm -f ${config}
+}
+
+test_reconfig()
+{
+ echo "test daemon reconfig"
+
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ # prepare config
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+
+[session-time]
+run = -e task-clock -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} size
+
+ # check 2nd session
+ # pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0
+ local line
+ line=`perf daemon --config ${config} -x: | head -3 | tail -1`
+ check_line_other "${line}" time "-e task-clock -m 1 sleep 10" ${base}/session-time \
+ ${base}/session-time/output ${base}/session-time/control ${base}/session-time/ack "0"
+ local pid
+ pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`
+
+ # prepare new config
+ local config_new=${config}.new
+ cat <<EOF > ${config_new}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+
+[session-time]
+run = -e cpu-clock -m 1 sleep 10
+EOF
+
+ # TEST 1 - change config
+
+ sed -i -e "s|BASE|${base}|" ${config_new}
+ cp ${config_new} ${config}
+
+ # wait for old session to finish
+ tail --pid=${pid} -f /dev/null
+
+ # wait for new one to start
+ local state="FAIL"
+ while [ "${state}" != "OK" ]; do
+ state=`perf daemon ping --config ${config} --session time | awk '{ print $1 }'`
+ done
+
+ # check reconfigured 2nd session
+ # pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0
+ local line
+ line=`perf daemon --config ${config} -x: | head -3 | tail -1`
+ check_line_other "${line}" time "-e cpu-clock -m 1 sleep 10" ${base}/session-time \
+ ${base}/session-time/output ${base}/session-time/control ${base}/session-time/ack "0"
+
+ # TEST 2 - empty config
+
+ local config_empty=${config}.empty
+ cat <<EOF > ${config_empty}
+[daemon]
+base=BASE
+EOF
+
+ # change config
+ sed -i -e "s|BASE|${base}|" ${config_empty}
+ cp ${config_empty} ${config}
+
+ # wait for sessions to finish
+ local state="OK"
+ while [ "${state}" != "FAIL" ]; do
+ state=`perf daemon ping --config ${config} --session time | awk '{ print $1 }'`
+ done
+
+ local state="OK"
+ while [ "${state}" != "FAIL" ]; do
+ state=`perf daemon ping --config ${config} --session size | awk '{ print $1 }'`
+ done
+
+ local one
+ one=`perf daemon --config ${config} -x: | wc -l`
+
+ if [ ${one} -ne "1" ]; then
+ echo "FAILED: wrong list output"
+ error=1
+ fi
+
+ # TEST 3 - config again
+
+ cp ${config_new} ${config}
+
+ # wait for size to start
+ local state="FAIL"
+ while [ "${state}" != "OK" ]; do
+ state=`perf daemon ping --config ${config} --session size | awk '{ print $1 }'`
+ done
+
+ # wait for time to start
+ local state="FAIL"
+ while [ "${state}" != "OK" ]; do
+ state=`perf daemon ping --config ${config} --session time | awk '{ print $1 }'`
+ done
+
+ # stop daemon
+ daemon_exit ${config}
+
+ rm -rf ${base}
+ rm -f ${config}
+ rm -f ${config_new}
+ rm -f ${config_empty}
+}
+
+test_stop()
+{
+ echo "test daemon stop"
+
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ # prepare config
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+
+[session-time]
+run = -e task-clock -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} size
+
+ local pid_size
+ pid_size=`perf daemon --config ${config} -x: | head -2 | tail -1 |
+ awk 'BEGIN { FS = ":" } ; { print $1 }'`
+ local pid_time
+ pid_time=`perf daemon --config ${config} -x: | head -3 | tail -1 |
+ awk 'BEGIN { FS = ":" } ; { print $1 }'`
+
+ # check that sessions are running
+ if [ ! -d "/proc/${pid_size}" ]; then
+ echo "FAILED: session size not up"
+ fi
+
+ if [ ! -d "/proc/${pid_time}" ]; then
+ echo "FAILED: session time not up"
+ fi
+
+ # stop daemon
+ daemon_exit ${config}
+
+ # check that sessions are gone
+ if [ -d "/proc/${pid_size}" ]; then
+ echo "FAILED: session size still up"
+ fi
+
+ if [ -d "/proc/${pid_time}" ]; then
+ echo "FAILED: session time still up"
+ fi
+
+ rm -rf ${base}
+ rm -f ${config}
+}
+
+test_signal()
+{
+ echo "test daemon signal"
+
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ # prepare config
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-test]
+run = -e cpu-clock --switch-output -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} test
+
+ # send 2 signals
+ perf daemon signal --config ${config} --session test
+ perf daemon signal --config ${config}
+
+ # stop daemon
+ daemon_exit ${config}
+
+	# expect 3 perf.data files: 2 from the signals and 1 from perf record finishing
+ count=`ls ${base}/session-test/*perf.data* | wc -l`
+ if [ ${count} -ne 3 ]; then
+ error=1
+		echo "FAILED: perf data not generated"
+ fi
+
+ rm -rf ${base}
+ rm -f ${config}
+}
+
+test_ping()
+{
+ echo "test daemon ping"
+
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ # prepare config
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+
+[session-time]
+run = -e task-clock -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} size
+
+ size=`perf daemon ping --config ${config} --session size | awk '{ print $1 }'`
+ type=`perf daemon ping --config ${config} --session time | awk '{ print $1 }'`
+
+ if [ ${size} != "OK" ] || [ ${type} != "OK" ]; then
+ error=1
+ echo "FAILED: daemon ping failed"
+ fi
+
+ # stop daemon
+ daemon_exit ${config}
+
+ rm -rf ${base}
+ rm -f ${config}
+}
+
+test_lock()
+{
+ echo "test daemon lock"
+
+ local config
+ config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base
+ base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ # prepare config
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} size
+
+ # start second daemon over the same config/base
+ failed=`perf daemon start --config ${config} 2>&1 | awk '{ print $1 }'`
+
+ # check that we failed properly
+ if [ ${failed} != "failed:" ]; then
+ error=1
+ echo "FAILED: daemon lock failed"
+ fi
+
+ # stop daemon
+ daemon_exit ${config}
+
+ rm -rf ${base}
+ rm -f ${config}
+}
+
+error=0
+
+test_list
+test_reconfig
+test_stop
+test_signal
+test_ping
+test_lock
+
+exit ${error}
diff --git a/tools/perf/tests/shell/lib/coresight.sh b/tools/perf/tests/shell/lib/coresight.sh
new file mode 100644
index 0000000000..6c3d34ec64
--- /dev/null
+++ b/tools/perf/tests/shell/lib/coresight.sh
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+# This is sourced from a driver script so no need for #!/bin... etc. at the
+# top - the assumption below is that it runs as part of sourcing after the
+# test sets up some basic env vars to say what it is.
+
+# This currently works with ETMv4 / ETF, not any other packet types, at this
+# point. This will need changes if that changes.
+
+# perf record options for the perf tests to use
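+# ("-m ,16M" keeps the normal mmap buffer at its default size and asks for a
+# 16MB AUX area buffer - the value after the comma - to hold the cs_etm trace)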
+PERFRECMEM="-m ,16M"
+PERFRECOPT="$PERFRECMEM -e cs_etm//u"
+
+TOOLS=$(dirname $0)
+DIR="$TOOLS/$TEST"
+BIN="$DIR/$TEST"
+# If the test tool/binary does not exist or is not executable then skip the test
+if ! test -x "$BIN"; then exit 2; fi
+DATD="."
+# If the data dir env is set then make the data dir use that instead of ./
+if test -n "$PERF_TEST_CORESIGHT_DATADIR"; then
+ DATD="$PERF_TEST_CORESIGHT_DATADIR";
+fi
+# If the stat dir env is set then make the data dir use that instead of ./
+STATD="."
+if test -n "$PERF_TEST_CORESIGHT_STATDIR"; then
+ STATD="$PERF_TEST_CORESIGHT_STATDIR";
+fi
+
+# Called if the test fails - error code 1
+err() {
+ echo "$1"
+ exit 1
+}
+
+# Check that a statistic counted by our perf test is at least the given minimum
+check_val_min() {
+ STATF="$4"
+ if test "$2" -lt "$3"; then
+ echo ", FAILED" >> "$STATF"
+ err "Sanity check number of $1 is too low ($2 < $3)"
+ fi
+}
+
+perf_dump_aux_verify() {
+ # Some basic checking that the AUX chunk contains some sensible data
+ # to see that we are recording something and at least a minimum
+ # amount of it. We should almost always see Fn packets in just about
+ # anything but certainly we will see some trace info and async
+ # packets
+ DUMP="$DATD/perf-tmp-aux-dump.txt"
+ perf report --stdio --dump -i "$1" | \
+ grep -o -e I_ATOM_F -e I_ASYNC -e I_TRACE_INFO > "$DUMP"
+ # Simply count how many of these packets we find to see that we are
+ # producing a reasonable amount of data - exact checks are not sane
+ # as this is a lossy process where we may lose some blocks and the
+ # compiler may produce different code depending on the compiler and
+ # optimization options, so this is rough just to see if we're
+ # either missing almost all the data or all of it
+ ATOM_FX_NUM=$(grep -c I_ATOM_F "$DUMP")
+ ASYNC_NUM=$(grep -c I_ASYNC "$DUMP")
+ TRACE_INFO_NUM=$(grep -c I_TRACE_INFO "$DUMP")
+ rm -f "$DUMP"
+
+ # Arguments provide minimums for a pass
+ CHECK_FX_MIN="$2"
+ CHECK_ASYNC_MIN="$3"
+ CHECK_TRACE_INFO_MIN="$4"
+
+ # Write out statistics, so over time you can track results to see if
+ # there is a pattern - for example we have less "noisy" results that
+ # produce more consistent amounts of data each run, to see if over
+# time any techniques to minimize data loss are having an effect or
+ # not
+ STATF="$STATD/stats-$TEST-$DATV.csv"
+ if ! test -f "$STATF"; then
+ echo "ATOM Fx Count, Minimum, ASYNC Count, Minimum, TRACE INFO Count, Minimum" > "$STATF"
+ fi
+ echo -n "$ATOM_FX_NUM, $CHECK_FX_MIN, $ASYNC_NUM, $CHECK_ASYNC_MIN, $TRACE_INFO_NUM, $CHECK_TRACE_INFO_MIN" >> "$STATF"
+
+ # Actually check to see if we passed or failed.
+ check_val_min "ATOM_FX" "$ATOM_FX_NUM" "$CHECK_FX_MIN" "$STATF"
+ check_val_min "ASYNC" "$ASYNC_NUM" "$CHECK_ASYNC_MIN" "$STATF"
+ check_val_min "TRACE_INFO" "$TRACE_INFO_NUM" "$CHECK_TRACE_INFO_MIN" "$STATF"
+ echo ", Ok" >> "$STATF"
+}
+
+perf_dump_aux_tid_verify() {
+	# A specifically crafted test will produce a list of Thread IDs to
+ # stdout that need to be checked to see that they have had trace
+ # info collected in AUX blocks in the perf data. This will go
+ # through all the TID's that are listed as CID=0xabcdef and see
+ # that all the Thread IDs the test tool reports are in the perf
+ # data AUX chunks
+
+ # The TID test tools will print a TID per stdout line that are being
+ # tested
+ TIDS=$(cat "$2")
+ # Scan the perf report to find the TIDs that are actually CID in hex
+ # and build a list of the ones found
+ FOUND_TIDS=$(perf report --stdio --dump -i "$1" | \
+ grep -o "CID=0x[0-9a-z]\+" | sed 's/CID=//g' | \
+ uniq | sort | uniq)
+ # No CID=xxx found - maybe your kernel is reporting these as
+ # VMID=xxx so look there
+ if test -z "$FOUND_TIDS"; then
+ FOUND_TIDS=$(perf report --stdio --dump -i "$1" | \
+ grep -o "VMID=0x[0-9a-z]\+" | sed 's/VMID=//g' | \
+ uniq | sort | uniq)
+ fi
+
+ # Iterate over the list of TIDs that the test says it has and find
+ # them in the TIDs found in the perf report
+ MISSING=""
+ for TID2 in $TIDS; do
+ FOUND=""
+ for TIDHEX in $FOUND_TIDS; do
+ TID=$(printf "%i" $TIDHEX)
+ if test "$TID" -eq "$TID2"; then
+ FOUND="y"
+ break
+ fi
+ done
+ if test -z "$FOUND"; then
+ MISSING="$MISSING $TID"
+ fi
+ done
+ if test -n "$MISSING"; then
+ err "Thread IDs $MISSING not found in perf AUX data"
+ fi
+}
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
new file mode 100644
index 0000000000..ea55d5ea1c
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Basic sanity check of perf JSON output as specified in the man page.
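+#
+# Typically fed the JSON lines that "perf stat -j" emits, e.g. (illustrative
+# invocation only; the real callers are the stat+*json* shell tests):
+#   perf stat -j -o /tmp/stat.json true
+#   python3 perf_json_output_lint.py --no-args --file /tmp/stat.json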
+
+import argparse
+import sys
+import json
+
+ap = argparse.ArgumentParser()
+ap.add_argument('--no-args', action='store_true')
+ap.add_argument('--interval', action='store_true')
+ap.add_argument('--system-wide-no-aggr', action='store_true')
+ap.add_argument('--system-wide', action='store_true')
+ap.add_argument('--event', action='store_true')
+ap.add_argument('--per-core', action='store_true')
+ap.add_argument('--per-thread', action='store_true')
+ap.add_argument('--per-cache', action='store_true')
+ap.add_argument('--per-die', action='store_true')
+ap.add_argument('--per-node', action='store_true')
+ap.add_argument('--per-socket', action='store_true')
+ap.add_argument('--file', type=argparse.FileType('r'), default=sys.stdin)
+args = ap.parse_args()
+
+Lines = args.file.readlines()
+
+def isfloat(num):
+ try:
+ float(num)
+ return True
+ except ValueError:
+ return False
+
+
+def isint(num):
+ try:
+ int(num)
+ return True
+ except ValueError:
+ return False
+
+def is_counter_value(num):
+ return isfloat(num) or num == '<not counted>' or num == '<not supported>'
+
+def check_json_output(expected_items):
+ checks = {
+ 'aggregate-number': lambda x: isfloat(x),
+ 'core': lambda x: True,
+ 'counter-value': lambda x: is_counter_value(x),
+ 'cgroup': lambda x: True,
+ 'cpu': lambda x: isint(x),
+ 'cache': lambda x: True,
+ 'die': lambda x: True,
+ 'event': lambda x: True,
+ 'event-runtime': lambda x: isfloat(x),
+ 'interval': lambda x: isfloat(x),
+ 'metric-unit': lambda x: True,
+ 'metric-value': lambda x: isfloat(x),
+ 'metricgroup': lambda x: True,
+ 'node': lambda x: True,
+ 'pcnt-running': lambda x: isfloat(x),
+ 'socket': lambda x: True,
+ 'thread': lambda x: True,
+ 'unit': lambda x: True,
+ }
+ input = '[\n' + ','.join(Lines) + '\n]'
+ for item in json.loads(input):
+ if expected_items != -1:
+ count = len(item)
+ if count != expected_items and count >= 1 and count <= 6 and 'metric-value' in item:
+ # Events that generate >1 metric may have isolated metric
+ # values and possibly other prefixes like interval, core,
+ # aggregate-number, or event-runtime/pcnt-running from multiplexing.
+ pass
+ elif count != expected_items and count >= 1 and count <= 5 and 'metricgroup' in item:
+ pass
+ elif count != expected_items:
+ raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
+ f' in \'{item}\'')
+ for key, value in item.items():
+ if key not in checks:
+ raise RuntimeError(f'Unexpected key: key={key} value={value}')
+ if not checks[key](value):
+ raise RuntimeError(f'Check failed for: key={key} value={value}')
+
+
+try:
+ if args.no_args or args.system_wide or args.event:
+ expected_items = 7
+ elif args.interval or args.per_thread or args.system_wide_no_aggr:
+ expected_items = 8
+ elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cache:
+ expected_items = 9
+ else:
+ # If no option is specified, don't check the number of items.
+ expected_items = -1
+ check_json_output(expected_items)
+except:
+ print('Test failed for input:\n' + '\n'.join(Lines))
+ raise
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
new file mode 100644
index 0000000000..50a34a9cc0
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -0,0 +1,574 @@
+# SPDX-License-Identifier: GPL-2.0
+import re
+import csv
+import json
+import argparse
+import sys
+from pathlib import Path
+import subprocess
+
+class Validator:
+ def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
+ self.rulefname = rulefname
+ self.reportfname = reportfname
+ self.rules = None
+ self.collectlist:str = metrics
+ self.metrics = self.__set_metrics(metrics)
+ self.skiplist = set()
+ self.tolerance = t
+
+ self.workloads = [x for x in workload.split(",") if x]
+        self.wlidx = 0 # idx of current workload
+        self.allresults = dict() # metric results of all workloads
+ self.allignoremetrics = dict() # metrics with no results or negative results
+ self.allfailtests = dict()
+ self.alltotalcnt = dict()
+ self.allpassedcnt = dict()
+ self.allerrlist = dict()
+
+ self.results = dict() # metric results of current workload
+ # vars for test pass/failure statistics
+ self.ignoremetrics= set() # metrics with no results or negative results, neg result counts as a failed test
+ self.failtests = dict()
+ self.totalcnt = 0
+ self.passedcnt = 0
+ # vars for errors
+ self.errlist = list()
+
+ # vars for Rule Generator
+ self.pctgmetrics = set() # Percentage rule
+
+ # vars for debug
+ self.datafname = datafname
+ self.debug = debug
+ self.fullrulefname = fullrulefname
+
+ def __set_metrics(self, metrics=''):
+ if metrics != '':
+ return set(metrics.split(","))
+ else:
+ return set()
+
+ def read_json(self, filename: str) -> dict:
+ try:
+ with open(Path(filename).resolve(), "r") as f:
+ data = json.loads(f.read())
+ except OSError as e:
+ print(f"Error when reading file {e}")
+ sys.exit()
+
+ return data
+
+ def json_dump(self, data, output_file):
+ parent = Path(output_file).parent
+ if not parent.exists():
+ parent.mkdir(parents=True)
+
+ with open(output_file, "w+") as output_file:
+ json.dump(data,
+ output_file,
+ ensure_ascii=True,
+ indent=4)
+
+ def get_results(self, idx:int = 0):
+ return self.results[idx]
+
+ def get_bounds(self, lb, ub, error, alias={}, ridx:int = 0) -> list:
+ """
+ Get bounds and tolerance from lb, ub, and error.
+        If missing lb, use 0.0; missing ub, use float('inf'); missing error, use self.tolerance.
+
+ @param lb: str/float, lower bound
+ @param ub: str/float, upper bound
+ @param error: float/str, error tolerance
+ @returns: lower bound, return inf if the lower bound is a metric value and is not collected
+ upper bound, return -1 if the upper bound is a metric value and is not collected
+                  tolerance, denormalized based on the upper bound value
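+                  (e.g. ub=2.0 with error=5 gives a denormalized tolerance of 0.1, i.e. 5% of 2.0)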
+ """
+ # init ubv and lbv to invalid values
+ def get_bound_value (bound, initval, ridx):
+ val = initval
+ if isinstance(bound, int) or isinstance(bound, float):
+ val = bound
+ elif isinstance(bound, str):
+ if bound == '':
+ val = float("inf")
+ elif bound in alias:
+                    vall = self.get_value(alias[bound], ridx)
+ if vall:
+ val = vall[0]
+ elif bound.replace('.', '1').isdigit():
+ val = float(bound)
+ else:
+ print("Wrong bound: {0}".format(bound))
+ else:
+ print("Wrong bound: {0}".format(bound))
+ return val
+
+ ubv = get_bound_value(ub, -1, ridx)
+ lbv = get_bound_value(lb, float('inf'), ridx)
+ t = get_bound_value(error, self.tolerance, ridx)
+
+ # denormalize error threshold
+ denormerr = t * ubv / 100 if ubv != 100 and ubv > 0 else t
+
+ return lbv, ubv, denormerr
+
+ def get_value(self, name:str, ridx:int = 0) -> list:
+ """
+ Get value of the metric from self.results.
+        If the result of this metric is not provided, the metric name will be added into self.ignoremetrics and self.errlist.
+ All future test(s) on this metric will fail.
+
+ @param name: name of the metric
+ @returns: list with value found in self.results; list is empty when value is not found.
+ """
+ results = []
+ data = self.results[ridx] if ridx in self.results else self.results[0]
+ if name not in self.ignoremetrics:
+ if name in data:
+ results.append(data[name])
+ elif name.replace('.', '1').isdigit():
+ results.append(float(name))
+ else:
+ self.ignoremetrics.add(name)
+ return results
+
+ def check_bound(self, val, lb, ub, err):
+ return True if val <= ub + err and val >= lb - err else False
+
+ # Positive Value Sanity check
+ def pos_val_test(self):
+ """
+ Check if metrics value are non-negative.
+ One metric is counted as one test.
+ Failure: when metric value is negative or not provided.
+ Metrics with negative value will be added into the self.failtests['PositiveValueTest'] and self.ignoremetrics.
+ """
+ negmetric = dict()
+ pcnt = 0
+ tcnt = 0
+ rerun = list()
+ for name, val in self.get_results().items():
+ if val < 0:
+ negmetric[name] = val
+ rerun.append(name)
+ else:
+ pcnt += 1
+ tcnt += 1
+ if len(rerun) > 0 and len(rerun) < 20:
+ second_results = dict()
+ self.second_test(rerun, second_results)
+ for name, val in second_results.items():
+ if name not in negmetric: continue
+ if val >= 0:
+ del negmetric[name]
+ pcnt += 1
+
+ self.failtests['PositiveValueTest']['Total Tests'] = tcnt
+ self.failtests['PositiveValueTest']['Passed Tests'] = pcnt
+ if len(negmetric.keys()):
+ self.ignoremetrics.update(negmetric.keys())
+ negmessage = ["{0}(={1:.4f})".format(name, val) for name, val in negmetric.items()]
+ self.failtests['PositiveValueTest']['Failed Tests'].append({'NegativeValue': negmessage})
+
+ return
+
+ def evaluate_formula(self, formula:str, alias:dict, ridx:int = 0):
+ """
+ Evaluate the value of formula.
+
+ @param formula: the formula to be evaluated
+ @param alias: the dict has alias to metric name mapping
+        @returns: value of the formula on success; -1 if one or more metric values are not provided
+ """
+ stack = []
+ b = 0
+ errs = []
+ sign = "+"
+ f = str()
+
+ #TODO: support parenthesis?
+ for i in range(len(formula)):
+ if i+1 == len(formula) or formula[i] in ('+', '-', '*', '/'):
+ s = alias[formula[b:i]] if i+1 < len(formula) else alias[formula[b:]]
+ v = self.get_value(s, ridx)
+ if not v:
+ errs.append(s)
+ else:
+ f = f + "{0}(={1:.4f})".format(s, v[0])
+ if sign == "*":
+                        stack[-1] = stack[-1] * v[0]
+                    elif sign == "/":
+                        stack[-1] = stack[-1] / v[0]
+ elif sign == '-':
+ stack.append(-v[0])
+ else:
+ stack.append(v[0])
+ if i + 1 < len(formula):
+ sign = formula[i]
+ f += sign
+ b = i + 1
+
+ if len(errs) > 0:
+ return -1, "Metric value missing: "+','.join(errs)
+
+ val = sum(stack)
+ return val, f
+
+ # Relationships Tests
+ def relationship_test(self, rule: dict):
+ """
+ Validate if the metrics follow the required relationship in the rule.
+        e.g. lower_bound <= eval(formula) <= upper_bound
+        One rule is counted as one test.
+        Failure: when one or more metric results are not provided, or when the formula evaluates outside the upper/lower bounds.
+
+ @param rule: dict with metric name(+alias), formula, and required upper and lower bounds.
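+
+        Example rule layout (illustrative values only; the field names match
+        the keys read below):
+            {"RuleIndex": 1, "TestType": "RelationshipTest",
+             "Metrics": [{"Name": "metric_a", "Alias": "a"},
+                         {"Name": "metric_b", "Alias": "b"}],
+             "Formula": "a+b", "RangeLower": 0, "RangeUpper": 100,
+             "ErrorThreshold": 5.0}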
+ """
+ alias = dict()
+ for m in rule['Metrics']:
+ alias[m['Alias']] = m['Name']
+ lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex'])
+ val, f = self.evaluate_formula(rule['Formula'], alias, ridx=rule['RuleIndex'])
+ if val == -1:
+ self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Description':f})
+ elif not self.check_bound(val, lbv, ubv, t):
+ lb = rule['RangeLower']
+ ub = rule['RangeUpper']
+ if isinstance(lb, str):
+ if lb in alias:
+ lb = alias[lb]
+ if isinstance(ub, str):
+ if ub in alias:
+ ub = alias[ub]
+ self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Formula':f,
+ 'RangeLower': lb, 'LowerBoundValue': self.get_value(lb),
+ 'RangeUpper': ub, 'UpperBoundValue':self.get_value(ub),
+ 'ErrorThreshold': t, 'CollectedValue': val})
+ else:
+ self.passedcnt += 1
+ self.failtests['RelationshipTest']['Passed Tests'] += 1
+ self.totalcnt += 1
+ self.failtests['RelationshipTest']['Total Tests'] += 1
+
+ return
+
+
+ # Single Metric Test
+ def single_test(self, rule:dict):
+ """
+ Validate if the metrics are in the required value range.
+ eg. lower_bound <= metrics_value <= upper_bound
+ One metric is counted as one test in this type of test.
+ One rule may include one or more metrics.
+        Failure: when the metric value is not provided or the value is outside the bounds.
+        This test updates self.totalcnt and records failed tests in self.failtests['SingleMetricTest'].
+
+ @param rule: dict with metrics to validate and the value range requirement
+ """
+ lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'])
+ metrics = rule['Metrics']
+ passcnt = 0
+ totalcnt = 0
+ faillist = list()
+ failures = dict()
+ rerun = list()
+ for m in metrics:
+ totalcnt += 1
+ result = self.get_value(m['Name'])
+ if len(result) > 0 and self.check_bound(result[0], lbv, ubv, t) or m['Name'] in self.skiplist:
+ passcnt += 1
+ else:
+ failures[m['Name']] = result
+ rerun.append(m['Name'])
+
+ if len(rerun) > 0 and len(rerun) < 20:
+ second_results = dict()
+ self.second_test(rerun, second_results)
+ for name, val in second_results.items():
+ if name not in failures: continue
+ if self.check_bound(val, lbv, ubv, t):
+ passcnt += 1
+ del failures[name]
+ else:
+ failures[name] = val
+ self.results[0][name] = val
+
+ self.totalcnt += totalcnt
+ self.passedcnt += passcnt
+ self.failtests['SingleMetricTest']['Total Tests'] += totalcnt
+ self.failtests['SingleMetricTest']['Passed Tests'] += passcnt
+ if len(failures.keys()) != 0:
+ faillist = [{'MetricName':name, 'CollectedValue':val} for name, val in failures.items()]
+ self.failtests['SingleMetricTest']['Failed Tests'].append({'RuleIndex':rule['RuleIndex'],
+ 'RangeLower': rule['RangeLower'],
+ 'RangeUpper': rule['RangeUpper'],
+ 'ErrorThreshold':rule['ErrorThreshold'],
+ 'Failure':faillist})
+
+ return
+
+ def create_report(self):
+ """
+ Create final report and write into a JSON file.
+ """
+ alldata = list()
+ for i in range(0, len(self.workloads)):
+ reportstas = {"Total Rule Count": self.alltotalcnt[i], "Passed Rule Count": self.allpassedcnt[i]}
+ data = {"Metric Validation Statistics": reportstas, "Tests in Category": self.allfailtests[i],
+ "Errors":self.allerrlist[i]}
+ alldata.append({"Workload": self.workloads[i], "Report": data})
+
+ json_str = json.dumps(alldata, indent=4)
+ print("Test validation finished. Final report: ")
+ print(json_str)
+
+ if self.debug:
+ allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]} for i in range(0, len(self.workloads))]
+ self.json_dump(allres, self.datafname)
+
+ def check_rule(self, testtype, metric_list):
+ """
+ Check if the rule uses metric(s) that do not exist on the current platform.
+
+ @param metric_list: list of metrics from the rule.
+ @return: False when any metric used in the rule is not found in the metric file (this rule should be skipped).
+ True when all metrics used in the rule are found in the metric file.
+ """
+ if testtype == "RelationshipTest":
+ for m in metric_list:
+ if m['Name'] not in self.metrics:
+ return False
+ return True
+
+ # Start of Collector and Converter
+ def convert(self, data: list, metricvalues:dict):
+ """
+ Convert collected metric data from the -j output to dict of {metric_name:value}.
+ """
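+ # Sketch of the parsing below, using a hypothetical stderr line from 'perf stat -j':
+ # {"metric-value" : "17.5", "metric-unit" : "% tma_frontend_bound"}
+ # Because "metric-unit" contains a space, the second token is used as the
+ # metric name, yielding metricvalues["tma_frontend_bound"] = 17.5; a unit
+ # string without a space is used as the name unchanged.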
+ for json_string in data:
+ try:
+ result = json.loads(json_string)
+ if "metric-unit" in result and result["metric-unit"] != "(null)" and result["metric-unit"] != "":
+ name = result["metric-unit"].split(" ")[1] if len(result["metric-unit"].split(" ")) > 1 \
+ else result["metric-unit"]
+ metricvalues[name.lower()] = float(result["metric-value"])
+ except ValueError as error:
+ continue
+ return
+
+ def _run_perf(self, metric, workload: str):
+ tool = 'perf'
+ command = [tool, 'stat', '-j', '-M', f"{metric}", "-a"]
+ wl = workload.split()
+ command.extend(wl)
+ print(" ".join(command))
+ cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
+ data = [x+'}' for x in cmd.stderr.split('}\n') if x]
+ return data
+
+
+ def collect_perf(self, workload: str):
+ """
+ Collect metric data with "perf stat -M" on given workload with -a and -j.
+ """
+ self.results = dict()
+ print(f"Starting perf collection")
+ print(f"Long workload: {workload}")
+ collectlist = dict()
+ if self.collectlist != "":
+ collectlist[0] = {x for x in self.collectlist.split(",")}
+ else:
+ collectlist[0] = set(list(self.metrics))
+ # Create metric set for relationship rules
+ for rule in self.rules:
+ if rule["TestType"] == "RelationshipTest":
+ metrics = [m["Name"] for m in rule["Metrics"]]
+ if not any(m not in collectlist[0] for m in metrics):
+ collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))]
+
+ for idx, metrics in collectlist.items():
+ if idx == 0: wl = "true"
+ else: wl = workload
+ for metric in metrics:
+ data = self._run_perf(metric, wl)
+ if idx not in self.results: self.results[idx] = dict()
+ self.convert(data, self.results[idx])
+ return
+
+ def second_test(self, collectlist, second_results):
+ workload = self.workloads[self.wlidx]
+ for metric in collectlist:
+ data = self._run_perf(metric, workload)
+ self.convert(data, second_results)
+
+ # End of Collector and Converter
+
+ # Start of Rule Generator
+ def parse_perf_metrics(self):
+ """
+ Read and parse the metrics reported by 'perf list':
+ 1) find metrics with '1%' or '100%' as ScaleUnit for the percent check
+ 2) create the metric name list
+ """
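+ # Hypothetical shape of one entry returned by 'perf list -j --details metrics':
+ # {"MetricName": "tma_retiring", "ScaleUnit": "100%", ...}
+ # The name is lower-cased into self.metrics and, since its ScaleUnit is
+ # '1%' or '100%', it is also added to self.pctgmetrics for the percent rule.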
+ command = ['perf', 'list', '-j', '--details', 'metrics']
+ cmd = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
+ try:
+ data = json.loads(cmd.stdout)
+ for m in data:
+ if 'MetricName' not in m:
+ print("Warning: no metric name")
+ continue
+ name = m['MetricName'].lower()
+ self.metrics.add(name)
+ if 'ScaleUnit' in m and (m['ScaleUnit'] == '1%' or m['ScaleUnit'] == '100%'):
+ self.pctgmetrics.add(name.lower())
+ except ValueError as error:
+ print(f"Error when parsing metric data: {error}")
+ sys.exit()
+
+ return
+
+ def remove_unsupported_rules(self, rules):
+ new_rules = []
+ for rule in rules:
+ add_rule = True
+ for m in rule["Metrics"]:
+ if m["Name"] in self.skiplist or m["Name"] not in self.metrics:
+ add_rule = False
+ break
+ if add_rule:
+ new_rules.append(rule)
+ return new_rules
+
+ def create_rules(self):
+ """
+ Create full rules which includes:
+ 1) All the rules from the "relationship_rules" file
+ 2) SingleMetric rule for all the 'percent' metrics
+
+ Reindex all the rules to avoid repeated RuleIndex
+ """
+ data = self.read_json(self.rulefname)
+ rules = data['RelationshipRules']
+ self.skiplist = set([name.lower() for name in data['SkipList']])
+ self.rules = self.remove_unsupported_rules(rules)
+ pctgrule = {'RuleIndex':0,
+ 'TestType':'SingleMetricTest',
+ 'RangeLower':'0',
+ 'RangeUpper': '100',
+ 'ErrorThreshold': self.tolerance,
+ 'Description':'Metrics in percent unit have values within [0, 100]',
+ 'Metrics': [{'Name': m.lower()} for m in self.pctgmetrics]}
+ self.rules.append(pctgrule)
+
+ # Re-index all rules to avoid repeated RuleIndex
+ idx = 1
+ for r in self.rules:
+ r['RuleIndex'] = idx
+ idx += 1
+
+ if self.debug:
+ #TODO: need to test and generate file name correctly
+ data = {'RelationshipRules':self.rules, 'SupportedMetrics': [{"MetricName": name} for name in self.metrics]}
+ self.json_dump(data, self.fullrulefname)
+
+ return
+ # End of Rule Generator
+
+ def _storewldata(self, key):
+ '''
+ Store all the data of one workload into the corresponding data structure for all workloads.
+ @param key: key to the dictionaries (index of self.workloads).
+ '''
+ self.allresults[key] = self.results
+ self.allignoremetrics[key] = self.ignoremetrics
+ self.allfailtests[key] = self.failtests
+ self.alltotalcnt[key] = self.totalcnt
+ self.allpassedcnt[key] = self.passedcnt
+ self.allerrlist[key] = self.errlist
+
+ # Initialize data structures before data validation of each workload
+ def _init_data(self):
+
+ testtypes = ['PositiveValueTest', 'RelationshipTest', 'SingleMetricTest']
+ self.results = dict()
+ self.ignoremetrics= set()
+ self.errlist = list()
+ self.failtests = {k:{'Total Tests':0, 'Passed Tests':0, 'Failed Tests':[]} for k in testtypes}
+ self.totalcnt = 0
+ self.passedcnt = 0
+
+ def test(self):
+ '''
+ The real entry point of the test framework.
+ This function loads the validation rule JSON file and the metrics reported by perf to create
+ the rules used for testing.
+ It then collects metric values for each workload and validates them against those rules.
+
+ In the test process, it walks through each rule and launches the correct test function based on the
+ 'TestType' field of the rule.
+
+ The final report is written into a JSON file.
+ '''
+ if not self.collectlist:
+ self.parse_perf_metrics()
+ self.create_rules()
+ for i in range(0, len(self.workloads)):
+ self.wlidx = i
+ self._init_data()
+ self.collect_perf(self.workloads[i])
+ # Run positive value test
+ self.pos_val_test()
+ for r in self.rules:
+ # skip rules that use metrics which do not exist on this platform
+ testtype = r['TestType']
+ if not self.check_rule(testtype, r['Metrics']):
+ continue
+ if testtype == 'RelationshipTest':
+ self.relationship_test(r)
+ elif testtype == 'SingleMetricTest':
+ self.single_test(r)
+ else:
+ print("Unsupported Test Type: ", testtype)
+ self.errlist.append("Unsupported Test Type from rule: " + str(r['RuleIndex']))
+ self._storewldata(i)
+ print("Workload: ", self.workloads[i])
+ print("Total metrics collected: ", self.failtests['PositiveValueTest']['Total Tests'])
+ print("Non-negative metric count: ", self.failtests['PositiveValueTest']['Passed Tests'])
+ print("Total Test Count: ", self.totalcnt)
+ print("Passed Test Count: ", self.passedcnt)
+
+ self.create_report()
+ return sum(self.alltotalcnt.values()) != sum(self.allpassedcnt.values())
+# End of Class Validator
+
+
+def main() -> int:
+ parser = argparse.ArgumentParser(description="Launch metric value validation")
+
+ parser.add_argument("-rule", help="Base validation rule file", required=True)
+ parser.add_argument("-output_dir", help="Path for validator output file, report file", required=True)
+ parser.add_argument("-debug", help="Debug run, save intermediate data to files", action="store_true", default=False)
+ parser.add_argument("-wl", help="Workload to run while data collection", default="true")
+ parser.add_argument("-m", help="Metric list to validate", default="")
+ args = parser.parse_args()
+ outpath = Path(args.output_dir)
+ reportf = Path.joinpath(outpath, 'perf_report.json')
+ fullrule = Path.joinpath(outpath, 'full_rule.json')
+ datafile = Path.joinpath(outpath, 'perf_data.json')
+
+ validator = Validator(args.rule, reportf, debug=args.debug,
+ datafname=datafile, fullrulefname=fullrule, workload=args.wl,
+ metrics=args.m)
+ ret = validator.test()
+
+ return ret
+
+
+if __name__ == "__main__":
+ import sys
+ sys.exit(main())
+
+
+
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation_rules.json b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
new file mode 100644
index 0000000000..eb6f59e018
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_metric_validation_rules.json
@@ -0,0 +1,398 @@
+{
+ "SkipList": [
+ "tsx_aborted_cycles",
+ "tsx_transactional_cycles",
+ "C2_Pkg_Residency",
+ "C6_Pkg_Residency",
+ "C1_Core_Residency",
+ "C6_Core_Residency",
+ "tma_false_sharing",
+ "tma_remote_cache",
+ "tma_contested_accesses"
+ ],
+ "RelationshipRules": [
+ {
+ "RuleIndex": 1,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "Intel(R) Optane(TM) Persistent Memory(PMEM) bandwidth total includes Intel(R) Optane(TM) Persistent Memory(PMEM) read bandwidth and Intel(R) Optane(TM) Persistent Memory(PMEM) write bandwidth",
+ "Metrics": [
+ {
+ "Name": "pmem_memory_bandwidth_read",
+ "Alias": "a"
+ },
+ {
+ "Name": "pmem_memory_bandwidth_write",
+ "Alias": "b"
+ },
+ {
+ "Name": "pmem_memory_bandwidth_total",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 2,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "DDR memory bandwidth total includes DDR memory read bandwidth and DDR memory write bandwidth",
+ "Metrics": [
+ {
+ "Name": "memory_bandwidth_read",
+ "Alias": "a"
+ },
+ {
+ "Name": "memory_bandwidth_write",
+ "Alias": "b"
+ },
+ {
+ "Name": "memory_bandwidth_total",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 3,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "100",
+ "RangeUpper": "100",
+ "ErrorThreshold": 5.0,
+ "Description": "Total memory read accesses includes memory reads from last level cache (LLC) addressed to local DRAM and memory reads from the last level cache (LLC) addressed to remote DRAM.",
+ "Metrics": [
+ {
+ "Name": "numa_reads_addressed_to_local_dram",
+ "Alias": "a"
+ },
+ {
+ "Name": "numa_reads_addressed_to_remote_dram",
+ "Alias": "b"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 4,
+ "Formula": "a",
+ "TestType": "SingleMetricTest",
+ "RangeLower": "0.125",
+ "RangeUpper": "",
+ "ErrorThreshold": "",
+ "Description": "",
+ "Metrics": [
+ {
+ "Name": "cpi",
+ "Alias": "a"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 5,
+ "Formula": "",
+ "TestType": "SingleMetricTest",
+ "RangeLower": "0",
+ "RangeUpper": "1",
+ "ErrorThreshold": 5.0,
+ "Description": "Ratio values should be within value range [0,1)",
+ "Metrics": [
+ {
+ "Name": "loads_per_instr",
+ "Alias": ""
+ },
+ {
+ "Name": "stores_per_instr",
+ "Alias": ""
+ },
+ {
+ "Name": "l1d_mpi",
+ "Alias": ""
+ },
+ {
+ "Name": "l1d_demand_data_read_hits_per_instr",
+ "Alias": ""
+ },
+ {
+ "Name": "l1_i_code_read_misses_with_prefetches_per_instr",
+ "Alias": ""
+ },
+ {
+ "Name": "l2_demand_data_read_hits_per_instr",
+ "Alias": ""
+ },
+ {
+ "Name": "l2_mpi",
+ "Alias": ""
+ },
+ {
+ "Name": "l2_demand_data_read_mpi",
+ "Alias": ""
+ },
+ {
+ "Name": "l2_demand_code_mpi",
+ "Alias": ""
+ }
+ ]
+ },
+ {
+ "RuleIndex": 6,
+ "Formula": "a+b+c+d",
+ "TestType": "RelationshipTest",
+ "RangeLower": "100",
+ "RangeUpper": "100",
+ "ErrorThreshold": 5.0,
+ "Description": "Sum of TMA level 1 metrics should be 100%",
+ "Metrics": [
+ {
+ "Name": "tma_frontend_bound",
+ "Alias": "a"
+ },
+ {
+ "Name": "tma_bad_speculation",
+ "Alias": "b"
+ },
+ {
+ "Name": "tma_backend_bound",
+ "Alias": "c"
+ },
+ {
+ "Name": "tma_retiring",
+ "Alias": "d"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 7,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "Sum of the level 2 children should equal level 1 parent",
+ "Metrics": [
+ {
+ "Name": "tma_fetch_latency",
+ "Alias": "a"
+ },
+ {
+ "Name": "tma_fetch_bandwidth",
+ "Alias": "b"
+ },
+ {
+ "Name": "tma_frontend_bound",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 8,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "Sum of the level 2 children should equal level 1 parent",
+ "Metrics": [
+ {
+ "Name": "tma_branch_mispredicts",
+ "Alias": "a"
+ },
+ {
+ "Name": "tma_machine_clears",
+ "Alias": "b"
+ },
+ {
+ "Name": "tma_bad_speculation",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 9,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "Sum of the level 2 children should equal level 1 parent",
+ "Metrics": [
+ {
+ "Name": "tma_memory_bound",
+ "Alias": "a"
+ },
+ {
+ "Name": "tma_core_bound",
+ "Alias": "b"
+ },
+ {
+ "Name": "tma_backend_bound",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 10,
+ "Formula": "a+b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "c",
+ "RangeUpper": "c",
+ "ErrorThreshold": 5.0,
+ "Description": "Sum of the level 2 children should equal level 1 parent",
+ "Metrics": [
+ {
+ "Name": "tma_light_operations",
+ "Alias": "a"
+ },
+ {
+ "Name": "tma_heavy_operations",
+ "Alias": "b"
+ },
+ {
+ "Name": "tma_retiring",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 11,
+ "Formula": "a+b+c",
+ "TestType": "RelationshipTest",
+ "RangeLower": "100",
+ "RangeUpper": "100",
+ "ErrorThreshold": 5.0,
+ "Description": "The memory_page_empty, memory_page_misses, and memory_page_hits shares of all requests should sum to 100%.",
+ "Metrics": [
+ {
+ "Name": "memory_page_empty_vs_all_requests",
+ "Alias": "a"
+ },
+ {
+ "Name": "memory_page_misses_vs_all_requests",
+ "Alias": "b"
+ },
+ {
+ "Name": "memory_page_hits_vs_all_requests",
+ "Alias": "c"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 12,
+ "Formula": "a-b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "0",
+ "RangeUpper": "",
+ "ErrorThreshold": 5.0,
+ "Description": "CPU utilization in kernel mode should always be <= cpu utilization",
+ "Metrics": [
+ {
+ "Name": "cpu_utilization",
+ "Alias": "a"
+ },
+ {
+ "Name": "cpu_utilization_in_kernel_mode",
+ "Alias": "b"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 13,
+ "Formula": "a-b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "0",
+ "RangeUpper": "",
+ "ErrorThreshold": 5.0,
+ "Description": "Total L2 misses per instruction should be >= L2 demand data read misses per instruction",
+ "Metrics": [
+ {
+ "Name": "l2_mpi",
+ "Alias": "a"
+ },
+ {
+ "Name": "l2_demand_data_read_mpi",
+ "Alias": "b"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 14,
+ "Formula": "a-b",
+ "TestType": "RelationshipTest",
+ "RangeLower": "0",
+ "RangeUpper": "",
+ "ErrorThreshold": 5.0,
+ "Description": "Total L2 misses per instruction should be >= L2 demand code misses per instruction",
+ "Metrics": [
+ {
+ "Name": "l2_mpi",
+ "Alias": "a"
+ },
+ {
+ "Name": "l2_demand_code_mpi",
+ "Alias": "b"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 15,
+ "Formula": "b+c+d",
+ "TestType": "RelationshipTest",
+ "RangeLower": "a",
+ "RangeUpper": "a",
+ "ErrorThreshold": 5.0,
+ "Description": "The sum of L3 data read, RFO, and code misses per instruction equals the total L3 misses per instruction.",
+ "Metrics": [
+ {
+ "Name": "llc_mpi",
+ "Alias": "a"
+ },
+ {
+ "Name": "llc_data_read_mpi_demand_plus_prefetch",
+ "Alias": "b"
+ },
+ {
+ "Name": "llc_rfo_read_mpi_demand_plus_prefetch",
+ "Alias": "c"
+ },
+ {
+ "Name": "llc_code_read_mpi_demand_plus_prefetch",
+ "Alias": "d"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 16,
+ "Formula": "a",
+ "TestType": "SingleMetricTest",
+ "RangeLower": "0",
+ "RangeUpper": "8",
+ "ErrorThreshold": 0.0,
+ "Description": "Setting generous range for allowable frequencies",
+ "Metrics": [
+ {
+ "Name": "uncore_freq",
+ "Alias": "a"
+ }
+ ]
+ },
+ {
+ "RuleIndex": 17,
+ "Formula": "a",
+ "TestType": "SingleMetricTest",
+ "RangeLower": "0",
+ "RangeUpper": "8",
+ "ErrorThreshold": 0.0,
+ "Description": "Setting generous range for allowable frequencies",
+ "Metrics": [
+ {
+ "Name": "cpu_operating_frequency",
+ "Alias": "a"
+ }
+ ]
+ }
+ ]
+} \ No newline at end of file
diff --git a/tools/perf/tests/shell/lib/probe.sh b/tools/perf/tests/shell/lib/probe.sh
new file mode 100644
index 0000000000..5aa6e2ec57
--- /dev/null
+++ b/tools/perf/tests/shell/lib/probe.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+skip_if_no_perf_probe() {
+ perf probe 2>&1 | grep -q 'is not a perf-command' && return 2
+ return 0
+}
+
+skip_if_no_perf_trace() {
+ perf trace -h 2>&1 | grep -q -e 'is not a perf-command' -e 'trace command not available' && return 2
+ return 0
+}
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
new file mode 100644
index 0000000000..bf4c1fb71c
--- /dev/null
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -0,0 +1,33 @@
+#!/bin/sh
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+perf probe -l 2>&1 | grep -q probe:vfs_getname
+had_vfs_getname=$?
+
+cleanup_probe_vfs_getname() {
+ if [ $had_vfs_getname -eq 1 ] ; then
+ perf probe -q -d probe:vfs_getname*
+ fi
+}
+
+add_probe_vfs_getname() {
+ add_probe_verbose=$1
+ if [ $had_vfs_getname -eq 1 ] ; then
+ line=$(perf probe -L getname_flags 2>&1 | grep -E 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
+ perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
+ perf probe $add_probe_verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
+ fi
+}
+
+skip_if_no_debuginfo() {
+ add_probe_vfs_getname -v 2>&1 | grep -E -q "^(Failed to find the path for the kernel|Debuginfo-analysis is not supported)|(file has no debug information)" && return 2
+ return 1
+}
+
+# check if perf is compiled with libtraceevent support
+skip_no_probe_record_support() {
+ if [ $had_vfs_getname -eq 1 ] ; then
+ perf record --dry-run -e $1 2>&1 | grep "libtraceevent is necessary for tracepoint support" && return 2
+ return 1
+ fi
+}
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
new file mode 100644
index 0000000000..3cc158a643
--- /dev/null
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -0,0 +1,170 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Return true if perf_event_paranoid is > $1 and not running as root.
+function ParanoidAndNotRoot()
+{
+ [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
+}
+
+# $1 name $2 extra_opt
+check_no_args()
+{
+ echo -n "Checking $1 output: no args "
+ perf stat $2 true
+ commachecker --no-args
+ echo "[Success]"
+}
+
+check_system_wide()
+{
+ echo -n "Checking $1 output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -a $2 true
+ commachecker --system-wide
+ echo "[Success]"
+}
+
+check_system_wide_no_aggr()
+{
+ echo -n "Checking $1 output: system wide no aggregation "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -A -a --no-merge $2 true
+ commachecker --system-wide-no-aggr
+ echo "[Success]"
+}
+
+check_interval()
+{
+ echo -n "Checking $1 output: interval "
+ perf stat -I 1000 $2 true
+ commachecker --interval
+ echo "[Success]"
+}
+
+check_event()
+{
+ echo -n "Checking $1 output: event "
+ perf stat -e cpu-clock $2 true
+ commachecker --event
+ echo "[Success]"
+}
+
+check_per_core()
+{
+ echo -n "Checking $1 output: per core "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-core -a $2 true
+ commachecker --per-core
+ echo "[Success]"
+}
+
+check_per_thread()
+{
+ echo -n "Checking $1 output: per thread "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-thread -a $2 true
+ commachecker --per-thread
+ echo "[Success]"
+}
+
+check_per_cache_instance()
+{
+ echo -n "Checking $1 output: per cache instance "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-cache -a $2 true
+ commachecker --per-cache
+ echo "[Success]"
+}
+
+check_per_die()
+{
+ echo -n "Checking $1 output: per die "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-die -a $2 true
+ commachecker --per-die
+ echo "[Success]"
+}
+
+check_per_node()
+{
+ echo -n "Checking $1 output: per node "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-node -a $2 true
+ commachecker --per-node
+ echo "[Success]"
+}
+
+check_per_socket()
+{
+ echo -n "Checking $1 output: per socket "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-socket -a $2 true
+ commachecker --per-socket
+ echo "[Success]"
+}
+
+# The perf stat options for per-socket, per-core, per-die
+# and -A (no_aggr mode) use the info fetched from this
+# directory: "/sys/devices/system/cpu/cpu*/topology". For
+# example, the socket value is fetched from the
+# "physical_package_id" file in the topology directory.
+# Reference: cpu__get_topology_int in util/cpumap.c
+# If the platform doesn't expose topology information, values
+# will be set to -1. For example, in the case of the pSeries
+# platform on powerpc, the value for "physical_package_id" is
+# restricted and set to -1. The check here validates the
+# socket-id read from the topology file before proceeding further.
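+# For instance (values vary by machine), a typical x86 system reports:
+#   $ cat /sys/devices/system/cpu/cpu0/topology/physical_package_id
+#   0
+# whereas a platform that restricts the file reports -1; check_for_topology()
+# below echoes 1 in that case so callers can skip the per-socket style checks.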
+
+FILE_LOC="/sys/devices/system/cpu/cpu*/topology/"
+FILE_NAME="physical_package_id"
+
+function check_for_topology()
+{
+ if ! ParanoidAndNotRoot 0
+ then
+ socket_file=`ls $FILE_LOC/$FILE_NAME | head -n 1`
+ [ -z $socket_file ] && {
+ echo 0
+ return
+ }
+ socket_id=`cat $socket_file`
+ [ $socket_id == -1 ] && {
+ echo 1
+ return
+ }
+ fi
+ echo 0
+}
diff --git a/tools/perf/tests/shell/lib/waiting.sh b/tools/perf/tests/shell/lib/waiting.sh
new file mode 100644
index 0000000000..bdd5a7c715
--- /dev/null
+++ b/tools/perf/tests/shell/lib/waiting.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+tenths=date\ +%s%1N
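+# $($tenths) expands to the current time in tenths of a second: seconds since
+# the epoch followed by the first digit of the nanoseconds field.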
+
+# Wait for PID $1 to have $2 number of threads started
+# Time out after $3 tenths of a second or 5 seconds if $3 is ""
+wait_for_threads()
+{
+ tm_out=$3 ; [ -n "${tm_out}" ] || tm_out=50
+ start_time=$($tenths)
+ while [ -e "/proc/$1/task" ] ; do
+ th_cnt=$(find "/proc/$1/task" -mindepth 1 -maxdepth 1 -printf x | wc -c)
+ if [ "${th_cnt}" -ge "$2" ] ; then
+ return 0
+ fi
+ # Wait at most tm_out tenths of a second
+ if [ $(($($tenths) - start_time)) -ge $tm_out ] ; then
+ echo "PID $1 does not have $2 threads"
+ return 1
+ fi
+ done
+ return 1
+}
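+# A minimal usage sketch (workload and timeout values are hypothetical): start
+# a workload in the background and wait up to 2 seconds for it to have 2 threads:
+#   perf test -w thloop 30 &
+#   TESTPID=$!
+#   wait_for_threads ${TESTPID} 2 20 || exit 1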
+
+# Wait for perf record -vvv 2>$2 with PID $1 to start by looking at file $2
+# It depends on capturing perf record debug message "perf record has started"
+# Time out after $3 tenths of a second or 5 seconds if $3 is ""
+wait_for_perf_to_start()
+{
+ tm_out=$3 ; [ -n "${tm_out}" ] || tm_out=50
+ echo "Waiting for \"perf record has started\" message"
+ start_time=$($tenths)
+ while [ -e "/proc/$1" ] ; do
+ if grep -q "perf record has started" "$2" ; then
+ echo OK
+ break
+ fi
+ # Wait at most tm_out tenths of a second
+ if [ $(($($tenths) - start_time)) -ge $tm_out ] ; then
+ echo "perf recording did not start"
+ return 1
+ fi
+ done
+ return 0
+}
+
+# Wait for process PID $1 to exit
+# Time out after $2 tenths of a second or 5 seconds if $2 is ""
+wait_for_process_to_exit()
+{
+ tm_out=$2 ; [ -n "${tm_out}" ] || tm_out=50
+ start_time=$($tenths)
+ while [ -e "/proc/$1" ] ; do
+ # Wait at most tm_out tenths of a second
+ if [ $(($($tenths) - start_time)) -ge $tm_out ] ; then
+ echo "PID $1 did not exit as expected"
+ return 1
+ fi
+ done
+ return 0
+}
+
+# Check if PID $1 is still running after $2 tenths of a second
+# or 0.3 seconds if $2 is ""
+is_running()
+{
+ tm_out=$2 ; [ -n "${tm_out}" ] || tm_out=3
+ start_time=$($tenths)
+ while [ -e "/proc/$1" ] ; do
+ # Check for at least tm_out tenths of a second
+ if [ $(($($tenths) - start_time)) -gt $tm_out ] ; then
+ return 0
+ fi
+ done
+ echo "PID $1 exited prematurely"
+ return 1
+}
diff --git a/tools/perf/tests/shell/lock_contention.sh b/tools/perf/tests/shell/lock_contention.sh
new file mode 100755
index 0000000000..d120e83db7
--- /dev/null
+++ b/tools/perf/tests/shell/lock_contention.sh
@@ -0,0 +1,284 @@
+#!/bin/sh
+# kernel lock contention analysis test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+result=$(mktemp /tmp/__perf_test.result.XXXXX)
+
+cleanup() {
+ rm -f ${perfdata}
+ rm -f ${result}
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit ${err}
+}
+trap trap_cleanup EXIT TERM INT
+
+check() {
+ if [ "$(id -u)" != 0 ]; then
+ echo "[Skip] No root permission"
+ err=2
+ exit
+ fi
+
+ if ! perf list | grep -q lock:contention_begin; then
+ echo "[Skip] No lock contention tracepoints"
+ err=2
+ exit
+ fi
+}
+
+test_record()
+{
+ echo "Testing perf lock record and perf lock contention"
+ perf lock record -o ${perfdata} -- perf bench sched messaging > /dev/null 2>&1
+ # the output goes to the stderr and we expect only 1 output (-E 1)
+ perf lock contention -i ${perfdata} -E 1 -q 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"
+ err=1
+ exit
+ fi
+}
+
+test_bpf()
+{
+ echo "Testing perf lock contention --use-bpf"
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ echo "[Skip] No BPF support"
+ return
+ fi
+
+ # the perf lock contention output goes to the stderr
+ perf lock con -a -b -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
+ err=1
+ exit
+ fi
+}
+
+test_record_concurrent()
+{
+ echo "Testing perf lock record and perf lock contention at the same time"
+ perf lock record -o- -- perf bench sched messaging 2> /dev/null | \
+ perf lock contention -i- -E 1 -q 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"
+ err=1
+ exit
+ fi
+}
+
+test_aggr_task()
+{
+ echo "Testing perf lock contention --threads"
+ perf lock contention -i ${perfdata} -t -E 1 -q 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"
+ err=1
+ exit
+ fi
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ return
+ fi
+
+ # the perf lock contention output goes to the stderr
+ perf lock con -a -b -t -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
+ err=1
+ exit
+ fi
+}
+
+test_aggr_addr()
+{
+ echo "Testing perf lock contention --lock-addr"
+ perf lock contention -i ${perfdata} -l -E 1 -q 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result count is not 1:" "$(cat "${result}" | wc -l)"
+ err=1
+ exit
+ fi
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ return
+ fi
+
+ # the perf lock contention output goes to the stderr
+ perf lock con -a -b -l -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result count is not 1:" "$(cat "${result}" | wc -l)"
+ err=1
+ exit
+ fi
+}
+
+test_type_filter()
+{
+ echo "Testing perf lock contention --type-filter (w/ spinlock)"
+ perf lock contention -i ${perfdata} -Y spinlock -q 2> ${result}
+ if [ "$(grep -c -v spinlock "${result}")" != "0" ]; then
+ echo "[Fail] Recorded result should not have non-spinlocks:" "$(cat "${result}")"
+ err=1
+ exit
+ fi
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ return
+ fi
+
+ perf lock con -a -b -Y spinlock -q -- perf bench sched messaging > /dev/null 2> ${result}
+ if [ "$(grep -c -v spinlock "${result}")" != "0" ]; then
+ echo "[Fail] BPF result should not have non-spinlocks:" "$(cat "${result}")"
+ err=1
+ exit
+ fi
+}
+
+test_lock_filter()
+{
+ echo "Testing perf lock contention --lock-filter (w/ tasklist_lock)"
+ perf lock contention -i ${perfdata} -l -q 2> ${result}
+ if [ "$(grep -c tasklist_lock "${result}")" != "1" ]; then
+ echo "[Skip] Could not find 'tasklist_lock'"
+ return
+ fi
+
+ perf lock contention -i ${perfdata} -L tasklist_lock -q 2> ${result}
+
+ # find out the type of tasklist_lock
+ test_lock_filter_type=$(head -1 "${result}" | awk '{ print $8 }' | sed -e 's/:.*//')
+
+ if [ "$(grep -c -v "${test_lock_filter_type}" "${result}")" != "0" ]; then
+ echo "[Fail] Recorded result should not have non-${test_lock_filter_type} locks:" "$(cat "${result}")"
+ err=1
+ exit
+ fi
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ return
+ fi
+
+ perf lock con -a -b -L tasklist_lock -q -- perf bench sched messaging > /dev/null 2> ${result}
+ if [ "$(grep -c -v "${test_lock_filter_type}" "${result}")" != "0" ]; then
+ echo "[Fail] BPF result should not have non-${test_lock_filter_type} locks:" "$(cat "${result}")"
+ err=1
+ exit
+ fi
+}
+
+test_stack_filter()
+{
+ echo "Testing perf lock contention --callstack-filter (w/ unix_stream)"
+ perf lock contention -i ${perfdata} -v -q 2> ${result}
+ if [ "$(grep -c unix_stream "${result}")" = "0" ]; then
+ echo "[Skip] Could not find 'unix_stream'"
+ return
+ fi
+
+ perf lock contention -i ${perfdata} -E 1 -S unix_stream -q 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result should have a lock from unix_stream:" "$(cat "${result}")"
+ err=1
+ exit
+ fi
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ return
+ fi
+
+ perf lock con -a -b -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result should have a lock from unix_stream:" "$(cat "${result}")"
+ err=1
+ exit
+ fi
+}
+
+test_aggr_task_stack_filter()
+{
+ echo "Testing perf lock contention --callstack-filter with task aggregation"
+ perf lock contention -i ${perfdata} -v -q 2> ${result}
+ if [ "$(grep -c unix_stream "${result}")" = "0" ]; then
+ echo "[Skip] Could not find 'unix_stream'"
+ return
+ fi
+
+ perf lock contention -i ${perfdata} -t -E 1 -S unix_stream -q 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] Recorded result should have a task from unix_stream:" "$(cat "${result}")"
+ err=1
+ exit
+ fi
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ return
+ fi
+
+ perf lock con -a -b -t -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ if [ "$(cat "${result}" | wc -l)" != "1" ]; then
+ echo "[Fail] BPF result should have a task from unix_stream:" "$(cat "${result}")"
+ err=1
+ exit
+ fi
+}
+
+test_csv_output()
+{
+ echo "Testing perf lock contention CSV output"
+ perf lock contention -i ${perfdata} -E 1 -x , --output ${result}
+ # count the number of commas in the header
+ # there are 6 columns (contended, total-wait, max-wait, avg-wait, type, caller), so it should have 5 commas
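+ # e.g. (illustrative only) a header of the form
+ #   # output: contended, total wait (ns), max wait (ns), avg wait (ns), type, caller
+ # would therefore contain exactly 5 commas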
+ header=$(grep "# output:" ${result} | tr -d -c , | wc -c)
+ if [ "${header}" != "5" ]; then
+ echo "[Fail] Recorded result does not have enough output columns: ${header} != 5"
+ err=1
+ exit
+ fi
+ # count the number of commas in the output
+ output=$(grep -v "^#" ${result} | tr -d -c , | wc -c)
+ if [ "${header}" != "${output}" ]; then
+ echo "[Fail] Recorded result does not match the number of commas: ${header} != ${output}"
+ err=1
+ exit
+ fi
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ echo "[Skip] No BPF support"
+ return
+ fi
+
+ # the perf lock contention output goes to the stderr
+ perf lock con -a -b -E 1 -x , --output ${result} -- perf bench sched messaging > /dev/null 2>&1
+ output=$(grep -v "^#" ${result} | tr -d -c , | wc -c)
+ if [ "${header}" != "${output}" ]; then
+ echo "[Fail] BPF result does not match the number of commas: ${header} != ${output}"
+ err=1
+ exit
+ fi
+}
+
+check
+
+test_record
+test_bpf
+test_record_concurrent
+test_aggr_task
+test_aggr_addr
+test_type_filter
+test_lock_filter
+test_stack_filter
+test_aggr_task_stack_filter
+test_csv_output
+
+exit ${err}
diff --git a/tools/perf/tests/shell/pipe_test.sh b/tools/perf/tests/shell/pipe_test.sh
new file mode 100755
index 0000000000..8dd115dd35
--- /dev/null
+++ b/tools/perf/tests/shell/pipe_test.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+# perf pipe recording and injection test
+# SPDX-License-Identifier: GPL-2.0
+
+data=$(mktemp /tmp/perf.data.XXXXXX)
+prog="perf test -w noploop"
+task="perf"
+sym="noploop"
+
+if ! perf record -e task-clock:u -o - ${prog} | perf report -i - --task | grep ${task}; then
+ echo "cannot find the test task in the perf report"
+ exit 1
+fi
+
+if ! perf record -e task-clock:u -o - ${prog} | perf inject -b | perf report -i - | grep ${sym}; then
+ echo "cannot find noploop function in pipe #1"
+ exit 1
+fi
+
+perf record -e task-clock:u -o - ${prog} | perf inject -b -o ${data}
+if ! perf report -i ${data} | grep ${sym}; then
+ echo "cannot find noploop function in pipe #2"
+ exit 1
+fi
+
+perf record -e task-clock:u -o ${data} ${prog}
+if ! perf inject -b -i ${data} | perf report -i - | grep ${sym}; then
+ echo "cannot find noploop function in pipe #3"
+ exit 1
+fi
+
+
+rm -f ${data} ${data}.old
+exit 0
diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh
new file mode 100755
index 0000000000..871243d6d0
--- /dev/null
+++ b/tools/perf/tests/shell/probe_vfs_getname.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# Add vfs_getname probe to get syscall args filenames
+
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_probe || exit 2
+
+. "$(dirname $0)"/lib/probe_vfs_getname.sh
+
+add_probe_vfs_getname || skip_if_no_debuginfo
+err=$?
+cleanup_probe_vfs_getname
+exit $err
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
new file mode 100755
index 0000000000..89214a6d99
--- /dev/null
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -0,0 +1,105 @@
+#!/bin/sh
+# probe libc's inet_pton & backtrace it with ping
+
+# Installs a probe on libc's inet_pton function, that will use uprobes,
+# then use 'perf trace' on a ping to localhost asking for just one packet
+# with a backtrace 3 levels deep, and check that it is what we expect.
+# This needs no debuginfo package, all is done using the libc ELF symtab
+# and the CFI info in the binaries.
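+# Roughly the manual equivalent of what this script automates (illustrative
+# only; the libc path, probe name and options below are assumptions):
+#   perf probe -x /lib64/libc.so.6 -a inet_pton
+#   perf record -e probe_libc:inet_pton/max-stack=3/ -- ping -6 -c 1 ::1
+#   perf script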
+
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+. "$(dirname "$0")/lib/probe.sh"
+. "$(dirname "$0")/lib/probe_vfs_getname.sh"
+
+libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
+nm -Dg $libc 2>/dev/null | grep -F -q inet_pton || exit 254
+
+event_pattern='probe_libc:inet_pton(\_[[:digit:]]+)?'
+
+add_libc_inet_pton_event() {
+
+ event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | tail -n +2 | head -n -5 | \
+ grep -P -o "$event_pattern(?=[[:space:]]\(on inet_pton in $libc\))")
+
+ if [ $? -ne 0 ] || [ -z "$event_name" ] ; then
+ printf "FAIL: could not add event\n"
+ return 1
+ fi
+}
+
+trace_libc_inet_pton_backtrace() {
+
+ expected=`mktemp -u /tmp/expected.XXX`
+
+ echo "ping[][0-9 \.:]+$event_name: \([[:xdigit:]]+\)" > $expected
+ echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+ case "$(uname -m)" in
+ s390x)
+ eventattr='call-graph=dwarf,max-stack=4'
+ echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+ echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ ;;
+ ppc64|ppc64le)
+ eventattr='max-stack=4'
+ echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ ;;
+ *)
+ eventattr='max-stack=3'
+ echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ ;;
+ esac
+
+ perf_data=`mktemp -u /tmp/perf.data.XXX`
+ perf_script=`mktemp -u /tmp/perf.script.XXX`
+
+ # Check presence of libtraceevent support to run perf record
+ skip_no_probe_record_support "$event_name/$eventattr/"
+ [ $? -eq 2 ] && return 2
+
+ perf record -e $event_name/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
+ # check if perf data file got created in above step.
+ if [ ! -e $perf_data ]; then
+ printf "FAIL: perf record failed to create \"%s\" \n" "$perf_data"
+ return 1
+ fi
+ perf script -i $perf_data | tac | grep -m1 ^ping -B9 | tac > $perf_script
+
+ exec 3<$perf_script
+ exec 4<$expected
+ while read line <&3 && read -r pattern <&4; do
+ [ -z "$pattern" ] && break
+ echo $line
+ echo "$line" | grep -E -q "$pattern"
+ if [ $? -ne 0 ] ; then
+ printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
+ return 1
+ fi
+ done
+
+ # If any statements are executed from this point onwards,
+ # the exit code of the last among these will be reflected
+ # in err below. If the exit code is 0, the test will pass
+ # even if the perf script output does not match.
+}
+
+delete_libc_inet_pton_event() {
+
+ if [ -n "$event_name" ] ; then
+ perf probe -q -d $event_name
+ fi
+}
+
+# Check for IPv6 interface existence
+ip a sh lo | grep -F -q inet6 || exit 2
+
+skip_if_no_perf_probe && \
+add_libc_inet_pton_event && \
+trace_libc_inet_pton_backtrace
+err=$?
+rm -f ${perf_data} ${perf_script} ${expected}
+delete_libc_inet_pton_event
+exit $err
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
new file mode 100755
index 0000000000..7f664f1889
--- /dev/null
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -0,0 +1,46 @@
+#!/bin/sh
+# Use vfs_getname probe to get syscall args filenames
+
+# Uses the 'perf test shell' library to add probe:vfs_getname to the system
+# then use it with 'perf record' using 'touch' to write to a temp file, then
+# checks that that was captured by the vfs_getname probe in the generated
+# perf.data file, with the temp file name as the pathname argument.
+
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+. "$(dirname "$0")/lib/probe.sh"
+
+skip_if_no_perf_probe || exit 2
+
+. "$(dirname "$0")/lib/probe_vfs_getname.sh"
+
+record_open_file() {
+ echo "Recording open file:"
+ # Check presence of libtraceevent support to run perf record
+ skip_no_probe_record_support "probe:vfs_getname*"
+ [ $? -eq 2 ] && return 2
+ perf record -o ${perfdata} -e probe:vfs_getname\* touch $file
+}
+
+perf_script_filenames() {
+ echo "Looking at perf.data file for vfs_getname records for the file we touched:"
+ perf script -i ${perfdata} | \
+ grep -E " +touch +[0-9]+ +\[[0-9]+\] +[0-9]+\.[0-9]+: +probe:vfs_getname[_0-9]*: +\([[:xdigit:]]+\) +pathname=\"${file}\""
+}
+
+add_probe_vfs_getname || skip_if_no_debuginfo
+err=$?
+if [ $err -ne 0 ] ; then
+ exit $err
+fi
+
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+file=$(mktemp /tmp/temporary_file.XXXXX)
+
+record_open_file && perf_script_filenames
+err=$?
+rm -f ${perfdata}
+rm -f ${file}
+cleanup_probe_vfs_getname
+exit $err
diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
new file mode 100755
index 0000000000..8929046e90
--- /dev/null
+++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+# Zstd perf.data compression/decompression
+
+# SPDX-License-Identifier: GPL-2.0
+
+trace_file=$(mktemp /tmp/perf.data.XXX)
+perf_tool=perf
+
+skip_if_no_z_record() {
+ $perf_tool record -h 2>&1 | grep -q '\-z, \-\-compression\-level'
+}
+
+collect_z_record() {
+ echo "Collecting compressed record file:"
+ [ "$(uname -m)" != s390x ] && gflag='-g'
+ $perf_tool record -o "$trace_file" $gflag -z -F 5000 -- \
+ dd count=500 if=/dev/urandom of=/dev/null
+}
+
+check_compressed_stats() {
+ echo "Checking compressed events stats:"
+ $perf_tool report -i "$trace_file" --header --stats | \
+ grep -E "(# compressed : Zstd,)|(COMPRESSED events:)"
+}
+
+check_compressed_output() {
+ $perf_tool inject -i "$trace_file" -o "$trace_file.decomp" &&
+ $perf_tool report -i "$trace_file" --stdio -F comm,dso,sym | head -n -3 > "$trace_file.comp.output" &&
+ $perf_tool report -i "$trace_file.decomp" --stdio -F comm,dso,sym | head -n -3 > "$trace_file.decomp.output" &&
+ diff "$trace_file.comp.output" "$trace_file.decomp.output"
+}
+
+skip_if_no_z_record || exit 2
+collect_z_record && check_compressed_stats && check_compressed_output
+err=$?
+rm -f "$trace_file"*
+exit $err
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
new file mode 100755
index 0000000000..4fbc74805d
--- /dev/null
+++ b/tools/perf/tests/shell/record.sh
@@ -0,0 +1,163 @@
+#!/bin/sh
+# perf record tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+shelldir=$(dirname "$0")
+. "${shelldir}"/lib/waiting.sh
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+testprog="perf test -w thloop"
+testsym="test_loop"
+
+cleanup() {
+ rm -rf "${perfdata}"
+ rm -rf "${perfdata}".old
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_per_thread() {
+ echo "Basic --per-thread mode test"
+ if ! perf record -o /dev/null --quiet ${testprog} 2> /dev/null
+ then
+ echo "Per-thread record [Skipped event not supported]"
+ return
+ fi
+ if ! perf record --per-thread -o "${perfdata}" ${testprog} 2> /dev/null
+ then
+ echo "Per-thread record [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf report -i "${perfdata}" -q | grep -q "${testsym}"
+ then
+ echo "Per-thread record [Failed missing output]"
+ err=1
+ return
+ fi
+
+ # run the test program in background (for 30 seconds)
+ ${testprog} 30 &
+ TESTPID=$!
+
+ rm -f "${perfdata}"
+
+ wait_for_threads ${TESTPID} 2
+ perf record -p "${TESTPID}" --per-thread -o "${perfdata}" sleep 1 2> /dev/null
+ kill ${TESTPID}
+
+ if [ ! -e "${perfdata}" ]
+ then
+ echo "Per-thread record [Failed record -p]"
+ err=1
+ return
+ fi
+ if ! perf report -i "${perfdata}" -q | grep -q "${testsym}"
+ then
+ echo "Per-thread record [Failed -p missing output]"
+ err=1
+ return
+ fi
+
+ echo "Basic --per-thread mode test [Success]"
+}
+
+test_register_capture() {
+ echo "Register capture test"
+ if ! perf list | grep -q 'br_inst_retired.near_call'
+ then
+ echo "Register capture test [Skipped missing event]"
+ return
+ fi
+ if ! perf record --intr-regs=\? 2>&1 | grep -q 'available registers: AX BX CX DX SI DI BP SP IP FLAGS CS SS R8 R9 R10 R11 R12 R13 R14 R15'
+ then
+ echo "Register capture test [Skipped missing registers]"
+ return
+ fi
+ if ! perf record -o - --intr-regs=di,r8,dx,cx -e br_inst_retired.near_call \
+ -c 1000 --per-thread ${testprog} 2> /dev/null \
+ | perf script -F ip,sym,iregs -i - 2> /dev/null \
+ | grep -q "DI:"
+ then
+ echo "Register capture test [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Register capture test [Success]"
+}
+
+test_system_wide() {
+ echo "Basic --system-wide mode test"
+ if ! perf record -aB --synth=no -o "${perfdata}" ${testprog} 2> /dev/null
+ then
+ echo "System-wide record [Skipped not supported]"
+ return
+ fi
+ if ! perf report -i "${perfdata}" -q | grep -q "${testsym}"
+ then
+ echo "System-wide record [Failed missing output]"
+ err=1
+ return
+ fi
+ if ! perf record -aB --synth=no -e cpu-clock,cs --threads=cpu \
+ -o "${perfdata}" ${testprog} 2> /dev/null
+ then
+ echo "System-wide record [Failed record --threads option]"
+ err=1
+ return
+ fi
+ if ! perf report -i "${perfdata}" -q | grep -q "${testsym}"
+ then
+ echo "System-wide record [Failed --threads missing output]"
+ err=1
+ return
+ fi
+ echo "Basic --system-wide mode test [Success]"
+}
+
+test_workload() {
+ echo "Basic target workload test"
+ if ! perf record -o "${perfdata}" ${testprog} 2> /dev/null
+ then
+ echo "Workload record [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf report -i "${perfdata}" -q | grep -q "${testsym}"
+ then
+ echo "Workload record [Failed missing output]"
+ err=1
+ return
+ fi
+ if ! perf record -e cpu-clock,cs --threads=package \
+ -o "${perfdata}" ${testprog} 2> /dev/null
+ then
+ echo "Workload record [Failed record --threads option]"
+ err=1
+ return
+ fi
+ if ! perf report -i "${perfdata}" -q | grep -q "${testsym}"
+ then
+ echo "Workload record [Failed --threads missing output]"
+ err=1
+ return
+ fi
+ echo "Basic target workload test [Success]"
+}
+
+test_per_thread
+test_register_capture
+test_system_wide
+test_workload
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/record_bpf_filter.sh b/tools/perf/tests/shell/record_bpf_filter.sh
new file mode 100755
index 0000000000..31c593966e
--- /dev/null
+++ b/tools/perf/tests/shell/record_bpf_filter.sh
@@ -0,0 +1,134 @@
+#!/bin/sh
+# perf record sample filtering (by BPF) tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f "${perfdata}"
+ rm -f "${perfdata}".old
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_bpf_filter_priv() {
+ echo "Checking BPF-filter privilege"
+
+ if [ "$(id -u)" != 0 ]
+ then
+ echo "bpf-filter test [Skipped permission]"
+ err=2
+ return
+ fi
+ if ! perf record -e task-clock --filter 'period > 1' \
+ -o /dev/null --quiet true 2>&1
+ then
+ echo "bpf-filter test [Skipped missing BPF support]"
+ err=2
+ return
+ fi
+}
+
+test_bpf_filter_basic() {
+ echo "Basic bpf-filter test"
+
+ if ! perf record -e task-clock -c 10000 --filter 'ip < 0xffffffff00000000' \
+ -o "${perfdata}" true 2> /dev/null
+ then
+ echo "Basic bpf-filter test [Failed record]"
+ err=1
+ return
+ fi
+ if perf script -i "${perfdata}" -F ip | grep 'ffffffff[0-9a-f]*'
+ then
+ if uname -r | grep -q ^6.2
+ then
+ echo "Basic bpf-filter test [Skipped unsupported kernel]"
+ err=2
+ return
+ fi
+ echo "Basic bpf-filter test [Failed invalid output]"
+ err=1
+ return
+ fi
+ echo "Basic bpf-filter test [Success]"
+}
+
+test_bpf_filter_fail() {
+ echo "Failing bpf-filter test"
+
+ # 'cpu' requires PERF_SAMPLE_CPU flag
+ if ! perf record -e task-clock --filter 'cpu > 0' \
+ -o /dev/null true 2>&1 | grep PERF_SAMPLE_CPU
+ then
+ echo "Failing bpf-filter test [Failed forbidden CPU]"
+ err=1
+ return
+ fi
+
+ if ! perf record --sample-cpu -e task-clock --filter 'cpu > 0' \
+ -o /dev/null true 2>/dev/null
+ then
+ echo "Failing bpf-filter test [Failed should succeed]"
+ err=1
+ return
+ fi
+
+ echo "Failing bpf-filter test [Success]"
+}
+
+test_bpf_filter_group() {
+ echo "Group bpf-filter test"
+
+ if ! perf record -e task-clock --filter 'period > 1000 || ip > 0' \
+ -o /dev/null true 2>/dev/null
+ then
+ echo "Group bpf-filter test [Failed should succeed]"
+ err=1
+ return
+ fi
+
+ if ! perf record -e task-clock --filter 'cpu > 0 || ip > 0' \
+ -o /dev/null true 2>&1 | grep PERF_SAMPLE_CPU
+ then
+ echo "Group bpf-filter test [Failed forbidden CPU]"
+ err=1
+ return
+ fi
+
+ if ! perf record -e task-clock --filter 'period > 0 || code_pgsz > 4096' \
+ -o /dev/null true 2>&1 | grep PERF_SAMPLE_CODE_PAGE_SIZE
+ then
+ echo "Group bpf-filter test [Failed forbidden CODE_PAGE_SIZE]"
+ err=1
+ return
+ fi
+
+ echo "Group bpf-filter test [Success]"
+}
+
+
+test_bpf_filter_priv
+
+if [ $err = 0 ]; then
+ test_bpf_filter_basic
+fi
+
+if [ $err = 0 ]; then
+ test_bpf_filter_fail
+fi
+
+if [ $err = 0 ]; then
+ test_bpf_filter_group
+fi
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
new file mode 100755
index 0000000000..a0d14cd0aa
--- /dev/null
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -0,0 +1,103 @@
+#!/bin/sh
+# perf record offcpu profiling tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f ${perfdata}
+ rm -f ${perfdata}.old
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+test_offcpu_priv() {
+ echo "Checking off-cpu privilege"
+
+ if [ "$(id -u)" != 0 ]
+ then
+ echo "off-cpu test [Skipped permission]"
+ err=2
+ return
+ fi
+ if perf record --off-cpu -o /dev/null --quiet true 2>&1 | grep BUILD_BPF_SKEL
+ then
+ echo "off-cpu test [Skipped missing BPF support]"
+ err=2
+ return
+ fi
+}
+
+test_offcpu_basic() {
+ echo "Basic off-cpu test"
+
+ if ! perf record --off-cpu -e dummy -o ${perfdata} sleep 1 2> /dev/null
+ then
+ echo "Basic off-cpu test [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf evlist -i ${perfdata} | grep -q "offcpu-time"
+ then
+ echo "Basic off-cpu test [Failed no event]"
+ err=1
+ return
+ fi
+ if ! perf report -i ${perfdata} -q --percent-limit=90 | grep -E -q sleep
+ then
+ echo "Basic off-cpu test [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Basic off-cpu test [Success]"
+}
+
+test_offcpu_child() {
+ echo "Child task off-cpu test"
+
+ # perf bench sched messaging creates 400 processes
+ if ! perf record --off-cpu -e dummy -o ${perfdata} -- \
+ perf bench sched messaging -g 10 > /dev/null 2>&1
+ then
+ echo "Child task off-cpu test [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf evlist -i ${perfdata} | grep -q "offcpu-time"
+ then
+ echo "Child task off-cpu test [Failed no event]"
+ err=1
+ return
+ fi
+ # each process waits for read and write, so there should be more than 800 events
+ if ! perf report -i ${perfdata} -s comm -q -n -t ';' --percent-limit=90 | \
+ awk -F ";" '{ if (NF > 3 && int($3) < 800) exit 1; }'
+ then
+ echo "Child task off-cpu test [Failed invalid output]"
+ err=1
+ return
+ fi
+ echo "Child task off-cpu test [Success]"
+}
+
+
+test_offcpu_priv
+
+if [ $err = 0 ]; then
+ test_offcpu_basic
+fi
+
+if [ $err = 0 ]; then
+ test_offcpu_child
+fi
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
new file mode 100755
index 0000000000..d890eb26e9
--- /dev/null
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+# perf stat CSV output linter
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Tests various perf stat CSV output commands for the
+# correct number of fields, using '@' as the CSV separator.
+
+set -e
+
+. "$(dirname $0)"/lib/stat_output.sh
+
+csv_sep=@
+
+stat_output=$(mktemp /tmp/__perf_test.stat_output.csv.XXXXX)
+
+cleanup() {
+ rm -f "${stat_output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+function commachecker()
+{
+ local -i cnt=0
+ local exp=0
+
+ case "$1"
+ in "--no-args") exp=6
+ ;; "--system-wide") exp=6
+ ;; "--event") exp=6
+ ;; "--interval") exp=7
+ ;; "--per-thread") exp=7
+ ;; "--system-wide-no-aggr") exp=7
+ [ "$(uname -m)" = "s390x" ] && exp='^[6-7]$'
+ ;; "--per-core") exp=8
+ ;; "--per-socket") exp=8
+ ;; "--per-node") exp=8
+ ;; "--per-die") exp=8
+ ;; "--per-cache") exp=8
+ esac
+
+ while read line
+ do
+ # Ignore initial "started on" comment.
+ x=${line:0:1}
+ [ "$x" = "#" ] && continue
+ # Ignore initial blank line.
+ [ "$line" = "" ] && continue
+
+ # Count the number of commas
+ x=$(echo $line | tr -d -c $csv_sep)
+ cnt="${#x}"
+ # echo $line $cnt
+ [[ ! "$cnt" =~ $exp ]] && {
+ echo "wrong number of fields. expected $exp in $line" 1>&2
+ exit 1;
+ }
+ done < "${stat_output}"
+ return 0
+}
+
+perf_cmd="-x$csv_sep -o ${stat_output}"
+
+skip_test=$(check_for_topology)
+check_no_args "CSV" "$perf_cmd"
+check_system_wide "CSV" "$perf_cmd"
+check_interval "CSV" "$perf_cmd"
+check_event "CSV" "$perf_cmd"
+check_per_thread "CSV" "$perf_cmd"
+check_per_node "CSV" "$perf_cmd"
+if [ $skip_test -ne 1 ]
+then
+ check_system_wide_no_aggr "CSV" "$perf_cmd"
+ check_per_core "CSV" "$perf_cmd"
+ check_per_cache_instance "CSV" "$perf_cmd"
+ check_per_die "CSV" "$perf_cmd"
+ check_per_socket "CSV" "$perf_cmd"
+else
+ echo "[Skip] Skipping tests for system_wide_no_aggr, per_core, per_die and per_socket since socket id exposed via topology is invalid"
+fi
+cleanup
+exit 0
diff --git a/tools/perf/tests/shell/stat+csv_summary.sh b/tools/perf/tests/shell/stat+csv_summary.sh
new file mode 100755
index 0000000000..8bae9c8a83
--- /dev/null
+++ b/tools/perf/tests/shell/stat+csv_summary.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+# perf stat csv summary test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+#
+# 1.001364330 9224197 cycles 8012885033 100.00
+# summary 9224197 cycles 8012885033 100.00
+#
+perf stat -e cycles -x' ' -I1000 --interval-count 1 --summary 2>&1 | \
+grep -e summary | \
+while read summary _num _event _run _pct
+do
+ if [ $summary != "summary" ]; then
+ exit 1
+ fi
+done
+
+#
+# 1.001360298 9148534 cycles 8012853854 100.00
+#9148534 cycles 8012853854 100.00
+#
+perf stat -e cycles -x' ' -I1000 --interval-count 1 --summary --no-csv-summary 2>&1 | \
+grep -e summary | \
+while read _num _event _run _pct
+do
+ exit 1
+done
+
+exit 0
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
new file mode 100755
index 0000000000..196e22672c
--- /dev/null
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -0,0 +1,219 @@
+#!/bin/bash
+# perf stat JSON output linter
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Checks various perf stat JSON output commands for the
+# correct number of fields.
+
+set -e
+
+skip_test=0
+
+pythonchecker=$(dirname $0)/lib/perf_json_output_lint.py
+if [ "x$PYTHON" == "x" ]
+then
+ if which python3 > /dev/null
+ then
+ PYTHON=python3
+ elif which python > /dev/null
+ then
+ PYTHON=python
+ else
+ echo "Skipping test, python not detected. Please set the environment variable PYTHON."
+ exit 2
+ fi
+fi
+
+stat_output=$(mktemp /tmp/__perf_test.stat_output.json.XXXXX)
+
+cleanup() {
+ rm -f "${stat_output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+# Return true if perf_event_paranoid is > $1 and not running as root.
+function ParanoidAndNotRoot()
+{
+ [ "$(id -u)" != 0 ] && [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt $1 ]
+}
+
+check_no_args()
+{
+ echo -n "Checking json output: no args "
+ perf stat -j -o "${stat_output}" true
+ $PYTHON $pythonchecker --no-args --file "${stat_output}"
+ echo "[Success]"
+}
+
+check_system_wide()
+{
+ echo -n "Checking json output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j -a -o "${stat_output}" true
+ $PYTHON $pythonchecker --system-wide --file "${stat_output}"
+ echo "[Success]"
+}
+
+check_system_wide_no_aggr()
+{
+ echo -n "Checking json output: system wide no aggregation "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j -A -a --no-merge -o "${stat_output}" true
+ $PYTHON $pythonchecker --system-wide-no-aggr --file "${stat_output}"
+ echo "[Success]"
+}
+
+check_interval()
+{
+ echo -n "Checking json output: interval "
+ perf stat -j -I 1000 -o "${stat_output}" true
+ $PYTHON $pythonchecker --interval --file "${stat_output}"
+ echo "[Success]"
+}
+
+
+check_event()
+{
+ echo -n "Checking json output: event "
+ perf stat -j -e cpu-clock -o "${stat_output}" true
+ $PYTHON $pythonchecker --event --file "${stat_output}"
+ echo "[Success]"
+}
+
+check_per_core()
+{
+ echo -n "Checking json output: per core "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-core -a -o "${stat_output}" true
+ $PYTHON $pythonchecker --per-core --file "${stat_output}"
+ echo "[Success]"
+}
+
+check_per_thread()
+{
+ echo -n "Checking json output: per thread "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-thread -a -o "${stat_output}" true
+ $PYTHON $pythonchecker --per-thread --file "${stat_output}"
+ echo "[Success]"
+}
+
+check_per_cache_instance()
+{
+ echo -n "Checking json output: per cache_instance "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-cache -a true 2>&1 | $PYTHON $pythonchecker --per-cache
+ echo "[Success]"
+}
+
+check_per_die()
+{
+ echo -n "Checking json output: per die "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-die -a -o "${stat_output}" true
+ $PYTHON $pythonchecker --per-die --file "${stat_output}"
+ echo "[Success]"
+}
+
+check_per_node()
+{
+ echo -n "Checking json output: per node "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-node -a -o "${stat_output}" true
+ $PYTHON $pythonchecker --per-node --file "${stat_output}"
+ echo "[Success]"
+}
+
+check_per_socket()
+{
+ echo -n "Checking json output: per socket "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-socket -a -o "${stat_output}" true
+ $PYTHON $pythonchecker --per-socket --file "${stat_output}"
+ echo "[Success]"
+}
+
+# The perf stat options for per-socket, per-core, per-die
+# and -A (no_aggr mode) use the info fetched from this
+# directory: "/sys/devices/system/cpu/cpu*/topology". For
+# example, the socket value is fetched from the
+# "physical_package_id" file in the topology directory.
+# Reference: cpu__get_topology_int in util/cpumap.c
+# If the platform doesn't expose topology information, values
+# will be set to -1. For example, on the powerpc pSeries
+# platform the value for "physical_package_id" is restricted
+# and set to -1. The check here validates the socket-id read
+# from the topology file before proceeding further.
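+#
+# For example (illustrative values only):
+#   $ cat /sys/devices/system/cpu/cpu0/topology/physical_package_id
+#   0     <- valid socket id, aggregation tests can run
+#   -1    <- topology not exposed, those tests are skipped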
+
+FILE_LOC="/sys/devices/system/cpu/cpu*/topology/"
+FILE_NAME="physical_package_id"
+
+check_for_topology()
+{
+ if ! ParanoidAndNotRoot 0
+ then
+ socket_file=`ls $FILE_LOC/$FILE_NAME | head -n 1`
+ [ -z $socket_file ] && return 0
+ socket_id=`cat $socket_file`
+ [ $socket_id == -1 ] && skip_test=1
+ return 0
+ fi
+}
+
+check_for_topology
+check_no_args
+check_system_wide
+check_interval
+check_event
+check_per_thread
+check_per_node
+if [ $skip_test -ne 1 ]
+then
+ check_system_wide_no_aggr
+ check_per_core
+ check_per_cache_instance
+ check_per_die
+ check_per_socket
+else
+ echo "[Skip] Skipping tests for system_wide_no_aggr, per_core, per_die and per_socket since socket id exposed via topology is invalid"
+fi
+cleanup
+exit 0
diff --git a/tools/perf/tests/shell/stat+shadow_stat.sh b/tools/perf/tests/shell/stat+shadow_stat.sh
new file mode 100755
index 0000000000..a1918a15e3
--- /dev/null
+++ b/tools/perf/tests/shell/stat+shadow_stat.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+# perf stat metrics (shadow stat) test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# skip if system-wide mode is forbidden
+perf stat -a true > /dev/null 2>&1 || exit 2
+
+# skip if on hybrid platform
+perf stat -a -e cycles sleep 1 2>&1 | grep -e cpu_core && exit 2
+
+test_global_aggr()
+{
+ perf stat -a --no-big-num -e cycles,instructions sleep 1 2>&1 | \
+ grep -e cycles -e instructions | \
+ while read num evt _hash ipc rest
+ do
+ # skip not counted events
+ if [ "$num" = "<not" ]; then
+ continue
+ fi
+
+ # save cycles count
+ if [ "$evt" = "cycles" ]; then
+ cyc=$num
+ continue
+ fi
+
+ # skip if no cycles
+ if [ -z "$cyc" ]; then
+ continue
+ fi
+
+ # use printf for rounding and a leading zero
+ res=`printf "%.2f" "$(echo "scale=6; $num / $cyc" | bc -q)"`
+ if [ "$ipc" != "$res" ]; then
+ echo "IPC is different: $res != $ipc ($num / $cyc)"
+ exit 1
+ fi
+ done
+}
+
+test_no_aggr()
+{
+ perf stat -a -A --no-big-num -e cycles,instructions sleep 1 2>&1 | \
+ grep ^CPU | \
+ while read cpu num evt _hash ipc rest
+ do
+ # skip not counted events
+ if [ "$num" = "<not" ]; then
+ continue
+ fi
+
+ # save cycles count
+ if [ "$evt" = "cycles" ]; then
+ results="$results $cpu:$num"
+ continue
+ fi
+
+ cyc=${results##* $cpu:}
+ cyc=${cyc%% *}
+
+ # skip if no cycles
+ if [ -z "$cyc" ]; then
+ continue
+ fi
+
+ # use printf for rounding and a leading zero
+ res=`printf "%.2f" "$(echo "scale=6; $num / $cyc" | bc -q)"`
+ if [ "$ipc" != "$res" ]; then
+ echo "IPC is different for $cpu: $res != $ipc ($num / $cyc)"
+ exit 1
+ fi
+ done
+}
+
+test_global_aggr
+test_no_aggr
+
+exit 0
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
new file mode 100755
index 0000000000..fb2b10547a
--- /dev/null
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+# perf stat STD output linter
+# SPDX-License-Identifier: GPL-2.0
+# Tests various perf stat STD output commands for
+# default event and metricgroup
+
+set -e
+
+. "$(dirname $0)"/lib/stat_output.sh
+
+stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
+
+event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses)
+event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches")
+skip_metric=("stalled cycles per insn" "tma_")
+
+cleanup() {
+ rm -f "${stat_output}"
+
+ trap - EXIT TERM INT
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup EXIT TERM INT
+
+function commachecker()
+{
+ local prefix=1
+
+ case "$1"
+ in "--interval") prefix=2
+ ;; "--per-thread") prefix=2
+ ;; "--system-wide-no-aggr") prefix=2
+ ;; "--per-core") prefix=3
+ ;; "--per-socket") prefix=3
+ ;; "--per-node") prefix=3
+ ;; "--per-die") prefix=3
+ ;; "--per-cache") prefix=3
+ esac
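+
+	# Illustrative examples (not actual perf output) of why the prefix differs:
+	#   --interval:  "1.001  <count> <event> ..."        -> counts start at field 2
+	#   --per-core:  "S0-D0-C0  2  <count> <event> ..."  -> counts start at field 3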
+
+ while read line
+ do
+ # Ignore initial "started on" comment.
+ x=${line:0:1}
+ [ "$x" = "#" ] && continue
+ # Ignore initial blank line.
+ [ "$line" = "" ] && continue
+ # Ignore "Performance counter stats"
+ x=${line:0:25}
+ [ "$x" = "Performance counter stats" ] && continue
+ # Ignore "seconds time elapsed" and break
+ [[ "$line" == *"time elapsed"* ]] && break
+
+ main_body=$(echo $line | cut -d' ' -f$prefix-)
+ x=${main_body%#*}
+ [ "$x" = "" ] && continue
+
+ # Skip metrics without event name
+ y=${main_body#*#}
+ for i in "${!skip_metric[@]}"; do
+ [[ "$y" == *"${skip_metric[$i]}"* ]] && break
+ done
+ [[ "$y" == *"${skip_metric[$i]}"* ]] && continue
+
+ # Check default event
+ for i in "${!event_name[@]}"; do
+ [[ "$x" == *"${event_name[$i]}"* ]] && break
+ done
+
+ [[ ! "$x" == *"${event_name[$i]}"* ]] && {
+ echo "Unknown event name in $line" 1>&2
+ exit 1;
+ }
+
+ # Check event metric if it exists
+ [[ ! "$main_body" == *"#"* ]] && continue
+ [[ ! "$main_body" == *"${event_metric[$i]}"* ]] && {
+ echo "wrong event metric. expected ${event_metric[$i]} in $line" 1>&2
+ exit 1;
+ }
+ done < "${stat_output}"
+ return 0
+}
+
+perf_cmd="-o ${stat_output}"
+
+skip_test=$(check_for_topology)
+check_no_args "STD" "$perf_cmd"
+check_system_wide "STD" "$perf_cmd"
+check_interval "STD" "$perf_cmd"
+check_per_thread "STD" "$perf_cmd"
+check_per_node "STD" "$perf_cmd"
+if [ $skip_test -ne 1 ]
+then
+ check_system_wide_no_aggr "STD" "$perf_cmd"
+ check_per_core "STD" "$perf_cmd"
+ check_per_cache_instance "STD" "$perf_cmd"
+ check_per_die "STD" "$perf_cmd"
+ check_per_socket "STD" "$perf_cmd"
+else
+ echo "[Skip] Skipping tests for system_wide_no_aggr, per_core, per_die and per_socket since socket id exposed via topology is invalid"
+fi
+cleanup
+exit 0
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
new file mode 100755
index 0000000000..3f1e677954
--- /dev/null
+++ b/tools/perf/tests/shell/stat.sh
@@ -0,0 +1,156 @@
+#!/bin/sh
+# perf stat tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+test_default_stat() {
+ echo "Basic stat command test"
+ if ! perf stat true 2>&1 | grep -E -q "Performance counter stats for 'true':"
+ then
+ echo "Basic stat command test [Failed]"
+ err=1
+ return
+ fi
+ echo "Basic stat command test [Success]"
+}
+
+test_stat_record_report() {
+ echo "stat record and report test"
+ if ! perf stat record -o - true | perf stat report -i - 2>&1 | \
+ grep -E -q "Performance counter stats for 'pipe':"
+ then
+ echo "stat record and report test [Failed]"
+ err=1
+ return
+ fi
+ echo "stat record and report test [Success]"
+}
+
+test_stat_record_script() {
+ echo "stat record and script test"
+ if ! perf stat record -o - true | perf script -i - 2>&1 | \
+ grep -E -q "CPU[[:space:]]+THREAD[[:space:]]+VAL[[:space:]]+ENA[[:space:]]+RUN[[:space:]]+TIME[[:space:]]+EVENT"
+ then
+ echo "stat record and script test [Failed]"
+ err=1
+ return
+ fi
+ echo "stat record and script test [Success]"
+}
+
+test_stat_repeat_weak_groups() {
+ echo "stat repeat weak groups test"
+ if ! perf stat -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}' \
+ true 2>&1 | grep -q 'seconds time elapsed'
+ then
+ echo "stat repeat weak groups test [Skipped event parsing failed]"
+ return
+ fi
+ if ! perf stat -r2 -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}:W' \
+ true > /dev/null 2>&1
+ then
+ echo "stat repeat weak groups test [Failed]"
+ err=1
+ return
+ fi
+ echo "stat repeat weak groups test [Success]"
+}
+
+test_topdown_groups() {
+ # Topdown events must be grouped with the slots event first. Test that
+ # parse-events reorders this.
+ echo "Topdown event group test"
+ if ! perf stat -e '{slots,topdown-retiring}' true > /dev/null 2>&1
+ then
+ echo "Topdown event group test [Skipped event parsing failed]"
+ return
+ fi
+ if perf stat -e '{slots,topdown-retiring}' true 2>&1 | grep -E -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed events not supported]"
+ err=1
+ return
+ fi
+ if perf stat -e '{topdown-retiring,slots}' true 2>&1 | grep -E -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed slots not reordered first]"
+ err=1
+ return
+ fi
+ echo "Topdown event group test [Success]"
+}
+
+test_topdown_weak_groups() {
+ # Weak groups break if the perf_event_open of multiple grouped events
+ # fails. Breaking a topdown group causes the events to fail. Test a very large
+ # grouping to see that the topdown events aren't broken out.
+ echo "Topdown weak groups test"
+ ok_grouping="{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring},branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,cache-misses,cache-references"
+ if ! perf stat --no-merge -e "$ok_grouping" true > /dev/null 2>&1
+ then
+ echo "Topdown weak groups test [Skipped event parsing failed]"
+ return
+ fi
+ group_needs_break="{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring,branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,cache-misses,cache-references}:W"
+ if perf stat --no-merge -e "$group_needs_break" true 2>&1 | grep -E -q "<not supported>"
+ then
+ echo "Topdown weak groups test [Failed events not supported]"
+ err=1
+ return
+ fi
+ echo "Topdown weak groups test [Success]"
+}
+
+test_cputype() {
+ # Test --cputype argument.
+ echo "cputype test"
+
+ # Bogus PMU should fail.
+ if perf stat --cputype="123" -e instructions true > /dev/null 2>&1
+ then
+ echo "cputype test [Bogus PMU didn't fail]"
+ err=1
+ return
+ fi
+
+ # Find a known PMU for cputype.
+ pmu=""
+ for i in cpu cpu_atom armv8_pmuv3_0
+ do
+ if test -d "/sys/devices/$i"
+ then
+ pmu="$i"
+ break
+ fi
+ if perf stat -e "$i/instructions/" true > /dev/null 2>&1
+ then
+ pmu="$i"
+ break
+ fi
+ done
+ if test "x$pmu" = "x"
+ then
+ echo "cputype test [Skipped known PMU not found]"
+ return
+ fi
+
+ # Test running with cputype produces output.
+ if ! perf stat --cputype="$pmu" -e instructions true 2>&1 | grep -E -q "instructions"
+ then
+ echo "cputype test [Failed count missed with given filter]"
+ err=1
+ return
+ fi
+ echo "cputype test [Success]"
+}
+
+test_default_stat
+test_stat_record_report
+test_stat_record_script
+test_stat_repeat_weak_groups
+test_topdown_groups
+test_topdown_weak_groups
+test_cputype
+exit $err
diff --git a/tools/perf/tests/shell/stat_all_metricgroups.sh b/tools/perf/tests/shell/stat_all_metricgroups.sh
new file mode 100755
index 0000000000..cb35e48880
--- /dev/null
+++ b/tools/perf/tests/shell/stat_all_metricgroups.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# perf all metricgroups test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+for m in $(perf list --raw-dump metricgroups); do
+ echo "Testing $m"
+ perf stat -M "$m" -a true
+done
+
+exit 0
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
new file mode 100755
index 0000000000..54774525e1
--- /dev/null
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# perf all metrics test
+# SPDX-License-Identifier: GPL-2.0
+
+err=0
+for m in $(perf list --raw-dump metrics); do
+ echo "Testing $m"
+ result=$(perf stat -M "$m" true 2>&1)
+ if [[ "$result" =~ ${m:0:50} ]] || [[ "$result" =~ "<not supported>" ]]
+ then
+ continue
+ fi
+ # Failed so try system wide.
+ result=$(perf stat -M "$m" -a sleep 0.01 2>&1)
+ if [[ "$result" =~ ${m:0:50} ]]
+ then
+ continue
+ fi
+ # Failed again, possibly the workload was too small so retry with something
+ # longer.
+ result=$(perf stat -M "$m" perf bench internals synthesize 2>&1)
+ if [[ "$result" =~ ${m:0:50} ]]
+ then
+ continue
+ fi
+ echo "Metric '$m' not printed in:"
+ echo "$result"
+ if [[ "$err" != "1" ]]
+ then
+ err=2
+ if [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
+ then
+ echo "Skip, not fail, for FP issues"
+ elif [[ "$result" =~ "PMM" ]]
+ then
+ echo "Skip, not fail, for Optane memory issues"
+ else
+ err=1
+ fi
+ fi
+done
+
+exit "$err"
diff --git a/tools/perf/tests/shell/stat_all_pfm.sh b/tools/perf/tests/shell/stat_all_pfm.sh
new file mode 100755
index 0000000000..4d004f777a
--- /dev/null
+++ b/tools/perf/tests/shell/stat_all_pfm.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+# perf all libpfm4 events test
+# SPDX-License-Identifier: GPL-2.0
+
+if perf version --build-options | grep HAVE_LIBPFM | grep -q OFF
+then
+ echo "Skipping, no libpfm4 support"
+ exit 2
+fi
+
+err=0
+for p in $(perf list --raw-dump pfm)
+do
+ if echo "$p" | grep -q unc_
+ then
+ echo "Skipping uncore event '$p' that may require additional options."
+ continue
+ fi
+ echo "Testing $p"
+ result=$(perf stat --pfm-events "$p" true 2>&1)
+ x=$?
+ if echo "$result" | grep -q "failed to parse event $p : invalid or missing unit mask"
+ then
+ continue
+ fi
+ if test "$x" -ne "0"
+ then
+ echo "Unexpected exit code '$x'"
+ err=1
+ fi
+ if ! echo "$result" | grep -q "$p" && ! echo "$result" | grep -q "<not supported>"
+ then
+ # We failed to see the event and it is supported. Possibly the workload was
+ # too small so retry with something longer.
+ result=$(perf stat --pfm-events "$p" perf bench internals synthesize 2>&1)
+ x=$?
+ if test "$x" -ne "0"
+ then
+ echo "Unexpected exit code '$x'"
+ err=1
+ fi
+ if ! echo "$result" | grep -q "$p"
+ then
+ echo "Event '$p' not printed in:"
+ echo "$result"
+ err=1
+ fi
+ fi
+done
+
+exit "$err"
diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
new file mode 100755
index 0000000000..c779554191
--- /dev/null
+++ b/tools/perf/tests/shell/stat_all_pmu.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+# perf all PMU test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# Test all PMU events; however, exclude parameterized ones (name contains '?')
+for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g'); do
+ echo "Testing $p"
+ result=$(perf stat -e "$p" true 2>&1)
+ if ! echo "$result" | grep -q "$p" && ! echo "$result" | grep -q "<not supported>" ; then
+ # We failed to see the event and it is supported. Possibly the workload was
+ # too small so retry with something longer.
+ result=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
+ if ! echo "$result" | grep -q "$p" ; then
+ echo "Event '$p' not printed in:"
+ echo "$result"
+ exit 1
+ fi
+ fi
+done
+
+exit 0
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
new file mode 100755
index 0000000000..a87bb2814b
--- /dev/null
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+# perf stat --bpf-counters test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# check whether $2 is within +/- 10% of $1
+compare_number()
+{
+ first_num=$1
+ second_num=$2
+
+ # upper bound is first_num * 110%
+ upper=$(expr $first_num + $first_num / 10 )
+ # lower bound is first_num * 90%
+ lower=$(expr $first_num - $first_num / 10 )
+
+ if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
+		echo "The difference between $first_num and $second_num is greater than 10%."
+ exit 1
+ fi
+}
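+# e.g. (illustrative) "compare_number 1000 1050" passes, while
+# "compare_number 1000 1200" reports the >10% difference and exits 1.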
+
+# skip if --bpf-counters is not supported
+if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
+ if [ "$1" = "-v" ]; then
+ echo "Skipping: --bpf-counters not supported"
+ perf --no-pager stat -e cycles --bpf-counters true || true
+ fi
+ exit 2
+fi
+
+base_cycles=$(perf stat --no-big-num -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
+if [ "$base_cycles" = "<not" ]; then
+ echo "Skipping: cycles event not counted"
+ exit 2
+fi
+bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
+if [ "$bpf_cycles" = "<not" ]; then
+ echo "Failed: cycles not counted with --bpf-counters"
+ exit 1
+fi
+
+compare_number $base_cycles $bpf_cycles
+exit 0
diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
new file mode 100755
index 0000000000..e75d0780dc
--- /dev/null
+++ b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
@@ -0,0 +1,79 @@
+#!/bin/sh
+# perf stat --bpf-counters --for-each-cgroup test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+test_cgroups=
+if [ "$1" = "-v" ]; then
+ verbose="1"
+fi
+
+# skip if --bpf-counters --for-each-cgroup is not supported
+check_bpf_counter()
+{
+ if ! perf stat -a --bpf-counters --for-each-cgroup / true > /dev/null 2>&1; then
+ if [ "${verbose}" = "1" ]; then
+ echo "Skipping: --bpf-counters --for-each-cgroup not supported"
+ perf --no-pager stat -a --bpf-counters --for-each-cgroup / true || true
+ fi
+ exit 2
+ fi
+}
+
+# find two cgroups to measure
+find_cgroups()
+{
+ # try usual systemd slices first
+ if [ -d /sys/fs/cgroup/system.slice ] && [ -d /sys/fs/cgroup/user.slice ]; then
+ test_cgroups="system.slice,user.slice"
+ return
+ fi
+
+ # try root and self cgroups
+ find_cgroups_self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
+ if [ -z ${find_cgroups_self_cgrp} ]; then
+ # cgroup v2 doesn't specify perf_event
+ find_cgroups_self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
+ fi
+
+ if [ -z ${find_cgroups_self_cgrp} ]; then
+ test_cgroups="/"
+ else
+ test_cgroups="/,${find_cgroups_self_cgrp}"
+ fi
+}
+
+# As cgroup events are cpu-wide, we cannot simply compare the result.
+# Just check if it runs without failure and has non-zero results.
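+# e.g. (illustrative) any "<not counted>" or "<not supported>" field in the
+# CSV output is treated as a failure.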
+check_system_wide_counted()
+{
+ check_system_wide_counted_output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1 2>&1)
+ if echo ${check_system_wide_counted_output} | grep -q -F "<not "; then
+ echo "Some system-wide events are not counted"
+ if [ "${verbose}" = "1" ]; then
+ echo ${check_system_wide_counted_output}
+ fi
+ exit 1
+ fi
+}
+
+check_cpu_list_counted()
+{
+ check_cpu_list_counted_output=$(perf stat -C 0,1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1 2>&1)
+ if echo ${check_cpu_list_counted_output} | grep -q -F "<not "; then
+ echo "Some CPU events are not counted"
+ if [ "${verbose}" = "1" ]; then
+ echo ${check_cpu_list_counted_output}
+ fi
+ exit 1
+ fi
+}
+
+check_bpf_counter
+find_cgroups
+
+check_system_wide_counted
+check_cpu_list_counted
+
+exit 0
diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh
new file mode 100755
index 0000000000..ad94c936de
--- /dev/null
+++ b/tools/perf/tests/shell/stat_metrics_values.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# perf metrics value validation
+# SPDX-License-Identifier: GPL-2.0
+if [ "x$PYTHON" == "x" ]
+then
+ if which python3 > /dev/null
+ then
+ PYTHON=python3
+ else
+		echo "Skipping test: python3 not detected; please set the environment variable PYTHON."
+ exit 2
+ fi
+fi
+
+grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; }
+
+pythonvalidator=$(dirname $0)/lib/perf_metric_validation.py
+rulefile=$(dirname $0)/lib/perf_metric_validation_rules.json
+tmpdir=$(mktemp -d /tmp/__perf_test.program.XXXXX)
+workload="perf bench futex hash -r 2 -s"
+
+# Add -debug, save data file and full rule file
+echo "Launch python validation script $pythonvalidator"
+echo "Output will be stored in: $tmpdir"
+$PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}"
+ret=$?
+rm -rf $tmpdir
+
+exit $ret
+
diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
new file mode 100755
index 0000000000..66dfdfdad5
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+# Check Arm64 callgraphs are complete in fp mode
+# SPDX-License-Identifier: GPL-2.0
+
+lscpu | grep -q "aarch64" || exit 2
+
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+TEST_PROGRAM="perf test -w leafloop"
+
+cleanup_files()
+{
+ rm -f "$PERF_DATA"
+}
+
+trap cleanup_files EXIT TERM INT
+
+# Add a 1 second delay to skip samples that are not in the leaf() function
+# shellcheck disable=SC2086
+perf record -o "$PERF_DATA" --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &
+PID=$!
+
+echo " + Recording (PID=$PID)..."
+sleep 2
+echo " + Stopping perf-record..."
+
+kill $PID
+wait $PID
+
+# expected perf-script output:
+#
+# program
+# 728 leaf
+# 753 parent
+# 76c leafloop
+# ...
+
+perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4
+perf script -i "$PERF_DATA" -F comm,ip,sym | head -n4 | \
+ awk '{ if ($2 != "") sym[i++] = $2 } END { if (sym[0] != "leaf" ||
+ sym[1] != "parent" ||
+ sym[2] != "leafloop") exit 1 }'
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
new file mode 100755
index 0000000000..f1bf562116
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -0,0 +1,212 @@
+#!/bin/sh
+# Check Arm CoreSight trace data recording and synthesized samples
+
+# Uses 'perf record' to record trace data with Arm CoreSight sinks,
+# then verifies whether any branch samples and instruction samples
+# are generated by CoreSight, using the 'perf script' and 'perf report'
+# commands.
+
+# SPDX-License-Identifier: GPL-2.0
+# Leo Yan <leo.yan@linaro.org>, 2020
+
+glb_err=0
+
+skip_if_no_cs_etm_event() {
+ perf list | grep -q 'cs_etm//' && return 0
+
+ # cs_etm event doesn't exist
+ return 2
+}
+
+skip_if_no_cs_etm_event || exit 2
+
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+file=$(mktemp /tmp/temporary_file.XXXXX)
+
+cleanup_files()
+{
+ rm -f ${perfdata}
+ rm -f ${file}
+ rm -f "${perfdata}.old"
+ trap - EXIT TERM INT
+ exit $glb_err
+}
+
+trap cleanup_files EXIT TERM INT
+
+record_touch_file() {
+ echo "Recording trace (only user mode) with path: CPU$2 => $1"
+ rm -f $file
+ perf record -o ${perfdata} -e cs_etm/@$1/u --per-thread \
+ -- taskset -c $2 touch $file > /dev/null 2>&1
+}
+
+perf_script_branch_samples() {
+ echo "Looking at perf.data file for dumping branch samples:"
+
+ # Below is an example of the branch samples dumping:
+ # touch 6512 1 branches:u: ffffb220824c strcmp+0xc (/lib/aarch64-linux-gnu/ld-2.27.so)
+ # touch 6512 1 branches:u: ffffb22082e0 strcmp+0xa0 (/lib/aarch64-linux-gnu/ld-2.27.so)
+ # touch 6512 1 branches:u: ffffb2208320 strcmp+0xe0 (/lib/aarch64-linux-gnu/ld-2.27.so)
+ perf script -F,-time -i ${perfdata} 2>&1 | \
+ grep -E " +$1 +[0-9]+ .* +branches:(.*:)? +" > /dev/null 2>&1
+}
+
+perf_report_branch_samples() {
+ echo "Looking at perf.data file for reporting branch samples:"
+
+ # Below is an example of the branch samples reporting:
+ # 73.04% 73.04% touch libc-2.27.so [.] _dl_addr
+ # 7.71% 7.71% touch libc-2.27.so [.] getenv
+ # 2.59% 2.59% touch ld-2.27.so [.] strcmp
+ perf report --stdio -i ${perfdata} 2>&1 | \
+ grep -E " +[0-9]+\.[0-9]+% +[0-9]+\.[0-9]+% +$1 " > /dev/null 2>&1
+}
+
+perf_report_instruction_samples() {
+ echo "Looking at perf.data file for instruction samples:"
+
+ # Below is an example of the instruction samples reporting:
+ # 68.12% touch libc-2.27.so [.] _dl_addr
+ # 5.80% touch libc-2.27.so [.] getenv
+ # 4.35% touch ld-2.27.so [.] _dl_fixup
+ perf report --itrace=i20i --stdio -i ${perfdata} 2>&1 | \
+ grep -E " +[0-9]+\.[0-9]+% +$1" > /dev/null 2>&1
+}
+
+arm_cs_report() {
+ if [ $2 != 0 ]; then
+ echo "$1: FAIL"
+ glb_err=$2
+ else
+ echo "$1: PASS"
+ fi
+}
+
+is_device_sink() {
+	# If an "enable_sink" node exists under the device path, the
+	# device is a sink device. 'tpiu' needs to be excluded since it
+	# is not supported by the perf PMU.
+ echo "$1" | grep -E -q -v "tpiu"
+
+ if [ $? -eq 0 ] && [ -e "$1/enable_sink" ]; then
+
+ pmu_dev="/sys/bus/event_source/devices/cs_etm/sinks/$2"
+
+ # Warn if the device is not supported by PMU
+ if ! [ -f $pmu_dev ]; then
+ echo "PMU doesn't support $pmu_dev"
+ fi
+
+ return 0
+ fi
+
+ # Otherwise, it's not a sink device
+ return 1
+}
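+# e.g. (illustrative) "tmc_etf0" is treated as a sink when its
+# "enable_sink" node exists; a warning is printed if
+# /sys/bus/event_source/devices/cs_etm/sinks/tmc_etf0 is missing.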
+
+arm_cs_iterate_devices() {
+ for dev in $1/connections/out\:*; do
+
+ # Skip testing if it's not a directory
+ ! [ -d $dev ] && continue;
+
+		# Resolve the symbolic link to get the real path
+ path=`readlink -f $dev`
+
+ # Extract device name from path, e.g.
+ # path = '/sys/devices/platform/20010000.etf/tmc_etf0'
+ # `> device_name = 'tmc_etf0'
+ device_name=$(basename $path)
+
+ if is_device_sink $path $device_name; then
+
+ record_touch_file $device_name $2 &&
+ perf_script_branch_samples touch &&
+ perf_report_branch_samples touch &&
+ perf_report_instruction_samples touch
+
+ err=$?
+ arm_cs_report "CoreSight path testing (CPU$2 -> $device_name)" $err
+ fi
+
+ arm_cs_iterate_devices $dev $2
+ done
+}
+
+arm_cs_etm_traverse_path_test() {
+ # Iterate for every ETM device
+ for dev in /sys/bus/coresight/devices/etm*; do
+
+ # Find the ETM device belonging to which CPU
+ cpu=`cat $dev/cpu`
+
+ # Use depth-first search (DFS) to iterate outputs
+ arm_cs_iterate_devices $dev $cpu
+ done
+}
+
+arm_cs_etm_system_wide_test() {
+ echo "Recording trace with system wide mode"
+ perf record -o ${perfdata} -e cs_etm// -a -- ls > /dev/null 2>&1
+
+ # System-wide mode should include perf samples so test for that
+ # instead of ls
+ perf_script_branch_samples perf &&
+ perf_report_branch_samples perf &&
+ perf_report_instruction_samples perf
+
+ err=$?
+ arm_cs_report "CoreSight system wide testing" $err
+}
+
+arm_cs_etm_snapshot_test() {
+ echo "Recording trace with snapshot mode"
+ perf record -o ${perfdata} -e cs_etm// -S \
+ -- dd if=/dev/zero of=/dev/null > /dev/null 2>&1 &
+ PERFPID=$!
+
+ # Wait for perf program
+ sleep 1
+
+ # Send signal to snapshot trace data
+ kill -USR2 $PERFPID
+
+ # Stop perf program
+ kill $PERFPID
+ wait $PERFPID
+
+ perf_script_branch_samples dd &&
+ perf_report_branch_samples dd &&
+ perf_report_instruction_samples dd
+
+ err=$?
+ arm_cs_report "CoreSight snapshot testing" $err
+}
+
+arm_cs_etm_basic_test() {
+ echo "Recording trace with '$*'"
+ perf record -o ${perfdata} "$@" -- ls > /dev/null 2>&1
+
+ perf_script_branch_samples ls &&
+ perf_report_branch_samples ls &&
+ perf_report_instruction_samples ls
+
+ err=$?
+ arm_cs_report "CoreSight basic testing with '$*'" $err
+}
+
+arm_cs_etm_traverse_path_test
+arm_cs_etm_system_wide_test
+arm_cs_etm_snapshot_test
+
+# Test all combinations of per-thread, system-wide and normal mode with
+# and without timestamps
+arm_cs_etm_basic_test -e cs_etm/timestamp=0/ --per-thread
+arm_cs_etm_basic_test -e cs_etm/timestamp=1/ --per-thread
+arm_cs_etm_basic_test -e cs_etm/timestamp=0/ -a
+arm_cs_etm_basic_test -e cs_etm/timestamp=1/ -a
+arm_cs_etm_basic_test -e cs_etm/timestamp=0/
+arm_cs_etm_basic_test -e cs_etm/timestamp=1/
+
+exit $glb_err
diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh
new file mode 100755
index 0000000000..03d5c7d12e
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_spe.sh
@@ -0,0 +1,113 @@
+#!/bin/sh
+# Check Arm SPE trace data recording and synthesized samples
+
+# Uses 'perf record' to record trace data of Arm SPE events,
+# then verifies whether any SPE event samples are generated by SPE,
+# using the 'perf script' and 'perf report' commands.
+
+# SPDX-License-Identifier: GPL-2.0
+# German Gomez <german.gomez@arm.com>, 2021
+
+skip_if_no_arm_spe_event() {
+ perf list | grep -E -q 'arm_spe_[0-9]+//' && return 0
+
+ # arm_spe event doesn't exist
+ return 2
+}
+
+skip_if_no_arm_spe_event || exit 2
+
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+glb_err=0
+
+cleanup_files()
+{
+ rm -f ${perfdata}
+ rm -f ${perfdata}.old
+ exit $glb_err
+}
+
+trap cleanup_files EXIT TERM INT
+
+arm_spe_report() {
+ if [ $2 = 0 ]; then
+ echo "$1: PASS"
+ elif [ $2 = 2 ]; then
+ echo "$1: SKIPPED"
+ else
+ echo "$1: FAIL"
+ glb_err=$2
+ fi
+}
+
+perf_script_samples() {
+ echo "Looking at perf.data file for dumping samples:"
+
+ # from arm-spe.c/arm_spe_synth_events()
+ events="(ld1-miss|ld1-access|llc-miss|lld-access|tlb-miss|tlb-access|branch-miss|remote-access|memory)"
+
+ # Below is an example of the samples dumping:
+ # dd 3048 [002] 1 l1d-access: ffffaa64999c __GI___libc_write+0x3c (/lib/aarch64-linux-gnu/libc-2.27.so)
+ # dd 3048 [002] 1 tlb-access: ffffaa64999c __GI___libc_write+0x3c (/lib/aarch64-linux-gnu/libc-2.27.so)
+ # dd 3048 [002] 1 memory: ffffaa64999c __GI___libc_write+0x3c (/lib/aarch64-linux-gnu/libc-2.27.so)
+ perf script -F,-time -i ${perfdata} 2>&1 | \
+ grep -E " +$1 +[0-9]+ .* +${events}:(.*:)? +" > /dev/null 2>&1
+}
+
+perf_report_samples() {
+ echo "Looking at perf.data file for reporting samples:"
+
+ # Below is an example of the samples reporting:
+ # 73.04% 73.04% dd libc-2.27.so [.] _dl_addr
+ # 7.71% 7.71% dd libc-2.27.so [.] getenv
+ # 2.59% 2.59% dd ld-2.27.so [.] strcmp
+ perf report --stdio -i ${perfdata} 2>&1 | \
+ grep -E " +[0-9]+\.[0-9]+% +[0-9]+\.[0-9]+% +$1 " > /dev/null 2>&1
+}
+
+arm_spe_snapshot_test() {
+ echo "Recording trace with snapshot mode $perfdata"
+ perf record -o ${perfdata} -e arm_spe// -S \
+ -- dd if=/dev/zero of=/dev/null > /dev/null 2>&1 &
+ PERFPID=$!
+
+ # Wait for perf program
+ sleep 1
+
+ # Send signal to snapshot trace data
+ kill -USR2 $PERFPID
+
+ # Stop perf program
+ kill $PERFPID
+ wait $PERFPID
+
+ perf_script_samples dd &&
+ perf_report_samples dd
+
+ err=$?
+ arm_spe_report "SPE snapshot testing" $err
+}
+
+arm_spe_system_wide_test() {
+ echo "Recording trace with system-wide mode $perfdata"
+
+ perf record -o - -e dummy -a -B true > /dev/null 2>&1
+ if [ $? != 0 ]; then
+ arm_spe_report "SPE system-wide testing" 2
+ return
+ fi
+
+ perf record -o ${perfdata} -e arm_spe// -a --no-bpf-event \
+ -- dd if=/dev/zero of=/dev/null count=100000 > /dev/null 2>&1
+
+ perf_script_samples dd &&
+ perf_report_samples dd
+
+ err=$?
+ arm_spe_report "SPE system-wide testing" $err
+}
+
+arm_spe_snapshot_test
+arm_spe_system_wide_test
+
+exit $glb_err
diff --git a/tools/perf/tests/shell/test_arm_spe_fork.sh b/tools/perf/tests/shell/test_arm_spe_fork.sh
new file mode 100755
index 0000000000..1a7e6a82d0
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_spe_fork.sh
@@ -0,0 +1,50 @@
+#!/bin/sh
+# Check Arm SPE doesn't hang when there are forks
+
+# SPDX-License-Identifier: GPL-2.0
+# German Gomez <german.gomez@arm.com>, 2022
+
+skip_if_no_arm_spe_event() {
+ perf list | grep -E -q 'arm_spe_[0-9]+//' && return 0
+ return 2
+}
+
+skip_if_no_arm_spe_event || exit 2
+
+TEST_PROGRAM="perf test -w sqrtloop 10"
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+PERF_RECORD_LOG=$(mktemp /tmp/__perf_test.log.XXXXX)
+
+cleanup_files()
+{
+ echo "Cleaning up files..."
+ rm -f ${PERF_RECORD_LOG}
+ rm -f ${PERF_DATA}
+}
+
+trap cleanup_files EXIT TERM INT
+
+echo "Recording workload..."
+perf record -o ${PERF_DATA} -e arm_spe/period=65536/ -vvv -- $TEST_PROGRAM > ${PERF_RECORD_LOG} 2>&1 &
+PERFPID=$!
+
+# Check if perf hangs by checking the perf-record logs.
+sleep 1
+log0=$(wc -l $PERF_RECORD_LOG)
+echo Log lines = $log0
+sleep 1
+log1=$(wc -l $PERF_RECORD_LOG)
+echo Log lines after 1 second = $log1
+
+kill $PERFPID
+wait $PERFPID
+
+if [ "$log0" = "$log1" ];
+then
+ echo "SPE hang test: FAIL"
+ exit 1
+else
+ echo "SPE hang test: PASS"
+fi
+
+exit 0
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
new file mode 100755
index 0000000000..09908d71c9
--- /dev/null
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -0,0 +1,76 @@
+#!/bin/sh
+# Check branch stack sampling
+
+# SPDX-License-Identifier: GPL-2.0
+# German Gomez <german.gomez@arm.com>, 2022
+
+# skip the test if the hardware doesn't support branch stack sampling
+# and if the architecture doesn't support filter types: any,save_type,u
+if ! perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev/null 2>&1 ; then
+ echo "skip: system doesn't support filter types: any,save_type,u"
+ exit 2
+fi
+
+TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX)
+TESTPROG="perf test -w brstack"
+
+cleanup() {
+ rm -rf $TMPDIR
+}
+
+trap cleanup EXIT TERM INT
+
+test_user_branches() {
+ echo "Testing user branch stack sampling"
+
+ perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u -- ${TESTPROG} > /dev/null 2>&1
+ perf script -i $TMPDIR/perf.data --fields brstacksym | xargs -n1 > $TMPDIR/perf.script
+
+ # example of branch entries:
+ # brstack_foo+0x14/brstack_bar+0x40/P/-/-/0/CALL
+
+ set -x
+ grep -E -m1 "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/IND_CALL/.*$" $TMPDIR/perf.script
+ grep -E -m1 "^brstack_foo\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
+ grep -E -m1 "^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
+ grep -E -m1 "^brstack_bench\+[^ ]*/brstack_bar\+[^ ]*/CALL/.*$" $TMPDIR/perf.script
+ grep -E -m1 "^brstack_bar\+[^ ]*/brstack_foo\+[^ ]*/RET/.*$" $TMPDIR/perf.script
+ grep -E -m1 "^brstack_foo\+[^ ]*/brstack_bench\+[^ ]*/RET/.*$" $TMPDIR/perf.script
+ grep -E -m1 "^brstack_bench\+[^ ]*/brstack_bench\+[^ ]*/COND/.*$" $TMPDIR/perf.script
+ grep -E -m1 "^brstack\+[^ ]*/brstack\+[^ ]*/UNCOND/.*$" $TMPDIR/perf.script
+ set +x
+
+ # some branch types are still not being tested:
+ # IND COND_CALL COND_RET SYSCALL SYSRET IRQ SERROR NO_TX
+}
+
+# the first argument <arg0> is passed to "--branch-filter <arg0>,save_type,u"
+# the second argument is the list of expected branch types for the given filter
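+# e.g. (illustrative) test_filter "call" "CALL|SYSCALL" records with
+# "--branch-filter call,save_type,u" and fails if any other branch type shows up.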
+test_filter() {
+ test_filter_filter=$1
+ test_filter_expect=$2
+
+ echo "Testing branch stack filtering permutation ($test_filter_filter,$test_filter_expect)"
+
+ perf record -o $TMPDIR/perf.data --branch-filter $test_filter_filter,save_type,u -- ${TESTPROG} > /dev/null 2>&1
+ perf script -i $TMPDIR/perf.data --fields brstack | xargs -n1 > $TMPDIR/perf.script
+
+ # fail if we find any branch type that doesn't match any of the expected ones
+ # also consider UNKNOWN branch types (-)
+ if grep -E -vm1 "^[^ ]*/($test_filter_expect|-|( *))/.*$" $TMPDIR/perf.script; then
+ return 1
+ fi
+}
+
+set -e
+
+test_user_branches
+
+test_filter "any_call" "CALL|IND_CALL|COND_CALL|SYSCALL|IRQ"
+test_filter "call" "CALL|SYSCALL"
+test_filter "cond" "COND"
+test_filter "any_ret" "RET|COND_RET|SYSRET|ERET"
+
+test_filter "call,cond" "CALL|SYSCALL|COND"
+test_filter "any_call,cond" "CALL|IND_CALL|COND_CALL|IRQ|SYSCALL|COND"
+test_filter "cond,any_call,any_ret" "COND|CALL|IND_CALL|COND_CALL|SYSCALL|IRQ|RET|COND_RET|SYSRET|ERET"
diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh
new file mode 100755
index 0000000000..69bb6fe86c
--- /dev/null
+++ b/tools/perf/tests/shell/test_data_symbol.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+# Test data symbol
+
+# SPDX-License-Identifier: GPL-2.0
+# Leo Yan <leo.yan@linaro.org>, 2022
+
+skip_if_no_mem_event() {
+ perf mem record -e list 2>&1 | grep -E -q 'available' && return 0
+ return 2
+}
+
+skip_if_no_mem_event || exit 2
+
+TEST_PROGRAM="perf test -w datasym"
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+check_result() {
+ # The memory report format is as below:
+ # 99.92% ... [.] buf1+0x38
+ result=$(perf mem report -i ${PERF_DATA} -s symbol_daddr -q 2>&1 |
+ awk '/buf1/ { print $4 }')
+
+	# The test fails if there is no sample for "buf1"
+ [ -z "$result" ] && return 1
+
+ while IFS= read -r line; do
+ # The "data1" and "data2" fields in structure "buf1" have
+		# offsets "0x0" and "0x38"; return failure if any other
+		# offset value is detected.
+ if [ "$line" != "buf1+0x0" ] && [ "$line" != "buf1+0x38" ]; then
+ return 1
+ fi
+ done <<< "$result"
+
+ return 0
+}
+
+cleanup_files()
+{
+ echo "Cleaning up files..."
+ rm -f ${PERF_DATA}
+}
+
+trap cleanup_files exit term int
+
+echo "Recording workload..."
+
+# perf mem/c2c internally uses the IBS PMU on AMD CPUs, which doesn't
+# support user/kernel filtering and per-process monitoring, so pin the
+# program to a specific CPU and test in per-CPU mode.
+is_amd=$(grep -E -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo)
+if (($is_amd >= 1)); then
+ perf mem record -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM &
+else
+ perf mem record --all-user -o ${PERF_DATA} -- $TEST_PROGRAM &
+fi
+
+PERFPID=$!
+
+sleep 1
+
+kill $PERFPID
+wait $PERFPID
+
+check_result
+exit $?
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
new file mode 100755
index 0000000000..3a8b9bffa0
--- /dev/null
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -0,0 +1,687 @@
+#!/bin/sh
+# Miscellaneous Intel PT testing
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# Skip if no Intel PT
+perf list | grep -q 'intel_pt//' || exit 2
+
+shelldir=$(dirname "$0")
+. "${shelldir}"/lib/waiting.sh
+
+skip_cnt=0
+ok_cnt=0
+err_cnt=0
+
+temp_dir=$(mktemp -d /tmp/perf-test-intel-pt-sh.XXXXXXXXXX)
+
+tmpfile="${temp_dir}/tmp-perf.data"
+perfdatafile="${temp_dir}/test-perf.data"
+outfile="${temp_dir}/test-out.txt"
+errfile="${temp_dir}/test-err.txt"
+workload="${temp_dir}/workload"
+awkscript="${temp_dir}/awkscript"
+jitdump_workload="${temp_dir}/jitdump_workload"
+maxbrstack="${temp_dir}/maxbrstack.py"
+
+cleanup()
+{
+ trap - EXIT TERM INT
+ sane=$(echo "${temp_dir}" | cut -b 1-26)
+ if [ "${sane}" = "/tmp/perf-test-intel-pt-sh" ] ; then
+ echo "--- Cleaning up ---"
+ rm -f "${temp_dir}/"*
+ rmdir "${temp_dir}"
+ fi
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+# perf record for testing without decoding
+perf_record_no_decode()
+{
+ # Options to speed up recording: no post-processing, no build-id cache update,
+ # and no BPF events.
+ perf record -B -N --no-bpf-event "$@"
+}
+
+# perf record for testing should not need BPF events
+perf_record_no_bpf()
+{
+ # Options for no BPF events
+ perf record --no-bpf-event "$@"
+}
+
+have_workload=false
+cat << _end_of_file_ | /usr/bin/cc -o "${workload}" -xc - -pthread && have_workload=true
+#include <time.h>
+#include <pthread.h>
+
+void work(void) {
+ struct timespec tm = {
+ .tv_nsec = 1000000,
+ };
+ int i;
+
+ /* Run for about 30 seconds */
+ for (i = 0; i < 30000; i++)
+ nanosleep(&tm, NULL);
+}
+
+void *threadfunc(void *arg) {
+ work();
+ return NULL;
+}
+
+int main(void) {
+ pthread_t th;
+
+ pthread_create(&th, NULL, threadfunc, NULL);
+ work();
+ pthread_join(th, NULL);
+ return 0;
+}
+_end_of_file_
+
+can_cpu_wide()
+{
+ echo "Checking for CPU-wide recording on CPU $1"
+ if ! perf_record_no_decode -o "${tmpfile}" -e dummy:u -C "$1" true >/dev/null 2>&1 ; then
+		echo "No, so skipping"
+ return 2
+ fi
+ echo OK
+ return 0
+}
+
+test_system_wide_side_band()
+{
+ echo "--- Test system-wide sideband ---"
+
+ # Need CPU 0 and CPU 1
+ can_cpu_wide 0 || return $?
+ can_cpu_wide 1 || return $?
+
+ # Record on CPU 0 a task running on CPU 1
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt//u -C 0 -- taskset --cpu-list 1 uname
+
+	# Should get MMAP events from CPU 1 because they can be needed for decoding
+ mmap_cnt=$(perf script -i "${perfdatafile}" --no-itrace --show-mmap-events -C 1 2>/dev/null | grep -c MMAP)
+
+ if [ "${mmap_cnt}" -gt 0 ] ; then
+ echo OK
+ return 0
+ fi
+
+ echo "Failed to record MMAP events on CPU 1 when tracing CPU 0"
+ return 1
+}
+
+can_kernel()
+{
+ if [ -z "${can_kernel_trace}" ] ; then
+ can_kernel_trace=0
+ perf_record_no_decode -o "${tmpfile}" -e dummy:k true >/dev/null 2>&1 && can_kernel_trace=1
+ fi
+ if [ ${can_kernel_trace} -eq 0 ] ; then
+ echo "SKIP: no kernel tracing"
+ return 2
+ fi
+ return 0
+}
+
+test_per_thread()
+{
+ k="$1"
+ desc="$2"
+
+ echo "--- Test per-thread ${desc}recording ---"
+
+ if ! $have_workload ; then
+ echo "No workload, so skipping"
+ return 2
+ fi
+
+ if [ "${k}" = "k" ] ; then
+ can_kernel || return 2
+ fi
+
+ cat <<- "_end_of_file_" > "${awkscript}"
+ BEGIN {
+ s = "[ ]*"
+ u = s"[0-9]+"s
+ d = s"[0-9-]+"s
+ x = s"[0-9a-fA-FxX]+"s
+ mmapping = "idx"u": mmapping fd"u
+ set_output = "idx"u": set output fd"u"->"u
+ perf_event_open = "sys_perf_event_open: pid"d"cpu"d"group_fd"d"flags"x"="u
+ }
+
+ /perf record opening and mmapping events/ {
+ if (!done)
+ active = 1
+ }
+
+ /perf record done opening and mmapping events/ {
+ active = 0
+ done = 1
+ }
+
+ $0 ~ perf_event_open && active {
+ match($0, perf_event_open)
+ $0 = substr($0, RSTART, RLENGTH)
+ pid = $3
+ cpu = $5
+ fd = $11
+ print "pid " pid " cpu " cpu " fd " fd " : " $0
+ fd_array[fd] = fd
+ pid_array[fd] = pid
+ cpu_array[fd] = cpu
+ }
+
+ $0 ~ mmapping && active {
+ match($0, mmapping)
+ $0 = substr($0, RSTART, RLENGTH)
+ fd = $5
+ print "fd " fd " : " $0
+ if (fd in fd_array) {
+ mmap_array[fd] = 1
+ } else {
+ print "Unknown fd " fd
+ exit 1
+ }
+ }
+
+ $0 ~ set_output && active {
+ match($0, set_output)
+ $0 = substr($0, RSTART, RLENGTH)
+ fd = $6
+ fd_to = $8
+ print "fd " fd " fd_to " fd_to " : " $0
+ if (fd in fd_array) {
+ if (fd_to in fd_array) {
+ set_output_array[fd] = fd_to
+ } else {
+ print "Unknown fd " fd_to
+ exit 1
+ }
+ } else {
+ print "Unknown fd " fd
+ exit 1
+ }
+ }
+
+ END {
+ print "Checking " length(fd_array) " fds"
+ for (fd in fd_array) {
+ if (fd in mmap_array) {
+ pid = pid_array[fd]
+ if (pid != -1) {
+ if (pid in pids) {
+ print "More than 1 mmap for PID " pid
+ exit 1
+ }
+ pids[pid] = 1
+ }
+ cpu = cpu_array[fd]
+ if (cpu != -1) {
+ if (cpu in cpus) {
+ print "More than 1 mmap for CPU " cpu
+ exit 1
+ }
+ cpus[cpu] = 1
+ }
+ } else if (!(fd in set_output_array)) {
+ print "No mmap for fd " fd
+ exit 1
+ }
+ }
+ n = length(pids)
+ if (n != thread_cnt) {
+ print "Expected " thread_cnt " per-thread mmaps - found " n
+ exit 1
+ }
+ }
+ _end_of_file_
+
+ $workload &
+ w1=$!
+ $workload &
+ w2=$!
+ echo "Workload PIDs are $w1 and $w2"
+ wait_for_threads ${w1} 2
+ wait_for_threads ${w2} 2
+
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt//u"${k}" -vvv --per-thread -p "${w1},${w2}" 2>"${errfile}" >"${outfile}" &
+ ppid=$!
+ echo "perf PID is $ppid"
+ wait_for_perf_to_start ${ppid} "${errfile}" || return 1
+
+ kill ${w1}
+ wait_for_process_to_exit ${w1} || return 1
+ is_running ${ppid} || return 1
+
+ kill ${w2}
+ wait_for_process_to_exit ${w2} || return 1
+ wait_for_process_to_exit ${ppid} || return 1
+
+ awk -v thread_cnt=4 -f "${awkscript}" "${errfile}" || return 1
+
+ echo OK
+ return 0
+}
+
+test_jitdump()
+{
+ echo "--- Test tracing self-modifying code that uses jitdump ---"
+
+ script_path=$(realpath "$0")
+ script_dir=$(dirname "$script_path")
+ jitdump_incl_dir="${script_dir}/../../util"
+ jitdump_h="${jitdump_incl_dir}/jitdump.h"
+
+ if [ ! -e "${jitdump_h}" ] ; then
+ echo "SKIP: Include file jitdump.h not found"
+ return 2
+ fi
+
+ if [ -z "${have_jitdump_workload}" ] ; then
+ have_jitdump_workload=false
+ # Create a workload that uses self-modifying code and generates its own jitdump file
+ cat <<- "_end_of_file_" | /usr/bin/cc -o "${jitdump_workload}" -I "${jitdump_incl_dir}" -xc - -pthread && have_jitdump_workload=true
+ #define _GNU_SOURCE
+ #include <sys/mman.h>
+ #include <sys/types.h>
+ #include <stddef.h>
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <unistd.h>
+ #include <string.h>
+
+ #include "jitdump.h"
+
+ #define CHK_BYTE 0x5a
+
+ static inline uint64_t rdtsc(void)
+ {
+ unsigned int low, high;
+
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+ return low | ((uint64_t)high) << 32;
+ }
+
+ static FILE *open_jitdump(void)
+ {
+ struct jitheader header = {
+ .magic = JITHEADER_MAGIC,
+ .version = JITHEADER_VERSION,
+ .total_size = sizeof(header),
+ .pid = getpid(),
+ .timestamp = rdtsc(),
+ .flags = JITDUMP_FLAGS_ARCH_TIMESTAMP,
+ };
+ char filename[256];
+ FILE *f;
+ void *m;
+
+ snprintf(filename, sizeof(filename), "jit-%d.dump", getpid());
+ f = fopen(filename, "w+");
+ if (!f)
+ goto err;
+ /* Create an MMAP event for the jitdump file. That is how perf tool finds it. */
+ m = mmap(0, 4096, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
+ if (m == MAP_FAILED)
+ goto err_close;
+ munmap(m, 4096);
+ if (fwrite(&header,sizeof(header),1,f) != 1)
+ goto err_close;
+ return f;
+
+ err_close:
+ fclose(f);
+ err:
+ return NULL;
+ }
+
+ static int write_jitdump(FILE *f, void *addr, const uint8_t *dat, size_t sz, uint64_t *idx)
+ {
+ struct jr_code_load rec = {
+ .p.id = JIT_CODE_LOAD,
+ .p.total_size = sizeof(rec) + sz,
+ .p.timestamp = rdtsc(),
+ .pid = getpid(),
+ .tid = gettid(),
+ .vma = (unsigned long)addr,
+ .code_addr = (unsigned long)addr,
+ .code_size = sz,
+ .code_index = ++*idx,
+ };
+
+ if (fwrite(&rec,sizeof(rec),1,f) != 1 ||
+ fwrite(dat, sz, 1, f) != 1)
+ return -1;
+ return 0;
+ }
+
+ static void close_jitdump(FILE *f)
+ {
+ fclose(f);
+ }
+
+ int main()
+ {
+ /* Get a memory page to store executable code */
+ void *addr = mmap(0, 4096, PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ /* Code to execute: mov CHK_BYTE, %eax ; ret */
+ uint8_t dat[] = {0xb8, CHK_BYTE, 0x00, 0x00, 0x00, 0xc3};
+ FILE *f = open_jitdump();
+ uint64_t idx = 0;
+ int ret = 1;
+
+ if (!f)
+ return 1;
+ /* Copy executable code to executable memory page */
+ memcpy(addr, dat, sizeof(dat));
+ /* Record it in the jitdump file */
+ if (write_jitdump(f, addr, dat, sizeof(dat), &idx))
+ goto out_close;
+ /* Call it */
+ ret = ((int (*)(void))addr)() - CHK_BYTE;
+ out_close:
+ close_jitdump(f);
+ return ret;
+ }
+ _end_of_file_
+ fi
+
+ if ! $have_jitdump_workload ; then
+ echo "SKIP: No jitdump workload"
+ return 2
+ fi
+
+ # Change to temp_dir so jitdump collateral files go there
+ cd "${temp_dir}"
+ perf_record_no_bpf -o "${tmpfile}" -e intel_pt//u "${jitdump_workload}"
+ perf inject -i "${tmpfile}" -o "${perfdatafile}" --jit
+ decode_br_cnt=$(perf script -i "${perfdatafile}" --itrace=b | wc -l)
+ # Note that overflow and lost errors are suppressed for the error count
+ decode_err_cnt=$(perf script -i "${perfdatafile}" --itrace=e-o-l | grep -ci error)
+ cd -
+ # Should be thousands of branches
+ if [ "${decode_br_cnt}" -lt 1000 ] ; then
+ echo "Decode failed, only ${decode_br_cnt} branches"
+ return 1
+ fi
+ # Should be no errors
+ if [ "${decode_err_cnt}" -ne 0 ] ; then
+ echo "Decode failed, ${decode_err_cnt} errors"
+ perf script -i "${perfdatafile}" --itrace=e-o-l --show-mmap-events | cat
+ return 1
+ fi
+
+ echo OK
+ return 0
+}
+
+test_packet_filter()
+{
+ echo "--- Test with MTC and TSC disabled ---"
+ # Disable MTC and TSC
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/mtc=0,tsc=0/u uname
+ # Should not get MTC packet
+ mtc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "MTC 0x")
+ if [ "${mtc_cnt}" -ne 0 ] ; then
+ echo "Failed to filter with mtc=0"
+ return 1
+ fi
+	# Should not get TSC packets
+ tsc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "TSC 0x")
+ if [ "${tsc_cnt}" -ne 0 ] ; then
+ echo "Failed to filter with tsc=0"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_disable_branch()
+{
+ echo "--- Test with branches disabled ---"
+ # Disable branch
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/branch=0/u uname
+ # Should not get branch related packets
+ tnt_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "TNT 0x")
+ tip_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "TIP 0x")
+ fup_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "FUP 0x")
+ if [ "${tnt_cnt}" -ne 0 ] || [ "${tip_cnt}" -ne 0 ] || [ "${fup_cnt}" -ne 0 ] ; then
+ echo "Failed to disable branches"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_time_cyc()
+{
+ echo "--- Test with/without CYC ---"
+ # Check if CYC is supported
+ cyc=$(cat /sys/bus/event_source/devices/intel_pt/caps/psb_cyc)
+ if [ "${cyc}" != "1" ] ; then
+ echo "SKIP: CYC is not supported"
+ return 2
+ fi
+ # Enable CYC
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/cyc/u uname
+ # should get CYC packets
+ cyc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "CYC 0x")
+ if [ "${cyc_cnt}" = "0" ] ; then
+ echo "Failed to get CYC packet"
+ return 1
+ fi
+ # Without CYC
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt//u uname
+ # Should not get CYC packets
+ cyc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "CYC 0x")
+ if [ "${cyc_cnt}" -gt 0 ] ; then
+ echo "Still get CYC packet without cyc"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_sample()
+{
+ echo "--- Test recording with sample mode ---"
+ # Check if recording with sample mode is working
+ if ! perf_record_no_decode -o "${perfdatafile}" --aux-sample=8192 -e '{intel_pt//u,branch-misses:u}' uname ; then
+ echo "perf record failed with --aux-sample"
+ return 1
+ fi
+ # Check with event with PMU name
+ if perf_record_no_decode -o "${perfdatafile}" -e br_misp_retired.all_branches:u uname ; then
+ if ! perf_record_no_decode -o "${perfdatafile}" -e '{intel_pt//,br_misp_retired.all_branches/aux-sample-size=8192/}:u' uname ; then
+ echo "perf record failed with --aux-sample-size"
+ return 1
+ fi
+ fi
+ echo OK
+ return 0
+}
+
+test_kernel_trace()
+{
+ echo "--- Test with kernel trace ---"
+ # Check if recording with kernel trace is working
+ can_kernel || return 2
+ if ! perf_record_no_decode -o "${perfdatafile}" -e intel_pt//k -m1,128 uname ; then
+ echo "perf record failed with intel_pt//k"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_virtual_lbr()
+{
+ echo "--- Test virtual LBR ---"
+ # Check if python script is supported
+ libpython=$(perf version --build-options | grep python | grep -cv OFF)
+ if [ "${libpython}" != "1" ] ; then
+ echo "SKIP: python scripting is not supported"
+ return 2
+ fi
+
+ # Python script to determine the maximum size of branch stacks
+ cat << "_end_of_file_" > "${maxbrstack}"
+from __future__ import print_function
+
+bmax = 0
+
+def process_event(param_dict):
+ if "brstack" in param_dict:
+ brstack = param_dict["brstack"]
+ n = len(brstack)
+ global bmax
+ if n > bmax:
+ bmax = n
+
+def trace_end():
+ print("max brstack", bmax)
+_end_of_file_
+
+ # Check if virtual lbr is working
+ perf_record_no_bpf -o "${perfdatafile}" --aux-sample -e '{intel_pt//,cycles}:u' uname
+ times_val=$(perf script -i "${perfdatafile}" --itrace=L -s "${maxbrstack}" 2>/dev/null | grep "max brstack " | cut -d " " -f 3)
+ case "${times_val}" in
+ [0-9]*) ;;
+ *) times_val=0;;
+ esac
+ if [ "${times_val}" -lt 2 ] ; then
+ echo "Failed with virtual lbr"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_power_event()
+{
+ echo "--- Test power events ---"
+ # Check if power events are supported
+ power_event=$(cat /sys/bus/event_source/devices/intel_pt/caps/power_event_trace)
+ if [ "${power_event}" != "1" ] ; then
+ echo "SKIP: power_event_trace is not supported"
+ return 2
+ fi
+ if ! perf_record_no_decode -o "${perfdatafile}" -a -e intel_pt/pwr_evt/u uname ; then
+ echo "perf record failed with pwr_evt"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_no_tnt()
+{
+ echo "--- Test with TNT packets disabled ---"
+ # Check if TNT disable is supported
+ notnt=$(cat /sys/bus/event_source/devices/intel_pt/caps/tnt_disable)
+ if [ "${notnt}" != "1" ] ; then
+ echo "SKIP: tnt_disable is not supported"
+ return 2
+ fi
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/notnt/u uname
+ # Should be no TNT packets
+ tnt_cnt=$(perf script -i "${perfdatafile}" -D | grep -c TNT)
+ if [ "${tnt_cnt}" -ne 0 ] ; then
+ echo "TNT packets still there after notnt"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_event_trace()
+{
+ echo "--- Test with event_trace ---"
+ # Check if event_trace is supported
+ event_trace=$(cat /sys/bus/event_source/devices/intel_pt/caps/event_trace)
+ if [ "${event_trace}" != 1 ] ; then
+ echo "SKIP: event_trace is not supported"
+ return 2
+ fi
+ if ! perf_record_no_decode -o "${perfdatafile}" -e intel_pt/event/u uname ; then
+ echo "perf record failed with event trace"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_pipe()
+{
+ echo "--- Test with pipe mode ---"
+ # Check if it works with pipe
+ if ! perf_record_no_bpf -o- -e intel_pt//u uname | perf report -q -i- --itrace=i10000 ; then
+ echo "perf record + report failed with pipe mode"
+ return 1
+ fi
+ if ! perf_record_no_bpf -o- -e intel_pt//u uname | perf inject -b > /dev/null ; then
+ echo "perf record + inject failed with pipe mode"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+count_result()
+{
+ if [ "$1" -eq 2 ] ; then
+ skip_cnt=$((skip_cnt + 1))
+ return
+ fi
+ if [ "$1" -eq 0 ] ; then
+ ok_cnt=$((ok_cnt + 1))
+ return
+ fi
+ err_cnt=$((err_cnt + 1))
+}
+
+ret=0
+test_system_wide_side_band || ret=$? ; count_result $ret ; ret=0
+test_per_thread "" "" || ret=$? ; count_result $ret ; ret=0
+test_per_thread "k" "(incl. kernel) " || ret=$? ; count_result $ret ; ret=0
+test_jitdump || ret=$? ; count_result $ret ; ret=0
+test_packet_filter || ret=$? ; count_result $ret ; ret=0
+test_disable_branch || ret=$? ; count_result $ret ; ret=0
+test_time_cyc || ret=$? ; count_result $ret ; ret=0
+test_sample || ret=$? ; count_result $ret ; ret=0
+test_kernel_trace || ret=$? ; count_result $ret ; ret=0
+test_virtual_lbr || ret=$? ; count_result $ret ; ret=0
+test_power_event || ret=$? ; count_result $ret ; ret=0
+test_no_tnt || ret=$? ; count_result $ret ; ret=0
+test_event_trace || ret=$? ; count_result $ret ; ret=0
+test_pipe || ret=$? ; count_result $ret ; ret=0
+
+cleanup
+
+echo "--- Done ---"
+
+if [ ${err_cnt} -gt 0 ] ; then
+ exit 1
+fi
+
+if [ ${ok_cnt} -gt 0 ] ; then
+ exit 0
+fi
+
+exit 2
diff --git a/tools/perf/tests/shell/test_java_symbol.sh b/tools/perf/tests/shell/test_java_symbol.sh
new file mode 100755
index 0000000000..499539d1c4
--- /dev/null
+++ b/tools/perf/tests/shell/test_java_symbol.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+# Test java symbol
+
+# SPDX-License-Identifier: GPL-2.0
+# Leo Yan <leo.yan@linaro.org>, 2022
+
+# skip if there's no jshell
+if ! [ -x "$(command -v jshell)" ]; then
+ echo "skip: no jshell, install JDK"
+ exit 2
+fi
+
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+PERF_INJ_DATA=$(mktemp /tmp/__perf_test.perf.data.inj.XXXXX)
+
+cleanup_files()
+{
+ echo "Cleaning up files..."
+ rm -f ${PERF_DATA}
+ rm -f ${PERF_INJ_DATA}
+}
+
+trap cleanup_files exit term int
+
+if [ -e "$PWD/tools/perf/libperf-jvmti.so" ]; then
+ LIBJVMTI=$PWD/tools/perf/libperf-jvmti.so
+elif [ -e "$PWD/libperf-jvmti.so" ]; then
+ LIBJVMTI=$PWD/libperf-jvmti.so
+elif [ -e "$PREFIX/lib64/libperf-jvmti.so" ]; then
+ LIBJVMTI=$PREFIX/lib64/libperf-jvmti.so
+elif [ -e "$PREFIX/lib/libperf-jvmti.so" ]; then
+ LIBJVMTI=$PREFIX/lib/libperf-jvmti.so
+elif [ -e "/usr/lib/linux-tools-$(uname -a | awk '{ print $3 }' | sed -r 's/-generic//')/libperf-jvmti.so" ]; then
+ LIBJVMTI=/usr/lib/linux-tools-$(uname -a | awk '{ print $3 }' | sed -r 's/-generic//')/libperf-jvmti.so
+else
+	echo "Failed to find libperf-jvmti.so"
+	# JVMTI is a build option; skip the test if the lib cannot be found
+ exit 2
+fi
+
+cat <<EOF | perf record -k 1 -o $PERF_DATA jshell -s -J-agentpath:$LIBJVMTI
+int fib(int x) {
+ return x > 1 ? fib(x - 2) + fib(x - 1) : 1;
+}
+
+int q = 0;
+
+for (int i = 0; i < 10; i++)
+ q += fib(i);
+
+System.out.println(q);
+EOF
+
+if [ $? -ne 0 ]; then
+	echo "Failed to record for the java program"
+ exit 1
+fi
+
+if ! DEBUGINFOD_URLS='' perf inject -i $PERF_DATA -o $PERF_INJ_DATA -j; then
+	echo "Failed to inject samples"
+ exit 1
+fi
+
+# Below is an example of the instruction samples reporting:
+# 8.18% jshell jitted-50116-29.so [.] Interpreter
+# 0.75% Thread-1 jitted-83602-1670.so [.] jdk.internal.jimage.BasicImageReader.getString(int)
+perf report --stdio -i ${PERF_INJ_DATA} 2>&1 | \
+ grep -E " +[0-9]+\.[0-9]+% .* (Interpreter|jdk\.internal).*" > /dev/null 2>&1
+
+if [ $? -ne 0 ]; then
+	echo "Failed to find java symbols"
+ exit 1
+fi
+
+exit 0
diff --git a/tools/perf/tests/shell/test_perf_data_converter_json.sh b/tools/perf/tests/shell/test_perf_data_converter_json.sh
new file mode 100755
index 0000000000..6ded58f98f
--- /dev/null
+++ b/tools/perf/tests/shell/test_perf_data_converter_json.sh
@@ -0,0 +1,72 @@
+#!/bin/bash
+# 'perf data convert --to-json' command test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+
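+# Pick a Python interpreter: honour $PYTHON if set, otherwise fall back to python3 or python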
+if [ "$PYTHON" = "" ] ; then
+ if which python3 > /dev/null ; then
+ PYTHON=python3
+ elif which python > /dev/null ; then
+ PYTHON=python
+ else
+ echo "Skipping test: python not detected, please set the PYTHON environment variable."
+ exit 2
+ fi
+fi
+
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+result=$(mktemp /tmp/__perf_test.output.json.XXXXX)
+
+cleanup()
+{
+ rm -f "${perfdata}"
+ rm -f "${result}"
+ trap - exit term int
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit ${err}
+}
+trap trap_cleanup exit term int
+
+test_json_converter_command()
+{
+ echo "Testing Perf Data Convertion Command to JSON"
+ perf record -o "$perfdata" -F 99 -g -- perf test -w noploop > /dev/null 2>&1
+ perf data convert --to-json "$result" --force -i "$perfdata" >/dev/null 2>&1
+ if [ "$(cat ${result} | wc -l)" -gt "0" ] ; then
+ echo "Perf Data Converter Command to JSON [SUCCESS]"
+ else
+ echo "Perf Data Converter Command to JSON [FAILED]"
+ err=1
+ exit
+ fi
+}
+
+validate_json_format()
+{
+ echo "Validating Perf Data Converted JSON file"
+ if [ -f "$result" ] ; then
+ if $PYTHON -c "import json; json.load(open('$result'))" >/dev/null 2>&1 ; then
+ echo "The file contains valid JSON format [SUCCESS]"
+ else
+ echo "The file does not contain valid JSON format [FAILED]"
+ err=1
+ exit
+ fi
+ else
+ echo "File not found [FAILED]"
+ err=2
+ exit
+ fi
+}
+
+test_json_converter_command
+validate_json_format
+
+exit ${err}
diff --git a/tools/perf/tests/shell/test_task_analyzer.sh b/tools/perf/tests/shell/test_task_analyzer.sh
new file mode 100755
index 0000000000..92d15154ba
--- /dev/null
+++ b/tools/perf/tests/shell/test_task_analyzer.sh
@@ -0,0 +1,175 @@
+#!/bin/bash
+# perf script task-analyzer tests
+# SPDX-License-Identifier: GPL-2.0
+
+tmpdir=$(mktemp -d /tmp/perf-script-task-analyzer-XXXXX)
+err=0
+
+# set PERF_EXEC_PATH to find scripts in the source directory
+perfdir=$(dirname "$0")/../..
+if [ -e "$perfdir/scripts/python/Perf-Trace-Util" ]; then
+ export PERF_EXEC_PATH=$perfdir
+fi
+
+cleanup() {
+ rm -f perf.data
+ rm -f perf.data.old
+ rm -f csv
+ rm -f csvsummary
+ rm -rf "$tmpdir"
+ trap - exit term int
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup exit term int
+
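+# report <exit status> <test name> [error message]: print PASS or FAIL and record failures in err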
+report() {
+ if [ "$1" = 0 ]; then
+ echo "PASS: \"$2\""
+ else
+ echo "FAIL: \"$2\" Error message: \"$3\""
+ err=1
+ fi
+}
+
+check_exec_0() {
+ if [ $? != 0 ]; then
+ report 1 "invocation of $1 command failed"
+ fi
+}
+
+find_str_or_fail() {
+ grep -q "$1" "$2"
+ if [ "$?" != 0 ]; then
+ report 1 "$3" "Failed to find required string:'${1}'."
+ else
+ report 0 "$3"
+ fi
+}
+
+# check if perf is compiled with libtraceevent support
+skip_no_probe_record_support() {
+ perf version --build-options | grep -q " OFF .* HAVE_LIBTRACEEVENT" && return 2
+ return 0
+}
+
+prepare_perf_data() {
+ # 1s should be sufficient to catch at least some switches
+ perf record -e sched:sched_switch -a -- sleep 1 > /dev/null 2>&1
+ # check that the perf data file got created in the step above
+ if [ ! -e "perf.data" ]; then
+ printf "FAIL: perf record failed to create \"perf.data\" \n"
+ return 1
+ fi
+}
+
+# check standard invocation with no arguments
+test_basic() {
+ out="$tmpdir/perf.out"
+ perf script report task-analyzer > "$out"
+ check_exec_0 "perf script report task-analyzer"
+ find_str_or_fail "Comm" "$out" "${FUNCNAME[0]}"
+}
+
+test_ns_rename() {
+ out="$tmpdir/perf.out"
+ perf script report task-analyzer --ns --rename-comms-by-tids 0:random > "$out"
+ check_exec_0 "perf script report task-analyzer --ns --rename-comms-by-tids 0:random"
+ find_str_or_fail "Comm" "$out" "${FUNCNAME[0]}"
+}
+
+test_ms_filtertasks_highlight() {
+ out="$tmpdir/perf.out"
+ perf script report task-analyzer --ms --filter-tasks perf --highlight-tasks perf \
+ > "$out"
+ check_exec_0 "perf script report task-analyzer --ms --filter-tasks perf --highlight-tasks perf"
+ find_str_or_fail "Comm" "$out" "${FUNCNAME[0]}"
+}
+
+test_extended_times_timelimit_limittasks() {
+ out="$tmpdir/perf.out"
+ perf script report task-analyzer --extended-times --time-limit :99999 \
+ --limit-to-tasks perf > "$out"
+ check_exec_0 "perf script report task-analyzer --extended-times --time-limit :99999 --limit-to-tasks perf"
+ find_str_or_fail "Out-Out" "$out" "${FUNCNAME[0]}"
+}
+
+test_summary() {
+ out="$tmpdir/perf.out"
+ perf script report task-analyzer --summary > "$out"
+ check_exec_0 "perf script report task-analyzer --summary"
+ find_str_or_fail "Summary" "$out" "${FUNCNAME[0]}"
+}
+
+test_summaryextended() {
+ out="$tmpdir/perf.out"
+ perf script report task-analyzer --summary-extended > "$out"
+ check_exec_0 "perf script report task-analyzer --summary-extended"
+ find_str_or_fail "Inter Task Times" "$out" "${FUNCNAME[0]}"
+}
+
+test_summaryonly() {
+ out="$tmpdir/perf.out"
+ perf script report task-analyzer --summary-only > "$out"
+ check_exec_0 "perf script report task-analyzer --summary-only"
+ find_str_or_fail "Summary" "$out" "${FUNCNAME[0]}"
+}
+
+test_extended_times_summary_ns() {
+ out="$tmpdir/perf.out"
+ perf script report task-analyzer --extended-times --summary --ns > "$out"
+ check_exec_0 "perf script report task-analyzer --extended-times --summary --ns"
+ find_str_or_fail "Out-Out" "$out" "${FUNCNAME[0]}"
+ find_str_or_fail "Summary" "$out" "${FUNCNAME[0]}"
+}
+
+test_csv() {
+ perf script report task-analyzer --csv csv > /dev/null
+ check_exec_0 "perf script report task-analyzer --csv csv"
+ find_str_or_fail "Comm;" csv "${FUNCNAME[0]}"
+}
+
+test_csv_extended_times() {
+ perf script report task-analyzer --csv csv --extended-times > /dev/null
+ check_exec_0 "perf script report task-analyzer --csv csv --extended-times"
+ find_str_or_fail "Out-Out;" csv "${FUNCNAME[0]}"
+}
+
+test_csvsummary() {
+ perf script report task-analyzer --csv-summary csvsummary > /dev/null
+ check_exec_0 "perf script report task-analyzer --csv-summary csvsummary"
+ find_str_or_fail "Comm;" csvsummary "${FUNCNAME[0]}"
+}
+
+test_csvsummary_extended() {
+ perf script report task-analyzer --csv-summary csvsummary --summary-extended \
+ >/dev/null
+ check_exec_0 "perf script report task-analyzer --csv-summary csvsummary --summary-extended"
+ find_str_or_fail "Out-Out;" csvsummary "${FUNCNAME[0]}"
+}
+
+skip_no_probe_record_support
+err=$?
+if [ $err -ne 0 ]; then
+ echo "WARN: Skipping tests. No libtraceevent support"
+ cleanup
+ exit $err
+fi
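+# Record sched_switch data once into perf.data, then run every task-analyzer mode against it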
+prepare_perf_data
+test_basic
+test_ns_rename
+test_ms_filtertasks_highlight
+test_extended_times_timelimit_limittasks
+test_summary
+test_summaryextended
+test_summaryonly
+test_extended_times_summary_ns
+test_csv
+test_csvsummary
+test_csv_extended_times
+test_csvsummary_extended
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
new file mode 100755
index 0000000000..319f36ebb9
--- /dev/null
+++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# test perf probe of function from different CU
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# skip if there's no gcc
+if ! [ -x "$(command -v gcc)" ]; then
+ echo "failed: no gcc compiler"
+ exit 2
+fi
+
+temp_dir=$(mktemp -d /tmp/perf-uprobe-different-cu-sh.XXXXXXXXXX)
+
+cleanup()
+{
+ trap - EXIT TERM INT
+ if [[ "${temp_dir}" =~ ^/tmp/perf-uprobe-different-cu-sh.*$ ]]; then
+ echo "--- Cleaning up ---"
+ perf probe -x ${temp_dir}/testfile -d foo || true
+ rm -f "${temp_dir}/"*
+ rmdir "${temp_dir}"
+ fi
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+cat > ${temp_dir}/testfile-foo.h << EOF
+struct t
+{
+ int *p;
+ int c;
+};
+
+extern int foo (int i, struct t *t);
+EOF
+
+cat > ${temp_dir}/testfile-foo.c << EOF
+#include "testfile-foo.h"
+
+int
+foo (int i, struct t *t)
+{
+ int j, res = 0;
+ for (j = 0; j < i && j < t->c; j++)
+ res += t->p[j];
+
+ return res;
+}
+EOF
+
+cat > ${temp_dir}/testfile-main.c << EOF
+#include "testfile-foo.h"
+
+static struct t g;
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int j[argc];
+ g.c = argc;
+ g.p = j;
+ for (i = 0; i < argc; i++)
+ j[i] = (int) argv[i][0];
+ return foo (3, &g);
+}
+EOF
+
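+# Build the test binary; compiling foo() with -flto exercises probing a function
+# whose debug info comes from a different compile unit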
+gcc -g -Og -flto -c ${temp_dir}/testfile-foo.c -o ${temp_dir}/testfile-foo.o
+gcc -g -Og -c ${temp_dir}/testfile-main.c -o ${temp_dir}/testfile-main.o
+gcc -g -Og -o ${temp_dir}/testfile ${temp_dir}/testfile-foo.o ${temp_dir}/testfile-main.o
+
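+# Both listing and adding the probe must succeed; with 'set -e' any failure exits via the cleanup trap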
+perf probe -x ${temp_dir}/testfile --funcs foo
+perf probe -x ${temp_dir}/testfile foo
+
+cleanup
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
new file mode 100755
index 0000000000..4014487cf4
--- /dev/null
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+# Check open filename arg using perf trace + vfs_getname
+
+# Uses the 'perf test shell' library to add probe:vfs_getname to the system,
+# then runs 'perf trace' on a 'touch' of a temp file and checks that the
+# filename captured by probe:vfs_getname appears in the 'perf trace' output,
+# which uses the probe, when present, in the "open" syscall "filename"
+# argument beautifier.
+
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+. "$(dirname $0)"/lib/probe.sh
+
+skip_if_no_perf_probe || exit 2
+skip_if_no_perf_trace || exit 2
+
+. "$(dirname $0)"/lib/probe_vfs_getname.sh
+
+trace_open_vfs_getname() {
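+ # Build a comma-separated list of the sys_enter_open*/openat tracepoint names known to this kernel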
+ evts="$(echo "$(perf list syscalls:sys_enter_open* 2>/dev/null | grep -E 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/')" | sed ':a;N;s:\n:,:g')"
+ perf trace -e $evts touch $file 2>&1 | \
+ grep -E " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +\"?${file}\"?, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
+}
+
+
+add_probe_vfs_getname || skip_if_no_debuginfo
+err=$?
+if [ $err -ne 0 ] ; then
+ exit $err
+fi
+
+file=$(mktemp /tmp/temporary_file.XXXXX)
+
+# Do not use whatever ~/.perfconfig file, it may change the output
+# via trace.{show_timestamp,show_prefix,etc}
+export PERF_CONFIG=/dev/null
+
+trace_open_vfs_getname
+err=$?
+rm -f ${file}
+cleanup_probe_vfs_getname
+exit $err