path: root/tools/perf/tests
Diffstat (limited to '')
-rw-r--r--  tools/perf/tests/.gitignore | 5
-rw-r--r--  tools/perf/tests/Build | 105
-rw-r--r--  tools/perf/tests/api-io.c | 306
-rw-r--r--  tools/perf/tests/attr.c | 211
-rw-r--r--  tools/perf/tests/attr.py | 397
-rw-r--r--  tools/perf/tests/attr/README | 71
-rw-r--r--  tools/perf/tests/attr/base-record | 41
-rw-r--r--  tools/perf/tests/attr/base-record-spe | 40
-rw-r--r--  tools/perf/tests/attr/base-stat | 41
-rw-r--r--  tools/perf/tests/attr/system-wide-dummy | 50
-rw-r--r--  tools/perf/tests/attr/test-record-C0 | 22
-rw-r--r--  tools/perf/tests/attr/test-record-basic | 6
-rw-r--r--  tools/perf/tests/attr/test-record-branch-any | 8
-rw-r--r--  tools/perf/tests/attr/test-record-branch-filter-any | 8
-rw-r--r--  tools/perf/tests/attr/test-record-branch-filter-any_call | 8
-rw-r--r--  tools/perf/tests/attr/test-record-branch-filter-any_ret | 8
-rw-r--r--  tools/perf/tests/attr/test-record-branch-filter-hv | 8
-rw-r--r--  tools/perf/tests/attr/test-record-branch-filter-ind_call | 8
-rw-r--r--  tools/perf/tests/attr/test-record-branch-filter-k | 8
-rw-r--r--  tools/perf/tests/attr/test-record-branch-filter-u | 8
-rw-r--r--  tools/perf/tests/attr/test-record-count | 9
-rw-r--r--  tools/perf/tests/attr/test-record-data | 10
-rw-r--r--  tools/perf/tests/attr/test-record-freq | 7
-rw-r--r--  tools/perf/tests/attr/test-record-graph-default | 9
-rw-r--r--  tools/perf/tests/attr/test-record-graph-default-aarch64 | 9
-rw-r--r--  tools/perf/tests/attr/test-record-graph-dwarf | 12
-rw-r--r--  tools/perf/tests/attr/test-record-graph-fp | 9
-rw-r--r--  tools/perf/tests/attr/test-record-graph-fp-aarch64 | 9
-rw-r--r--  tools/perf/tests/attr/test-record-group | 22
-rw-r--r--  tools/perf/tests/attr/test-record-group-sampling | 39
-rw-r--r--  tools/perf/tests/attr/test-record-group1 | 23
-rw-r--r--  tools/perf/tests/attr/test-record-group2 | 29
-rw-r--r--  tools/perf/tests/attr/test-record-no-buffering | 9
-rw-r--r--  tools/perf/tests/attr/test-record-no-inherit | 8
-rw-r--r--  tools/perf/tests/attr/test-record-no-samples | 7
-rw-r--r--  tools/perf/tests/attr/test-record-period | 8
-rw-r--r--  tools/perf/tests/attr/test-record-pfm-period | 9
-rw-r--r--  tools/perf/tests/attr/test-record-raw | 7
-rw-r--r--  tools/perf/tests/attr/test-record-spe-period | 12
-rw-r--r--  tools/perf/tests/attr/test-record-spe-period-term | 12
-rw-r--r--  tools/perf/tests/attr/test-record-spe-physical-address | 12
-rw-r--r--  tools/perf/tests/attr/test-stat-C0 | 10
-rw-r--r--  tools/perf/tests/attr/test-stat-basic | 7
-rw-r--r--  tools/perf/tests/attr/test-stat-default | 167
-rw-r--r--  tools/perf/tests/attr/test-stat-detailed-1 | 208
-rw-r--r--  tools/perf/tests/attr/test-stat-detailed-2 | 268
-rw-r--r--  tools/perf/tests/attr/test-stat-detailed-3 | 288
-rw-r--r--  tools/perf/tests/attr/test-stat-group | 17
-rw-r--r--  tools/perf/tests/attr/test-stat-group1 | 17
-rw-r--r--  tools/perf/tests/attr/test-stat-no-inherit | 8
-rw-r--r--  tools/perf/tests/backward-ring-buffer.c | 171
-rw-r--r--  tools/perf/tests/bitmap.c | 55
-rw-r--r--  tools/perf/tests/bp_account.c | 222
-rw-r--r--  tools/perf/tests/bp_signal.c | 293
-rw-r--r--  tools/perf/tests/bp_signal_overflow.c | 142
-rw-r--r--  tools/perf/tests/bpf-script-example.c | 60
-rw-r--r--  tools/perf/tests/bpf-script-test-kbuild.c | 21
-rw-r--r--  tools/perf/tests/bpf-script-test-prologue.c | 47
-rw-r--r--  tools/perf/tests/bpf-script-test-relocation.c | 51
-rw-r--r--  tools/perf/tests/bpf.c | 390
-rw-r--r--  tools/perf/tests/builtin-test-list.c | 207
-rw-r--r--  tools/perf/tests/builtin-test-list.h | 12
-rw-r--r--  tools/perf/tests/builtin-test.c | 523
-rw-r--r--  tools/perf/tests/clang.c | 32
-rw-r--r--  tools/perf/tests/code-reading.c | 747
-rw-r--r--  tools/perf/tests/cpumap.c | 176
-rw-r--r--  tools/perf/tests/demangle-java-test.c | 44
-rw-r--r--  tools/perf/tests/demangle-ocaml-test.c | 45
-rw-r--r--  tools/perf/tests/dlfilter-test.c | 418
-rw-r--r--  tools/perf/tests/dso-data.c | 399
-rw-r--r--  tools/perf/tests/dwarf-unwind.c | 242
-rw-r--r--  tools/perf/tests/event-times.c | 243
-rw-r--r--  tools/perf/tests/event_update.c | 120
-rw-r--r--  tools/perf/tests/evsel-roundtrip-name.c | 125
-rw-r--r--  tools/perf/tests/evsel-tp-sched.c | 92
-rw-r--r--  tools/perf/tests/expand-cgroup.c | 225
-rw-r--r--  tools/perf/tests/expr.c | 231
-rw-r--r--  tools/perf/tests/fdarray.c | 163
-rw-r--r--  tools/perf/tests/genelf.c | 53
-rw-r--r--  tools/perf/tests/hists_common.c | 219
-rw-r--r--  tools/perf/tests/hists_common.h | 76
-rw-r--r--  tools/perf/tests/hists_cumulate.c | 740
-rw-r--r--  tools/perf/tests/hists_filter.c | 329
-rw-r--r--  tools/perf/tests/hists_link.c | 343
-rw-r--r--  tools/perf/tests/hists_output.c | 627
-rw-r--r--  tools/perf/tests/is_printable_array.c | 40
-rw-r--r--  tools/perf/tests/keep-tracking.c | 164
-rw-r--r--  tools/perf/tests/kmod-path.c | 163
-rw-r--r--  tools/perf/tests/llvm.c | 219
-rw-r--r--  tools/perf/tests/llvm.h | 31
-rw-r--r--  tools/perf/tests/make | 412
-rw-r--r--  tools/perf/tests/maps.c | 124
-rw-r--r--  tools/perf/tests/mem.c | 60
-rw-r--r--  tools/perf/tests/mem2node.c | 82
-rw-r--r--  tools/perf/tests/mmap-basic.c | 305
-rw-r--r--  tools/perf/tests/mmap-thread-lookup.c | 240
-rw-r--r--  tools/perf/tests/openat-syscall-all-cpus.c | 138
-rw-r--r--  tools/perf/tests/openat-syscall-tp-fields.c | 147
-rw-r--r--  tools/perf/tests/openat-syscall.c | 83
-rw-r--r--  tools/perf/tests/parse-events.c | 2410
-rw-r--r--  tools/perf/tests/parse-metric.c | 317
-rw-r--r--  tools/perf/tests/parse-no-sample-id-all.c | 108
-rw-r--r--  tools/perf/tests/pe-file-parsing.c | 100
-rw-r--r--  tools/perf/tests/pe-file.c | 14
-rw-r--r--  tools/perf/tests/pe-file.exe | bin 0 -> 75595 bytes
-rw-r--r--  tools/perf/tests/pe-file.exe.debug | bin 0 -> 141644 bytes
-rw-r--r--  tools/perf/tests/perf-hooks.c | 49
-rw-r--r--  tools/perf/tests/perf-record.c | 348
-rwxr-xr-x  tools/perf/tests/perf-targz-src-pkg | 22
-rw-r--r--  tools/perf/tests/perf-time-to-tsc.c | 218
-rw-r--r--  tools/perf/tests/pfm.c | 194
-rw-r--r--  tools/perf/tests/pmu-events.c | 1063
-rw-r--r--  tools/perf/tests/pmu.c | 181
-rw-r--r--  tools/perf/tests/python-use.c | 27
-rw-r--r--  tools/perf/tests/sample-parsing.c | 436
-rw-r--r--  tools/perf/tests/sdt.c | 124
-rwxr-xr-x  tools/perf/tests/shell/buildid.sh | 158
-rw-r--r--  tools/perf/tests/shell/coresight/Makefile | 29
-rw-r--r--  tools/perf/tests/shell/coresight/Makefile.miniconfig | 14
-rwxr-xr-x  tools/perf/tests/shell/coresight/asm_pure_loop.sh | 18
-rw-r--r--  tools/perf/tests/shell/coresight/asm_pure_loop/.gitignore | 1
-rw-r--r--  tools/perf/tests/shell/coresight/asm_pure_loop/Makefile | 34
-rw-r--r--  tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S | 28
-rw-r--r--  tools/perf/tests/shell/coresight/memcpy_thread/.gitignore | 1
-rw-r--r--  tools/perf/tests/shell/coresight/memcpy_thread/Makefile | 33
-rw-r--r--  tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c | 79
-rwxr-xr-x  tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh | 18
-rw-r--r--  tools/perf/tests/shell/coresight/thread_loop/.gitignore | 1
-rw-r--r--  tools/perf/tests/shell/coresight/thread_loop/Makefile | 33
-rw-r--r--  tools/perf/tests/shell/coresight/thread_loop/thread_loop.c | 86
-rwxr-xr-x  tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh | 19
-rwxr-xr-x  tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh | 19
-rw-r--r--  tools/perf/tests/shell/coresight/unroll_loop_thread/.gitignore | 1
-rw-r--r--  tools/perf/tests/shell/coresight/unroll_loop_thread/Makefile | 33
-rw-r--r--  tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c | 74
-rwxr-xr-x  tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh | 18
-rwxr-xr-x  tools/perf/tests/shell/daemon.sh | 487
-rw-r--r--  tools/perf/tests/shell/lib/coresight.sh | 132
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py | 96
-rw-r--r--  tools/perf/tests/shell/lib/probe.sh | 12
-rw-r--r--  tools/perf/tests/shell/lib/probe_vfs_getname.sh | 24
-rw-r--r--  tools/perf/tests/shell/lib/waiting.sh | 77
-rwxr-xr-x  tools/perf/tests/shell/lock_contention.sh | 73
-rwxr-xr-x  tools/perf/tests/shell/pipe_test.sh | 69
-rwxr-xr-x  tools/perf/tests/shell/probe_vfs_getname.sh | 16
-rwxr-xr-x  tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 96
-rwxr-xr-x  tools/perf/tests/shell/record+script_probe_vfs_getname.sh | 43
-rwxr-xr-x  tools/perf/tests/shell/record+zstd_comp_decomp.sh | 37
-rwxr-xr-x  tools/perf/tests/shell/record.sh | 80
-rwxr-xr-x  tools/perf/tests/shell/record_offcpu.sh | 103
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_output.sh | 204
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_summary.sh | 31
-rwxr-xr-x  tools/perf/tests/shell/stat+json_output.sh | 182
-rwxr-xr-x  tools/perf/tests/shell/stat+shadow_stat.sh | 81
-rwxr-xr-x  tools/perf/tests/shell/stat.sh | 99
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metricgroups.sh | 12
-rwxr-xr-x  tools/perf/tests/shell/stat_all_metrics.sh | 43
-rwxr-xr-x  tools/perf/tests/shell/stat_all_pmu.sh | 23
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters.sh | 45
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters_cgrp.sh | 79
-rwxr-xr-x  tools/perf/tests/shell/test_arm_callgraph_fp.sh | 68
-rwxr-xr-x  tools/perf/tests/shell/test_arm_coresight.sh | 188
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe.sh | 113
-rwxr-xr-x  tools/perf/tests/shell/test_arm_spe_fork.sh | 92
-rwxr-xr-x  tools/perf/tests/shell/test_brstack.sh | 118
-rwxr-xr-x  tools/perf/tests/shell/test_data_symbol.sh | 93
-rwxr-xr-x  tools/perf/tests/shell/test_intel_pt.sh | 663
-rwxr-xr-x  tools/perf/tests/shell/test_java_symbol.sh | 75
-rw-r--r--  tools/perf/tests/shell/test_uprobe_from_different_cu.sh | 83
-rwxr-xr-x  tools/perf/tests/shell/trace+probe_vfs_getname.sh | 43
-rw-r--r--  tools/perf/tests/sigtrap.c | 227
-rw-r--r--  tools/perf/tests/stat.c | 119
-rw-r--r--  tools/perf/tests/sw-clock.c | 147
-rw-r--r--  tools/perf/tests/switch-tracking.c | 595
-rw-r--r--  tools/perf/tests/task-exit.c | 155
-rw-r--r--  tools/perf/tests/tests.h | 183
-rw-r--r--  tools/perf/tests/thread-map.c | 151
-rw-r--r--  tools/perf/tests/thread-maps-share.c | 100
-rw-r--r--  tools/perf/tests/time-utils-test.c | 253
-rw-r--r--  tools/perf/tests/topology.c | 242
-rw-r--r--  tools/perf/tests/unit_number__scnprintf.c | 42
-rw-r--r--  tools/perf/tests/vmlinux-kallsyms.c | 362
-rw-r--r--  tools/perf/tests/wp.c | 207
183 files changed, 25482 insertions, 0 deletions
diff --git a/tools/perf/tests/.gitignore b/tools/perf/tests/.gitignore
new file mode 100644
index 000000000..d053b325f
--- /dev/null
+++ b/tools/perf/tests/.gitignore
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+llvm-src-base.c
+llvm-src-kbuild.c
+llvm-src-prologue.c
+llvm-src-relocation.c
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
new file mode 100644
index 000000000..2064a640f
--- /dev/null
+++ b/tools/perf/tests/Build
@@ -0,0 +1,105 @@
+# SPDX-License-Identifier: GPL-2.0
+
+perf-y += builtin-test.o
+perf-y += builtin-test-list.o
+perf-y += parse-events.o
+perf-y += dso-data.o
+perf-y += attr.o
+perf-y += vmlinux-kallsyms.o
+perf-y += openat-syscall.o
+perf-y += openat-syscall-all-cpus.o
+perf-y += openat-syscall-tp-fields.o
+perf-y += mmap-basic.o
+perf-y += perf-record.o
+perf-y += evsel-roundtrip-name.o
+perf-y += evsel-tp-sched.o
+perf-y += fdarray.o
+perf-y += pmu.o
+perf-y += pmu-events.o
+perf-y += hists_common.o
+perf-y += hists_link.o
+perf-y += hists_filter.o
+perf-y += hists_output.o
+perf-y += hists_cumulate.o
+perf-y += python-use.o
+perf-y += bp_signal.o
+perf-y += bp_signal_overflow.o
+perf-y += bp_account.o
+perf-y += wp.o
+perf-y += task-exit.o
+perf-y += sw-clock.o
+perf-y += mmap-thread-lookup.o
+perf-y += thread-maps-share.o
+perf-y += switch-tracking.o
+perf-y += keep-tracking.o
+perf-y += code-reading.o
+perf-y += sample-parsing.o
+perf-y += parse-no-sample-id-all.o
+perf-y += kmod-path.o
+perf-y += thread-map.o
+perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o llvm-src-relocation.o
+perf-y += bpf.o
+perf-y += topology.o
+perf-y += mem.o
+perf-y += cpumap.o
+perf-y += stat.o
+perf-y += event_update.o
+perf-y += event-times.o
+perf-y += expr.o
+perf-y += backward-ring-buffer.o
+perf-y += sdt.o
+perf-y += is_printable_array.o
+perf-y += bitmap.o
+perf-y += perf-hooks.o
+perf-y += clang.o
+perf-y += unit_number__scnprintf.o
+perf-y += mem2node.o
+perf-y += maps.o
+perf-y += time-utils-test.o
+perf-y += genelf.o
+perf-y += api-io.o
+perf-y += demangle-java-test.o
+perf-y += demangle-ocaml-test.o
+perf-y += pfm.o
+perf-y += parse-metric.o
+perf-y += pe-file-parsing.o
+perf-y += expand-cgroup.o
+perf-y += perf-time-to-tsc.o
+perf-y += dlfilter-test.o
+perf-y += sigtrap.o
+
+$(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
+ $(call rule_mkdir)
+ $(Q)echo '#include <tests/llvm.h>' > $@
+ $(Q)echo 'const char test_llvm__bpf_base_prog[] =' >> $@
+ $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
+ $(Q)echo ';' >> $@
+
+$(OUTPUT)tests/llvm-src-kbuild.c: tests/bpf-script-test-kbuild.c tests/Build
+ $(call rule_mkdir)
+ $(Q)echo '#include <tests/llvm.h>' > $@
+ $(Q)echo 'const char test_llvm__bpf_test_kbuild_prog[] =' >> $@
+ $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
+ $(Q)echo ';' >> $@
+
+$(OUTPUT)tests/llvm-src-prologue.c: tests/bpf-script-test-prologue.c tests/Build
+ $(call rule_mkdir)
+ $(Q)echo '#include <tests/llvm.h>' > $@
+ $(Q)echo 'const char test_llvm__bpf_test_prologue_prog[] =' >> $@
+ $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
+ $(Q)echo ';' >> $@
+
+$(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/Build
+ $(call rule_mkdir)
+ $(Q)echo '#include <tests/llvm.h>' > $@
+ $(Q)echo 'const char test_llvm__bpf_test_relocation[] =' >> $@
+ $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
+ $(Q)echo ';' >> $@
+
+ifeq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc))
+perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
+endif
+
+CFLAGS_attr.o += -DBINDIR="BUILD_STR($(bindir_SQ))" -DPYTHON="BUILD_STR($(PYTHON_WORD))"
+CFLAGS_python-use.o += -DPYTHONPATH="BUILD_STR($(OUTPUT)python)" -DPYTHON="BUILD_STR($(PYTHON_WORD))"
+CFLAGS_dwarf-unwind.o += -fno-optimize-sibling-calls
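
The llvm-src-*.c rules above wrap each BPF test script into a C string constant by escaping double quotes and quoting every source line. For reference only, a rough Python equivalent of that sed transformation (not part of the build; the file and symbol names below are taken from the llvm-src-base.c rule):

    # Sketch of what the sed pipeline in the Build rules does: turn a C source
    # file into a C string constant that can be linked into perf.
    def wrap_as_c_string(src_path, symbol):
        out = ['#include <tests/llvm.h>', 'const char %s[] =' % symbol]
        with open(src_path) as f:
            for line in f:
                line = line.rstrip('\n').replace('"', '\\"')   # s/"/\\"/g
                out.append('"%s\\n"' % line)                    # quote each line
        out.append(';')
        return '\n'.join(out) + '\n'

    # e.g. wrap_as_c_string('tests/bpf-script-example.c', 'test_llvm__bpf_base_prog')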
diff --git a/tools/perf/tests/api-io.c b/tools/perf/tests/api-io.c
new file mode 100644
index 000000000..e91cf2c12
--- /dev/null
+++ b/tools/perf/tests/api-io.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "debug.h"
+#include "tests.h"
+#include <api/io.h>
+#include <linux/kernel.h>
+
+#define TEMPL "/tmp/perf-test-XXXXXX"
+
+#define EXPECT_EQUAL(val, expected) \
+do { \
+ if (val != expected) { \
+ pr_debug("%s:%d: %d != %d\n", \
+ __FILE__, __LINE__, val, expected); \
+ ret = -1; \
+ } \
+} while (0)
+
+#define EXPECT_EQUAL64(val, expected) \
+do { \
+ if (val != expected) { \
+ pr_debug("%s:%d: %lld != %lld\n", \
+ __FILE__, __LINE__, val, expected); \
+ ret = -1; \
+ } \
+} while (0)
+
+static int make_test_file(char path[PATH_MAX], const char *contents)
+{
+ ssize_t contents_len = strlen(contents);
+ int fd;
+
+ strcpy(path, TEMPL);
+ fd = mkstemp(path);
+ if (fd < 0) {
+ pr_debug("mkstemp failed");
+ return -1;
+ }
+ if (write(fd, contents, contents_len) < contents_len) {
+ pr_debug("short write");
+ close(fd);
+ unlink(path);
+ return -1;
+ }
+ close(fd);
+ return 0;
+}
+
+static int setup_test(char path[PATH_MAX], const char *contents,
+ size_t buf_size, struct io *io)
+{
+ if (make_test_file(path, contents))
+ return -1;
+
+ io->fd = open(path, O_RDONLY);
+ if (io->fd < 0) {
+ pr_debug("Failed to open '%s'\n", path);
+ unlink(path);
+ return -1;
+ }
+ io->buf = malloc(buf_size);
+ if (io->buf == NULL) {
+ pr_debug("Failed to allocate memory");
+ close(io->fd);
+ unlink(path);
+ return -1;
+ }
+ io__init(io, io->fd, io->buf, buf_size);
+ return 0;
+}
+
+static void cleanup_test(char path[PATH_MAX], struct io *io)
+{
+ free(io->buf);
+ close(io->fd);
+ unlink(path);
+}
+
+static int do_test_get_char(const char *test_string, size_t buf_size)
+{
+ char path[PATH_MAX];
+ struct io io;
+ int ch, ret = 0;
+ size_t i;
+
+ if (setup_test(path, test_string, buf_size, &io))
+ return -1;
+
+ for (i = 0; i < strlen(test_string); i++) {
+ ch = io__get_char(&io);
+
+ EXPECT_EQUAL(ch, test_string[i]);
+ EXPECT_EQUAL(io.eof, false);
+ }
+ ch = io__get_char(&io);
+ EXPECT_EQUAL(ch, -1);
+ EXPECT_EQUAL(io.eof, true);
+
+ cleanup_test(path, &io);
+ return ret;
+}
+
+static int test_get_char(void)
+{
+ int i, ret = 0;
+ size_t j;
+
+ static const char *const test_strings[] = {
+ "12345678abcdef90",
+ "a\nb\nc\nd\n",
+ "\a\b\t\v\f\r",
+ };
+ for (i = 0; i <= 10; i++) {
+ for (j = 0; j < ARRAY_SIZE(test_strings); j++) {
+ if (do_test_get_char(test_strings[j], 1 << i))
+ ret = -1;
+ }
+ }
+ return ret;
+}
+
+static int do_test_get_hex(const char *test_string,
+ __u64 val1, int ch1,
+ __u64 val2, int ch2,
+ __u64 val3, int ch3,
+ bool end_eof)
+{
+ char path[PATH_MAX];
+ struct io io;
+ int ch, ret = 0;
+ __u64 hex;
+
+ if (setup_test(path, test_string, 4, &io))
+ return -1;
+
+ ch = io__get_hex(&io, &hex);
+ EXPECT_EQUAL64(hex, val1);
+ EXPECT_EQUAL(ch, ch1);
+
+ ch = io__get_hex(&io, &hex);
+ EXPECT_EQUAL64(hex, val2);
+ EXPECT_EQUAL(ch, ch2);
+
+ ch = io__get_hex(&io, &hex);
+ EXPECT_EQUAL64(hex, val3);
+ EXPECT_EQUAL(ch, ch3);
+
+ EXPECT_EQUAL(io.eof, end_eof);
+
+ cleanup_test(path, &io);
+ return ret;
+}
+
+static int test_get_hex(void)
+{
+ int ret = 0;
+
+ if (do_test_get_hex("12345678abcdef90",
+ 0x12345678abcdef90, -1,
+ 0, -1,
+ 0, -1,
+ true))
+ ret = -1;
+
+ if (do_test_get_hex("1\n2\n3\n",
+ 1, '\n',
+ 2, '\n',
+ 3, '\n',
+ false))
+ ret = -1;
+
+ if (do_test_get_hex("12345678ABCDEF90;a;b",
+ 0x12345678abcdef90, ';',
+ 0xa, ';',
+ 0xb, -1,
+ true))
+ ret = -1;
+
+ if (do_test_get_hex("0x1x2x",
+ 0, 'x',
+ 1, 'x',
+ 2, 'x',
+ false))
+ ret = -1;
+
+ if (do_test_get_hex("x1x",
+ 0, -2,
+ 1, 'x',
+ 0, -1,
+ true))
+ ret = -1;
+
+ if (do_test_get_hex("10000000000000000000000000000abcdefgh99i",
+ 0xabcdef, 'g',
+ 0, -2,
+ 0x99, 'i',
+ false))
+ ret = -1;
+
+ return ret;
+}
+
+static int do_test_get_dec(const char *test_string,
+ __u64 val1, int ch1,
+ __u64 val2, int ch2,
+ __u64 val3, int ch3,
+ bool end_eof)
+{
+ char path[PATH_MAX];
+ struct io io;
+ int ch, ret = 0;
+ __u64 dec;
+
+ if (setup_test(path, test_string, 4, &io))
+ return -1;
+
+ ch = io__get_dec(&io, &dec);
+ EXPECT_EQUAL64(dec, val1);
+ EXPECT_EQUAL(ch, ch1);
+
+ ch = io__get_dec(&io, &dec);
+ EXPECT_EQUAL64(dec, val2);
+ EXPECT_EQUAL(ch, ch2);
+
+ ch = io__get_dec(&io, &dec);
+ EXPECT_EQUAL64(dec, val3);
+ EXPECT_EQUAL(ch, ch3);
+
+ EXPECT_EQUAL(io.eof, end_eof);
+
+ cleanup_test(path, &io);
+ return ret;
+}
+
+static int test_get_dec(void)
+{
+ int ret = 0;
+
+ if (do_test_get_dec("12345678abcdef90",
+ 12345678, 'a',
+ 0, -2,
+ 0, -2,
+ false))
+ ret = -1;
+
+ if (do_test_get_dec("1\n2\n3\n",
+ 1, '\n',
+ 2, '\n',
+ 3, '\n',
+ false))
+ ret = -1;
+
+ if (do_test_get_dec("12345678;1;2",
+ 12345678, ';',
+ 1, ';',
+ 2, -1,
+ true))
+ ret = -1;
+
+ if (do_test_get_dec("0x1x2x",
+ 0, 'x',
+ 1, 'x',
+ 2, 'x',
+ false))
+ ret = -1;
+
+ if (do_test_get_dec("x1x",
+ 0, -2,
+ 1, 'x',
+ 0, -1,
+ true))
+ ret = -1;
+
+ if (do_test_get_dec("10000000000000000000000000000000000000000000000000000000000123456789ab99c",
+ 123456789, 'a',
+ 0, -2,
+ 99, 'c',
+ false))
+ ret = -1;
+
+ return ret;
+}
+
+static int test__api_io(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ int ret = 0;
+
+ if (test_get_char())
+ ret = TEST_FAIL;
+ if (test_get_hex())
+ ret = TEST_FAIL;
+ if (test_get_dec())
+ ret = TEST_FAIL;
+ return ret;
+}
+
+DEFINE_SUITE("Test api io", api_io);
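
The expected (value, terminator) pairs above encode the io__get_hex()/io__get_dec() return convention: the parsed value plus the first non-digit character, -1 at end of file, and -2 when the first character read is not a digit. A small Python model of the hex case, inferred only from the expectations in this test (an assumption, not the actual tools/lib/api/io.h code):

    # Assumed model of io__get_hex(): consume hex digits into a 64-bit value
    # (extra digits silently shift out of range), then return the terminator;
    # -1 means EOF, -2 means the first character read was not a hex digit.
    def get_hex_model(data, pos):
        value, first = 0, True
        while pos < len(data):
            ch = data[pos]
            pos += 1
            if ch in "0123456789abcdefABCDEF":
                value = ((value << 4) | int(ch, 16)) & 0xFFFFFFFFFFFFFFFF
                first = False
            else:
                return (0, -2, pos) if first else (value, ord(ch), pos)
        return (0, -1, pos) if first else (value, -1, pos)

    # "x1x" yields (0, -2), then (1, ord('x')), then (0, -1), matching the
    # (val, ch) pairs passed to do_test_get_hex("x1x", ...) above.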
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
new file mode 100644
index 000000000..56fba08a3
--- /dev/null
+++ b/tools/perf/tests/attr.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The struct perf_event_attr test support.
+ *
+ * This test is embedded into perf directly and is governed
+ * by the PERF_TEST_ATTR environment variable and a hook inside
+ * the sys_perf_event_open function.
+ *
+ * The general idea is to store 'struct perf_event_attr' details for
+ * each event created within a single perf command. Each event's details
+ * are stored in a separate text file. Once the perf command is finished,
+ * these files can be checked against the values we expect for that command.
+ *
+ * Besides the 'struct perf_event_attr' values we also store the 'fd' and
+ * 'group_fd' values to allow checking for the groups created.
+ *
+ * This is all triggered by setting the PERF_TEST_ATTR environment variable.
+ * It must contain the name of an existing directory with access and write
+ * permissions. All the event text files are stored there.
+ */
+
+#include <debug.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <subcmd/exec-cmd.h>
+#include "event.h"
+#include "util.h"
+#include "tests.h"
+#include "pmu.h"
+
+#define ENV "PERF_TEST_ATTR"
+
+static char *dir;
+static bool ready;
+
+void test_attr__init(void)
+{
+ dir = getenv(ENV);
+ test_attr__enabled = (dir != NULL);
+}
+
+#define BUFSIZE 1024
+
+#define __WRITE_ASS(str, fmt, data) \
+do { \
+ char buf[BUFSIZE]; \
+ size_t size; \
+ \
+ size = snprintf(buf, BUFSIZE, #str "=%"fmt "\n", data); \
+ if (1 != fwrite(buf, size, 1, file)) { \
+ perror("test attr - failed to write event file"); \
+ fclose(file); \
+ return -1; \
+ } \
+ \
+} while (0)
+
+#define WRITE_ASS(field, fmt) __WRITE_ASS(field, fmt, attr->field)
+
+static int store_event(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
+ int fd, int group_fd, unsigned long flags)
+{
+ FILE *file;
+ char path[PATH_MAX];
+
+ if (!ready)
+ return 0;
+
+ snprintf(path, PATH_MAX, "%s/event-%d-%llu-%d", dir,
+ attr->type, attr->config, fd);
+
+ file = fopen(path, "w+");
+ if (!file) {
+ perror("test attr - failed to open event file");
+ return -1;
+ }
+
+ if (fprintf(file, "[event-%d-%llu-%d]\n",
+ attr->type, attr->config, fd) < 0) {
+ perror("test attr - failed to write event file");
+ fclose(file);
+ return -1;
+ }
+
+ /* syscall arguments */
+ __WRITE_ASS(fd, "d", fd);
+ __WRITE_ASS(group_fd, "d", group_fd);
+ __WRITE_ASS(cpu, "d", cpu.cpu);
+ __WRITE_ASS(pid, "d", pid);
+ __WRITE_ASS(flags, "lu", flags);
+
+ /* struct perf_event_attr */
+ WRITE_ASS(type, PRIu32);
+ WRITE_ASS(size, PRIu32);
+ WRITE_ASS(config, "llu");
+ WRITE_ASS(sample_period, "llu");
+ WRITE_ASS(sample_type, "llu");
+ WRITE_ASS(read_format, "llu");
+ WRITE_ASS(disabled, "d");
+ WRITE_ASS(inherit, "d");
+ WRITE_ASS(pinned, "d");
+ WRITE_ASS(exclusive, "d");
+ WRITE_ASS(exclude_user, "d");
+ WRITE_ASS(exclude_kernel, "d");
+ WRITE_ASS(exclude_hv, "d");
+ WRITE_ASS(exclude_idle, "d");
+ WRITE_ASS(mmap, "d");
+ WRITE_ASS(comm, "d");
+ WRITE_ASS(freq, "d");
+ WRITE_ASS(inherit_stat, "d");
+ WRITE_ASS(enable_on_exec, "d");
+ WRITE_ASS(task, "d");
+ WRITE_ASS(watermark, "d");
+ WRITE_ASS(precise_ip, "d");
+ WRITE_ASS(mmap_data, "d");
+ WRITE_ASS(sample_id_all, "d");
+ WRITE_ASS(exclude_host, "d");
+ WRITE_ASS(exclude_guest, "d");
+ WRITE_ASS(exclude_callchain_kernel, "d");
+ WRITE_ASS(exclude_callchain_user, "d");
+ WRITE_ASS(mmap2, "d");
+ WRITE_ASS(comm_exec, "d");
+ WRITE_ASS(context_switch, "d");
+ WRITE_ASS(write_backward, "d");
+ WRITE_ASS(namespaces, "d");
+ WRITE_ASS(use_clockid, "d");
+ WRITE_ASS(wakeup_events, PRIu32);
+ WRITE_ASS(bp_type, PRIu32);
+ WRITE_ASS(config1, "llu");
+ WRITE_ASS(config2, "llu");
+ WRITE_ASS(branch_sample_type, "llu");
+ WRITE_ASS(sample_regs_user, "llu");
+ WRITE_ASS(sample_stack_user, PRIu32);
+
+ fclose(file);
+ return 0;
+}
+
+void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
+ int fd, int group_fd, unsigned long flags)
+{
+ int errno_saved = errno;
+
+ if ((fd != -1) && store_event(attr, pid, cpu, fd, group_fd, flags)) {
+ pr_err("test attr FAILED");
+ exit(128);
+ }
+
+ errno = errno_saved;
+}
+
+void test_attr__ready(void)
+{
+ if (unlikely(test_attr__enabled) && !ready)
+ ready = true;
+}
+
+static int run_dir(const char *d, const char *perf)
+{
+ char v[] = "-vvvvv";
+ int vcnt = min(verbose, (int) sizeof(v) - 1);
+ char cmd[3*PATH_MAX];
+
+ if (verbose > 0)
+ vcnt++;
+
+ scnprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
+ d, d, perf, vcnt, v);
+
+ return system(cmd) ? TEST_FAIL : TEST_OK;
+}
+
+static int test__attr(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct stat st;
+ char path_perf[PATH_MAX];
+ char path_dir[PATH_MAX];
+ char *exec_path;
+
+ if (perf_pmu__has_hybrid())
+ return TEST_SKIP;
+
+ /* First try development tree tests. */
+ if (!lstat("./tests", &st))
+ return run_dir("./tests", "./perf");
+
+ exec_path = get_argv_exec_path();
+ if (exec_path == NULL)
+ return -1;
+
+ /* Then installed path. */
+ snprintf(path_dir, PATH_MAX, "%s/tests", exec_path);
+ snprintf(path_perf, PATH_MAX, "%s/perf", BINDIR);
+ free(exec_path);
+
+ if (!lstat(path_dir, &st) &&
+ !lstat(path_perf, &st))
+ return run_dir(path_dir, path_perf);
+
+ return TEST_SKIP;
+}
+
+DEFINE_SUITE("Setup struct perf_event_attr", attr);
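
run_dir() above shells out to the attr.py checker with the detected test directory, perf binary and verbosity. A rough standalone equivalent of the command it builds, assuming a built tools/perf tree so that ./perf and ./tests/attr.py exist:

    # Sketch only: run the attr checker by hand, mirroring run_dir().
    import subprocess, sys

    subprocess.call([sys.executable, "./tests/attr.py",
                     "-d", "./tests/attr/",   # attr test definitions
                     "-p", "./perf",          # perf binary under test
                     "-vv"])                  # verbosity, like the -vvvvv string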
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
new file mode 100644
index 000000000..cb39ac46b
--- /dev/null
+++ b/tools/perf/tests/attr.py
@@ -0,0 +1,397 @@
+# SPDX-License-Identifier: GPL-2.0
+
+from __future__ import print_function
+
+import os
+import sys
+import glob
+import optparse
+import tempfile
+import logging
+import shutil
+
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
+
+def data_equal(a, b):
+ # Allow multiple values in assignment separated by '|'
+ a_list = a.split('|')
+ b_list = b.split('|')
+
+ for a_item in a_list:
+ for b_item in b_list:
+ if (a_item == b_item):
+ return True
+ elif (a_item == '*') or (b_item == '*'):
+ return True
+
+ return False
+
+class Fail(Exception):
+ def __init__(self, test, msg):
+ self.msg = msg
+ self.test = test
+ def getMsg(self):
+ return '\'%s\' - %s' % (self.test.path, self.msg)
+
+class Notest(Exception):
+ def __init__(self, test, arch):
+ self.arch = arch
+ self.test = test
+ def getMsg(self):
+ return '[%s] \'%s\'' % (self.arch, self.test.path)
+
+class Unsup(Exception):
+ def __init__(self, test):
+ self.test = test
+ def getMsg(self):
+ return '\'%s\'' % self.test.path
+
+class Event(dict):
+ terms = [
+ 'cpu',
+ 'flags',
+ 'type',
+ 'size',
+ 'config',
+ 'sample_period',
+ 'sample_type',
+ 'read_format',
+ 'disabled',
+ 'inherit',
+ 'pinned',
+ 'exclusive',
+ 'exclude_user',
+ 'exclude_kernel',
+ 'exclude_hv',
+ 'exclude_idle',
+ 'mmap',
+ 'comm',
+ 'freq',
+ 'inherit_stat',
+ 'enable_on_exec',
+ 'task',
+ 'watermark',
+ 'precise_ip',
+ 'mmap_data',
+ 'sample_id_all',
+ 'exclude_host',
+ 'exclude_guest',
+ 'exclude_callchain_kernel',
+ 'exclude_callchain_user',
+ 'wakeup_events',
+ 'bp_type',
+ 'config1',
+ 'config2',
+ 'branch_sample_type',
+ 'sample_regs_user',
+ 'sample_stack_user',
+ ]
+
+ def add(self, data):
+ for key, val in data:
+ log.debug(" %s = %s" % (key, val))
+ self[key] = val
+
+ def __init__(self, name, data, base):
+ log.debug(" Event %s" % name);
+ self.name = name;
+ self.group = ''
+ self.add(base)
+ self.add(data)
+
+ def equal(self, other):
+ for t in Event.terms:
+ log.debug(" [%s] %s %s" % (t, self[t], other[t]));
+ if t not in self or t not in other:
+ return False
+ if not data_equal(self[t], other[t]):
+ return False
+ return True
+
+ def optional(self):
+ if 'optional' in self and self['optional'] == '1':
+ return True
+ return False
+
+ def diff(self, other):
+ for t in Event.terms:
+ if t not in self or t not in other:
+ continue
+ if not data_equal(self[t], other[t]):
+ log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
+
+# A test file description needs to have the following sections:
+# [config]
+#   - just a single instance in the file
+#   - needs to specify:
+#     'command' - perf command name
+#     'args'    - special command arguments
+#     'ret'     - expected command return value (0 by default)
+#     'arch'    - architecture-specific test (optional)
+#                 comma-separated list; a ! at the beginning negates it
+#
+# [eventX:base]
+#   - one or multiple instances in the file
+#   - expected value assignments
+class Test(object):
+ def __init__(self, path, options):
+ parser = configparser.SafeConfigParser()
+ parser.read(path)
+
+ log.warning("running '%s'" % path)
+
+ self.path = path
+ self.test_dir = options.test_dir
+ self.perf = options.perf
+ self.command = parser.get('config', 'command')
+ self.args = parser.get('config', 'args')
+
+ try:
+ self.ret = parser.get('config', 'ret')
+ except:
+ self.ret = 0
+
+ try:
+ self.arch = parser.get('config', 'arch')
+ log.warning("test limitation '%s'" % self.arch)
+ except:
+ self.arch = ''
+
+ self.expect = {}
+ self.result = {}
+ log.debug(" loading expected events");
+ self.load_events(path, self.expect)
+
+ def is_event(self, name):
+ if name.find("event") == -1:
+ return False
+ else:
+ return True
+
+ def skip_test(self, myarch):
+ # If architecture not set always run test
+ if self.arch == '':
+ # log.warning("test for arch %s is ok" % myarch)
+ return False
+
+ # Allow multiple values in assignment separated by ','
+ arch_list = self.arch.split(',')
+
+ # Handle negated list such as !s390x,ppc
+ if arch_list[0][0] == '!':
+ arch_list[0] = arch_list[0][1:]
+ log.warning("excluded architecture list %s" % arch_list)
+ for arch_item in arch_list:
+ # log.warning("test for %s arch is %s" % (arch_item, myarch))
+ if arch_item == myarch:
+ return True
+ return False
+
+ for arch_item in arch_list:
+ # log.warning("test for architecture '%s' current '%s'" % (arch_item, myarch))
+ if arch_item == myarch:
+ return False
+ return True
+
+ def load_events(self, path, events):
+ parser_event = configparser.SafeConfigParser()
+ parser_event.read(path)
+
+ # The event record section header contains the word 'event',
+ # optionally followed by ':', allowing the 'parent event' to be
+ # loaded first as a base
+ for section in filter(self.is_event, parser_event.sections()):
+
+ parser_items = parser_event.items(section);
+ base_items = {}
+
+ # Read parent event if there's any
+ if (':' in section):
+ base = section[section.index(':') + 1:]
+ parser_base = configparser.SafeConfigParser()
+ parser_base.read(self.test_dir + '/' + base)
+ base_items = parser_base.items('event')
+
+ e = Event(section, parser_items, base_items)
+ events[section] = e
+
+ def run_cmd(self, tempdir):
+ junk1, junk2, junk3, junk4, myarch = (os.uname())
+
+ if self.skip_test(myarch):
+ raise Notest(self, myarch)
+
+ cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
+ self.perf, self.command, tempdir, self.args)
+ ret = os.WEXITSTATUS(os.system(cmd))
+
+ log.info(" '%s' ret '%s', expected '%s'" % (cmd, str(ret), str(self.ret)))
+
+ if not data_equal(str(ret), str(self.ret)):
+ raise Unsup(self)
+
+ def compare(self, expect, result):
+ match = {}
+
+ log.debug(" compare");
+
+ # For each expected event find all matching
+ # events in result. Fail if there's not any.
+ for exp_name, exp_event in expect.items():
+ exp_list = []
+ res_event = {}
+ log.debug(" matching [%s]" % exp_name)
+ for res_name, res_event in result.items():
+ log.debug(" to [%s]" % res_name)
+ if (exp_event.equal(res_event)):
+ exp_list.append(res_name)
+ log.debug(" ->OK")
+ else:
+ log.debug(" ->FAIL");
+
+ log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
+
+ # we did not find any matching event - fail
+ if not exp_list:
+ if exp_event.optional():
+ log.debug(" %s does not match, but is optional" % exp_name)
+ else:
+ if not res_event:
+ log.debug(" res_event is empty");
+ else:
+ exp_event.diff(res_event)
+ raise Fail(self, 'match failure');
+
+ match[exp_name] = exp_list
+
+ # For each defined group in the expected events
+ # check we match the same group in the result.
+ for exp_name, exp_event in expect.items():
+ group = exp_event.group
+
+ if (group == ''):
+ continue
+
+ for res_name in match[exp_name]:
+ res_group = result[res_name].group
+ if res_group not in match[group]:
+ raise Fail(self, 'group failure')
+
+ log.debug(" group: [%s] matches group leader %s" %
+ (exp_name, str(match[group])))
+
+ log.debug(" matched")
+
+ def resolve_groups(self, events):
+ for name, event in events.items():
+ group_fd = event['group_fd'];
+ if group_fd == '-1':
+ continue;
+
+ for iname, ievent in events.items():
+ if (ievent['fd'] == group_fd):
+ event.group = iname
+ log.debug('[%s] has group leader [%s]' % (name, iname))
+ break;
+
+ def run(self):
+ tempdir = tempfile.mkdtemp();
+
+ try:
+ # run the test script
+ self.run_cmd(tempdir);
+
+ # load events expectation for the test
+ log.debug(" loading result events");
+ for f in glob.glob(tempdir + '/event*'):
+ self.load_events(f, self.result);
+
+ # resolve group_fd to event names
+ self.resolve_groups(self.expect);
+ self.resolve_groups(self.result);
+
+ # do the expectation - results matching - both ways
+ self.compare(self.expect, self.result)
+ self.compare(self.result, self.expect)
+
+ finally:
+ # cleanup
+ shutil.rmtree(tempdir)
+
+
+def run_tests(options):
+ for f in glob.glob(options.test_dir + '/' + options.test):
+ try:
+ Test(f, options).run()
+ except Unsup as obj:
+ log.warning("unsupp %s" % obj.getMsg())
+ except Notest as obj:
+ log.warning("skipped %s" % obj.getMsg())
+
+def setup_log(verbose):
+ global log
+ level = logging.CRITICAL
+
+ if verbose == 1:
+ level = logging.WARNING
+ if verbose == 2:
+ level = logging.INFO
+ if verbose >= 3:
+ level = logging.DEBUG
+
+ log = logging.getLogger('test')
+ log.setLevel(level)
+ ch = logging.StreamHandler()
+ ch.setLevel(level)
+ formatter = logging.Formatter('%(message)s')
+ ch.setFormatter(formatter)
+ log.addHandler(ch)
+
+USAGE = '''%s [OPTIONS]
+ -d dir # tests dir
+ -p path # perf binary
+ -t test # single test
+ -v # verbose level
+''' % sys.argv[0]
+
+def main():
+ parser = optparse.OptionParser(usage=USAGE)
+
+ parser.add_option("-t", "--test",
+ action="store", type="string", dest="test")
+ parser.add_option("-d", "--test-dir",
+ action="store", type="string", dest="test_dir")
+ parser.add_option("-p", "--perf",
+ action="store", type="string", dest="perf")
+ parser.add_option("-v", "--verbose",
+ default=0, action="count", dest="verbose")
+
+ options, args = parser.parse_args()
+ if args:
+ parser.error('FAILED wrong arguments %s' % ' '.join(args))
+ return -1
+
+ setup_log(options.verbose)
+
+ if not options.test_dir:
+ print('FAILED no -d option specified')
+ sys.exit(-1)
+
+ if not options.test:
+ options.test = 'test*'
+
+ try:
+ run_tests(options)
+
+ except Fail as obj:
+ print("FAILED %s" % obj.getMsg())
+ sys.exit(-1)
+
+ sys.exit(0)
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/perf/tests/attr/README b/tools/perf/tests/attr/README
new file mode 100644
index 000000000..eb3f7d4bb
--- /dev/null
+++ b/tools/perf/tests/attr/README
@@ -0,0 +1,71 @@
+The struct perf_event_attr test (attr tests) support
+====================================================
+This testing support is embedded into perf directly and is governed
+by the PERF_TEST_ATTR environment variable and a hook inside the
+sys_perf_event_open function.
+
+The general idea is to store 'struct perf_event_attr' details for
+each event created within a single perf command. Each event's details
+are stored in a separate text file. Once the perf command finishes,
+these files are checked against the values we expect for that command.
+
+The attr tests consist of the following parts:
+
+tests/attr.c
+------------
+This is the sys_perf_event_open hook implementation. The hook
+is triggered when the PERF_TEST_ATTR environment variable is
+defined. It must contain the name of an existing directory with
+access and write permissions.
+
+For each sys_perf_event_open call, the event details are stored in
+a separate file. Besides the 'struct perf_event_attr' values we also
+store the 'fd' and 'group_fd' values to allow checking for groups.
+
+tests/attr.py
+-------------
+This is the python script that does all the hard work. It reads
+the test definition, executes it and checks results.
+
+tests/attr/
+-----------
+Directory containing all the attr test definitions.
+The following tests are defined (with their perf commands):
+
+ perf record kill (test-record-basic)
+ perf record -b kill (test-record-branch-any)
+ perf record -j any kill (test-record-branch-filter-any)
+ perf record -j any_call kill (test-record-branch-filter-any_call)
+ perf record -j any_ret kill (test-record-branch-filter-any_ret)
+ perf record -j hv kill (test-record-branch-filter-hv)
+ perf record -j ind_call kill (test-record-branch-filter-ind_call)
+ perf record -j k kill (test-record-branch-filter-k)
+ perf record -j u kill (test-record-branch-filter-u)
+ perf record -c 123 kill (test-record-count)
+ perf record -d kill (test-record-data)
+ perf record -F 100 kill (test-record-freq)
+ perf record -g kill (test-record-graph-default)
+ perf record -g kill (test-record-graph-default-aarch64)
+ perf record --call-graph dwarf kill (test-record-graph-dwarf)
+ perf record --call-graph fp kill (test-record-graph-fp)
+ perf record --call-graph fp kill (test-record-graph-fp-aarch64)
+ perf record --group -e cycles,instructions kill (test-record-group)
+ perf record -e '{cycles,instructions}' kill (test-record-group1)
+ perf record -e '{cycles/period=1/,instructions/period=2/}:S' kill (test-record-group2)
+ perf record --no-buffering kill (test-record-no-buffering)
+ perf record -i kill (test-record-no-inherit)
+ perf record -n kill (test-record-no-samples)
+ perf record -c 100 -P kill (test-record-period)
+ perf record -c 1 --pfm-events=cycles:period=2 (test-record-pfm-period)
+ perf record -R kill (test-record-raw)
+ perf record -c 2 -e arm_spe_0// -- kill (test-record-spe-period)
+ perf record -e arm_spe_0/period=3/ -- kill (test-record-spe-period-term)
+ perf record -e arm_spe_0/pa_enable=1/ -- kill (test-record-spe-physical-address)
+ perf stat -e cycles kill (test-stat-basic)
+ perf stat kill (test-stat-default)
+ perf stat -d kill (test-stat-detailed-1)
+ perf stat -dd kill (test-stat-detailed-2)
+ perf stat -ddd kill (test-stat-detailed-3)
+ perf stat --group -e cycles,instructions kill (test-stat-group)
+ perf stat -e '{cycles,instructions}' kill (test-stat-group1)
+ perf stat -i -e cycles kill (test-stat-no-inherit)
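
The per-event files the README describes can also be inspected by hand: point PERF_TEST_ATTR at a scratch directory, run a perf command, and read the resulting event-<type>-<config>-<fd> files back with configparser. A minimal sketch, assuming a built ./perf binary and using a trivial workload ('true') instead of the tests' 'kill':

    import configparser, glob, os, subprocess, tempfile

    tmpdir = tempfile.mkdtemp()
    env = dict(os.environ, PERF_TEST_ATTR=tmpdir)
    # Every sys_perf_event_open() call made by this command leaves an
    # event-* file behind in tmpdir.
    subprocess.call(["./perf", "record", "-o",
                     os.path.join(tmpdir, "perf.data"), "true"], env=env)

    for path in sorted(glob.glob(os.path.join(tmpdir, "event-*"))):
        cp = configparser.ConfigParser()
        cp.read(path)
        for section in cp.sections():
            # fd, group_fd, type, config, sample_type, ... as written by attr.c
            print(section, dict(cp.items(section)))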
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
new file mode 100644
index 000000000..3ef07a12a
--- /dev/null
+++ b/tools/perf/tests/attr/base-record
@@ -0,0 +1,41 @@
+[event]
+fd=1
+group_fd=-1
+# 0 or PERF_FLAG_FD_CLOEXEC flag
+flags=0|8
+cpu=*
+type=0|1
+size=128
+config=0
+sample_period=*
+sample_type=263
+read_format=0|4|20
+disabled=1
+inherit=1
+pinned=0
+exclusive=0
+exclude_user=0
+exclude_kernel=0|1
+exclude_hv=0|1
+exclude_idle=0
+mmap=1
+comm=1
+freq=1
+inherit_stat=0
+enable_on_exec=1
+task=1
+watermark=0
+precise_ip=0|1|2|3
+mmap_data=0
+sample_id_all=1
+exclude_host=0|1
+exclude_guest=0|1
+exclude_callchain_kernel=0
+exclude_callchain_user=0
+wakeup_events=0
+bp_type=0
+config1=0
+config2=0
+branch_sample_type=0
+sample_regs_user=0
+sample_stack_user=0
diff --git a/tools/perf/tests/attr/base-record-spe b/tools/perf/tests/attr/base-record-spe
new file mode 100644
index 000000000..08fa96b59
--- /dev/null
+++ b/tools/perf/tests/attr/base-record-spe
@@ -0,0 +1,40 @@
+[event]
+fd=*
+group_fd=-1
+flags=*
+cpu=*
+type=*
+size=*
+config=*
+sample_period=*
+sample_type=*
+read_format=*
+disabled=*
+inherit=*
+pinned=*
+exclusive=*
+exclude_user=*
+exclude_kernel=*
+exclude_hv=*
+exclude_idle=*
+mmap=*
+comm=*
+freq=*
+inherit_stat=*
+enable_on_exec=*
+task=*
+watermark=*
+precise_ip=*
+mmap_data=*
+sample_id_all=*
+exclude_host=*
+exclude_guest=*
+exclude_callchain_kernel=*
+exclude_callchain_user=*
+wakeup_events=*
+bp_type=*
+config1=*
+config2=*
+branch_sample_type=*
+sample_regs_user=*
+sample_stack_user=*
diff --git a/tools/perf/tests/attr/base-stat b/tools/perf/tests/attr/base-stat
new file mode 100644
index 000000000..408164456
--- /dev/null
+++ b/tools/perf/tests/attr/base-stat
@@ -0,0 +1,41 @@
+[event]
+fd=1
+group_fd=-1
+# 0 or PERF_FLAG_FD_CLOEXEC flag
+flags=0|8
+cpu=*
+type=0
+size=128
+config=0
+sample_period=0
+sample_type=65536
+read_format=3
+disabled=1
+inherit=1
+pinned=0
+exclusive=0
+exclude_user=0
+exclude_kernel=0|1
+exclude_hv=0
+exclude_idle=0
+mmap=0
+comm=0
+freq=0
+inherit_stat=0
+enable_on_exec=1
+task=0
+watermark=0
+precise_ip=0
+mmap_data=0
+sample_id_all=0
+exclude_host=0|1
+exclude_guest=0|1
+exclude_callchain_kernel=0
+exclude_callchain_user=0
+wakeup_events=0
+bp_type=0
+config1=0
+config2=0
+branch_sample_type=0
+sample_regs_user=0
+sample_stack_user=0
diff --git a/tools/perf/tests/attr/system-wide-dummy b/tools/perf/tests/attr/system-wide-dummy
new file mode 100644
index 000000000..8fec06eda
--- /dev/null
+++ b/tools/perf/tests/attr/system-wide-dummy
@@ -0,0 +1,50 @@
+# Event added by system-wide or CPU perf-record to handle the race of
+# processes starting while /proc is processed.
+[event]
+fd=1
+group_fd=-1
+cpu=*
+pid=-1
+flags=8
+type=1
+size=128
+config=9
+sample_period=4000
+sample_type=455
+read_format=4|20
+# Event will be enabled right away.
+disabled=0
+inherit=1
+pinned=0
+exclusive=0
+exclude_user=0
+exclude_kernel=0
+exclude_hv=0
+exclude_idle=0
+mmap=1
+comm=1
+freq=1
+inherit_stat=0
+enable_on_exec=0
+task=1
+watermark=0
+precise_ip=0
+mmap_data=0
+sample_id_all=1
+exclude_host=0
+exclude_guest=0
+exclude_callchain_kernel=0
+exclude_callchain_user=0
+mmap2=1
+comm_exec=1
+context_switch=0
+write_backward=0
+namespaces=0
+use_clockid=0
+wakeup_events=0
+bp_type=0
+config1=0
+config2=0
+branch_sample_type=0
+sample_regs_user=0
+sample_stack_user=0
diff --git a/tools/perf/tests/attr/test-record-C0 b/tools/perf/tests/attr/test-record-C0
new file mode 100644
index 000000000..317730b90
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-C0
@@ -0,0 +1,22 @@
+[config]
+command = record
+args = --no-bpf-event -C 0 kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+cpu=0
+
+# no enable on exec for CPU attached
+enable_on_exec=0
+
+# PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+# PERF_SAMPLE_ID | PERF_SAMPLE_PERIOD
+# + PERF_SAMPLE_CPU added by -C 0
+sample_type=455
+
+# Dummy event handles mmaps, comm and task.
+mmap=0
+comm=0
+task=0
+
+[event:system-wide-dummy]
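
The numeric sample_type values used throughout these definition files are OR-ed PERF_SAMPLE_* bits; the bit order follows the perf_event.h UAPI enum. A small decoding helper, offered purely as a reading aid:

    PERF_SAMPLE_BITS = ["IP", "TID", "TIME", "ADDR", "READ", "CALLCHAIN", "ID",
                        "CPU", "PERIOD", "STREAM_ID", "RAW", "BRANCH_STACK",
                        "REGS_USER", "STACK_USER", "WEIGHT", "DATA_SRC",
                        "IDENTIFIER", "TRANSACTION", "REGS_INTR", "PHYS_ADDR"]

    def decode_sample_type(value):
        return [name for bit, name in enumerate(PERF_SAMPLE_BITS) if value & (1 << bit)]

    print(decode_sample_type(455))  # ['IP', 'TID', 'TIME', 'ID', 'CPU', 'PERIOD'], as in the comment above
    print(decode_sample_type(263))  # base-record default: ['IP', 'TID', 'TIME', 'PERIOD']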
diff --git a/tools/perf/tests/attr/test-record-basic b/tools/perf/tests/attr/test-record-basic
new file mode 100644
index 000000000..b0ca42a5e
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-basic
@@ -0,0 +1,6 @@
+[config]
+command = record
+args = --no-bpf-event kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
diff --git a/tools/perf/tests/attr/test-record-branch-any b/tools/perf/tests/attr/test-record-branch-any
new file mode 100644
index 000000000..1a99b3ce6
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-branch-any
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -b kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=8
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any b/tools/perf/tests/attr/test-record-branch-filter-any
new file mode 100644
index 000000000..709768b50
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-branch-filter-any
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j any kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=8
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_call b/tools/perf/tests/attr/test-record-branch-filter-any_call
new file mode 100644
index 000000000..f943221f7
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-branch-filter-any_call
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j any_call kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=16
diff --git a/tools/perf/tests/attr/test-record-branch-filter-any_ret b/tools/perf/tests/attr/test-record-branch-filter-any_ret
new file mode 100644
index 000000000..fd4f5b415
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-branch-filter-any_ret
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j any_ret kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=32
diff --git a/tools/perf/tests/attr/test-record-branch-filter-hv b/tools/perf/tests/attr/test-record-branch-filter-hv
new file mode 100644
index 000000000..4e52d685e
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-branch-filter-hv
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j hv kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=8
diff --git a/tools/perf/tests/attr/test-record-branch-filter-ind_call b/tools/perf/tests/attr/test-record-branch-filter-ind_call
new file mode 100644
index 000000000..e08c6ab37
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-branch-filter-ind_call
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j ind_call kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=64
diff --git a/tools/perf/tests/attr/test-record-branch-filter-k b/tools/perf/tests/attr/test-record-branch-filter-k
new file mode 100644
index 000000000..b4b98f84f
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-branch-filter-k
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j k kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=8
diff --git a/tools/perf/tests/attr/test-record-branch-filter-u b/tools/perf/tests/attr/test-record-branch-filter-u
new file mode 100644
index 000000000..fb9610edb
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-branch-filter-u
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -j u kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=2311
+branch_sample_type=8
diff --git a/tools/perf/tests/attr/test-record-count b/tools/perf/tests/attr/test-record-count
new file mode 100644
index 000000000..5e9b9019d
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-count
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event -c 123 kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_period=123
+sample_type=7
+freq=0
diff --git a/tools/perf/tests/attr/test-record-data b/tools/perf/tests/attr/test-record-data
new file mode 100644
index 000000000..a99bb1314
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-data
@@ -0,0 +1,10 @@
+[config]
+command = record
+args = --no-bpf-event -d kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+# sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+# PERF_SAMPLE_ADDR | PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC
+sample_type=33039
+mmap_data=1
diff --git a/tools/perf/tests/attr/test-record-freq b/tools/perf/tests/attr/test-record-freq
new file mode 100644
index 000000000..89e29f6b2
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-freq
@@ -0,0 +1,7 @@
+[config]
+command = record
+args = --no-bpf-event -F 100 kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_period=100
diff --git a/tools/perf/tests/attr/test-record-graph-default b/tools/perf/tests/attr/test-record-graph-default
new file mode 100644
index 000000000..f0a18b4ea
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-graph-default
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event -g kill >/dev/null 2>&1
+ret = 1
+# arm64 enables registers in the default mode (fp)
+arch = !aarch64
+
+[event:base-record]
+sample_type=295
diff --git a/tools/perf/tests/attr/test-record-graph-default-aarch64 b/tools/perf/tests/attr/test-record-graph-default-aarch64
new file mode 100644
index 000000000..e98d62efb
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-graph-default-aarch64
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event -g kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
diff --git a/tools/perf/tests/attr/test-record-graph-dwarf b/tools/perf/tests/attr/test-record-graph-dwarf
new file mode 100644
index 000000000..ae92061d6
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-graph-dwarf
@@ -0,0 +1,12 @@
+[config]
+command = record
+args = --no-bpf-event --call-graph dwarf -- kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=45359
+exclude_callchain_user=1
+sample_stack_user=8192
+# TODO different for each arch, no support for that now
+sample_regs_user=*
+mmap_data=1
diff --git a/tools/perf/tests/attr/test-record-graph-fp b/tools/perf/tests/attr/test-record-graph-fp
new file mode 100644
index 000000000..a6e60e839
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-graph-fp
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
+ret = 1
+# arm64 enables registers in fp mode
+arch = !aarch64
+
+[event:base-record]
+sample_type=295
diff --git a/tools/perf/tests/attr/test-record-graph-fp-aarch64 b/tools/perf/tests/attr/test-record-graph-fp-aarch64
new file mode 100644
index 000000000..cbeea9971
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-graph-fp-aarch64
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
diff --git a/tools/perf/tests/attr/test-record-group b/tools/perf/tests/attr/test-record-group
new file mode 100644
index 000000000..6c1cff8aa
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-group
@@ -0,0 +1,22 @@
+[config]
+command = record
+args = --no-bpf-event --group -e cycles,instructions kill >/dev/null 2>&1
+ret = 1
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+sample_type=327
+read_format=4|20
+
+[event-2:base-record]
+fd=2
+group_fd=1
+config=1
+sample_type=327
+read_format=4|20
+mmap=0
+comm=0
+task=0
+enable_on_exec=0
+disabled=0
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
new file mode 100644
index 000000000..97e7e64a3
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -0,0 +1,39 @@
+[config]
+command = record
+args = --no-bpf-event -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
+ret = 1
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+sample_type=343
+read_format=12|28
+inherit=0
+
+[event-2:base-record]
+fd=2
+group_fd=1
+
+# cache-misses
+type=0
+config=3
+
+# default | PERF_SAMPLE_READ
+sample_type=343
+
+# PERF_FORMAT_ID | PERF_FORMAT_GROUP | PERF_FORMAT_LOST
+read_format=12|28
+task=0
+mmap=0
+comm=0
+enable_on_exec=0
+disabled=0
+
+# inherit is disabled for group sampling
+inherit=0
+
+# sampling disabled
+sample_freq=0
+sample_period=0
+freq=0
+write_backward=0
diff --git a/tools/perf/tests/attr/test-record-group1 b/tools/perf/tests/attr/test-record-group1
new file mode 100644
index 000000000..eeb1db392
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-group1
@@ -0,0 +1,23 @@
+[config]
+command = record
+args = --no-bpf-event -e '{cycles,instructions}' kill >/dev/null 2>&1
+ret = 1
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+sample_type=327
+read_format=4|20
+
+[event-2:base-record]
+fd=2
+group_fd=1
+type=0
+config=1
+sample_type=327
+read_format=4|20
+mmap=0
+comm=0
+task=0
+enable_on_exec=0
+disabled=0
diff --git a/tools/perf/tests/attr/test-record-group2 b/tools/perf/tests/attr/test-record-group2
new file mode 100644
index 000000000..cebdaa8e6
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-group2
@@ -0,0 +1,29 @@
+[config]
+command = record
+args = --no-bpf-event -e '{cycles/period=1234000/,instructions/period=6789000/}:S' kill >/dev/null 2>&1
+ret = 1
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+config=0|1
+sample_period=1234000
+sample_type=87
+read_format=12|28
+inherit=0
+freq=0
+
+[event-2:base-record]
+fd=2
+group_fd=1
+config=0|1
+sample_period=6789000
+sample_type=87
+read_format=12|28
+disabled=0
+inherit=0
+mmap=0
+comm=0
+freq=0
+enable_on_exec=0
+task=0
diff --git a/tools/perf/tests/attr/test-record-no-buffering b/tools/perf/tests/attr/test-record-no-buffering
new file mode 100644
index 000000000..583dcbb07
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-no-buffering
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event --no-buffering kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=263
+watermark=0
+wakeup_events=1
diff --git a/tools/perf/tests/attr/test-record-no-inherit b/tools/perf/tests/attr/test-record-no-inherit
new file mode 100644
index 000000000..15d1dc162
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-no-inherit
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -i kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=263
+inherit=0
diff --git a/tools/perf/tests/attr/test-record-no-samples b/tools/perf/tests/attr/test-record-no-samples
new file mode 100644
index 000000000..596fbd6d5
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-no-samples
@@ -0,0 +1,7 @@
+[config]
+command = record
+args = --no-bpf-event -n kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_period=0
diff --git a/tools/perf/tests/attr/test-record-period b/tools/perf/tests/attr/test-record-period
new file mode 100644
index 000000000..119101154
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-period
@@ -0,0 +1,8 @@
+[config]
+command = record
+args = --no-bpf-event -c 100 -P kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_period=100
+freq=0
diff --git a/tools/perf/tests/attr/test-record-pfm-period b/tools/perf/tests/attr/test-record-pfm-period
new file mode 100644
index 000000000..368f5b814
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-pfm-period
@@ -0,0 +1,9 @@
+[config]
+command = record
+args = --no-bpf-event -c 10000 --pfm-events=cycles:period=77777 kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_period=77777
+sample_type=7
+freq=0
diff --git a/tools/perf/tests/attr/test-record-raw b/tools/perf/tests/attr/test-record-raw
new file mode 100644
index 000000000..13a5f7860
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-raw
@@ -0,0 +1,7 @@
+[config]
+command = record
+args = --no-bpf-event -R kill >/dev/null 2>&1
+ret = 1
+
+[event:base-record]
+sample_type=1415
diff --git a/tools/perf/tests/attr/test-record-spe-period b/tools/perf/tests/attr/test-record-spe-period
new file mode 100644
index 000000000..75f8c9cd8
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-spe-period
@@ -0,0 +1,12 @@
+[config]
+command = record
+args = --no-bpf-event -c 2 -e arm_spe_0// -- kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event-10:base-record-spe]
+sample_period=2
+freq=0
+
+# dummy event
+[event-1:base-record-spe]
diff --git a/tools/perf/tests/attr/test-record-spe-period-term b/tools/perf/tests/attr/test-record-spe-period-term
new file mode 100644
index 000000000..8f60a4fec
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-spe-period-term
@@ -0,0 +1,12 @@
+[config]
+command = record
+args = --no-bpf-event -e arm_spe_0/period=3/ -- kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event-10:base-record-spe]
+sample_period=3
+freq=0
+
+# dummy event
+[event-1:base-record-spe]
diff --git a/tools/perf/tests/attr/test-record-spe-physical-address b/tools/perf/tests/attr/test-record-spe-physical-address
new file mode 100644
index 000000000..7ebcf5012
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-spe-physical-address
@@ -0,0 +1,12 @@
+[config]
+command = record
+args = --no-bpf-event -e arm_spe_0/pa_enable=1/ -- kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event-10:base-record-spe]
+# 622727 is the decimal of IP|TID|TIME|CPU|IDENTIFIER|DATA_SRC|PHYS_ADDR
+sample_type=622727
+
+# dummy event
+[event-1:base-record-spe]
\ No newline at end of file
diff --git a/tools/perf/tests/attr/test-stat-C0 b/tools/perf/tests/attr/test-stat-C0
new file mode 100644
index 000000000..a2c76d10b
--- /dev/null
+++ b/tools/perf/tests/attr/test-stat-C0
@@ -0,0 +1,10 @@
+[config]
+command = stat
+args = -e cycles -C 0 kill >/dev/null 2>&1
+ret = 1
+
+[event:base-stat]
+# events are disabled by default when attached to cpu
+disabled=1
+enable_on_exec=0
+optional=1
diff --git a/tools/perf/tests/attr/test-stat-basic b/tools/perf/tests/attr/test-stat-basic
new file mode 100644
index 000000000..69867d049
--- /dev/null
+++ b/tools/perf/tests/attr/test-stat-basic
@@ -0,0 +1,7 @@
+[config]
+command = stat
+args = -e cycles kill >/dev/null 2>&1
+ret = 1
+
+[event:base-stat]
+optional=1
diff --git a/tools/perf/tests/attr/test-stat-default b/tools/perf/tests/attr/test-stat-default
new file mode 100644
index 000000000..d8ea6a881
--- /dev/null
+++ b/tools/perf/tests/attr/test-stat-default
@@ -0,0 +1,167 @@
+[config]
+command = stat
+args = kill >/dev/null 2>&1
+ret = 1
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK
+[event1:base-stat]
+fd=1
+type=1
+config=1
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES
+[event2:base-stat]
+fd=2
+type=1
+config=3
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS
+[event3:base-stat]
+fd=3
+type=1
+config=4
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS
+[event4:base-stat]
+fd=4
+type=1
+config=2
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES
+[event5:base-stat]
+fd=5
+type=0
+config=0
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+[event6:base-stat]
+fd=6
+type=0
+config=7
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+[event7:base-stat]
+fd=7
+type=0
+config=8
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS
+[event8:base-stat]
+fd=8
+type=0
+config=1
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
+[event9:base-stat]
+fd=9
+type=0
+config=4
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
+[event10:base-stat]
+fd=10
+type=0
+config=5
+optional=1
+
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
diff --git a/tools/perf/tests/attr/test-stat-detailed-1 b/tools/perf/tests/attr/test-stat-detailed-1
new file mode 100644
index 000000000..b656ab93c
--- /dev/null
+++ b/tools/perf/tests/attr/test-stat-detailed-1
@@ -0,0 +1,208 @@
+[config]
+command = stat
+args = -d kill >/dev/null 2>&1
+ret = 1
+
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK
+[event1:base-stat]
+fd=1
+type=1
+config=1
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES
+[event2:base-stat]
+fd=2
+type=1
+config=3
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS
+[event3:base-stat]
+fd=3
+type=1
+config=4
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS
+[event4:base-stat]
+fd=4
+type=1
+config=2
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES
+[event5:base-stat]
+fd=5
+type=0
+config=0
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+[event6:base-stat]
+fd=6
+type=0
+config=7
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+[event7:base-stat]
+fd=7
+type=0
+config=8
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS
+[event8:base-stat]
+fd=8
+type=0
+config=1
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
+[event9:base-stat]
+fd=9
+type=0
+config=4
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
+[event10:base-stat]
+fd=10
+type=0
+config=5
+optional=1
+
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event20:base-stat]
+fd=20
+type=3
+config=0
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event21:base-stat]
+fd=21
+type=3
+config=65536
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event22:base-stat]
+fd=22
+type=3
+config=2
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event23:base-stat]
+fd=23
+type=3
+config=65538
+optional=1
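
The PERF_TYPE_HW_CACHE config values above follow the generic cache encoding spelled out in the comments: cache id in the low byte, operation shifted left by 8, result shifted left by 16. A small stand-alone sketch, illustration only and not part of the patch, that reproduces one of the expected values:

    #include <stdio.h>
    #include <linux/perf_event.h>

    #define CACHE_CONFIG(cache, op, result) \
            ((cache) | ((op) << 8) | ((result) << 16))

    int main(void)
    {
            /* LL read miss: 2 | (0 << 8) | (1 << 16) = 65538, i.e. event23 above */
            printf("%d\n", CACHE_CONFIG(PERF_COUNT_HW_CACHE_LL,
                                        PERF_COUNT_HW_CACHE_OP_READ,
                                        PERF_COUNT_HW_CACHE_RESULT_MISS));
            return 0;
    }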
diff --git a/tools/perf/tests/attr/test-stat-detailed-2 b/tools/perf/tests/attr/test-stat-detailed-2
new file mode 100644
index 000000000..97625090a
--- /dev/null
+++ b/tools/perf/tests/attr/test-stat-detailed-2
@@ -0,0 +1,268 @@
+[config]
+command = stat
+args = -dd kill >/dev/null 2>&1
+ret = 1
+
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK
+[event1:base-stat]
+fd=1
+type=1
+config=1
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES
+[event2:base-stat]
+fd=2
+type=1
+config=3
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS
+[event3:base-stat]
+fd=3
+type=1
+config=4
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS
+[event4:base-stat]
+fd=4
+type=1
+config=2
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES
+[event5:base-stat]
+fd=5
+type=0
+config=0
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+[event6:base-stat]
+fd=6
+type=0
+config=7
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+[event7:base-stat]
+fd=7
+type=0
+config=8
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS
+[event8:base-stat]
+fd=8
+type=0
+config=1
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
+[event9:base-stat]
+fd=9
+type=0
+config=4
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
+[event10:base-stat]
+fd=10
+type=0
+config=5
+optional=1
+
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event20:base-stat]
+fd=20
+type=3
+config=0
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event21:base-stat]
+fd=21
+type=3
+config=65536
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event22:base-stat]
+fd=22
+type=3
+config=2
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event23:base-stat]
+fd=23
+type=3
+config=65538
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_L1I << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event24:base-stat]
+fd=24
+type=3
+config=1
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_L1I << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event25:base-stat]
+fd=25
+type=3
+config=65537
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_DTLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event26:base-stat]
+fd=26
+type=3
+config=3
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_DTLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event27:base-stat]
+fd=27
+type=3
+config=65539
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_ITLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event28:base-stat]
+fd=28
+type=3
+config=4
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_ITLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event29:base-stat]
+fd=29
+type=3
+config=65540
+optional=1
diff --git a/tools/perf/tests/attr/test-stat-detailed-3 b/tools/perf/tests/attr/test-stat-detailed-3
new file mode 100644
index 000000000..d555042e3
--- /dev/null
+++ b/tools/perf/tests/attr/test-stat-detailed-3
@@ -0,0 +1,288 @@
+[config]
+command = stat
+args = -ddd kill >/dev/null 2>&1
+ret = 1
+
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_TASK_CLOCK
+[event1:base-stat]
+fd=1
+type=1
+config=1
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CONTEXT_SWITCHES
+[event2:base-stat]
+fd=2
+type=1
+config=3
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_CPU_MIGRATIONS
+[event3:base-stat]
+fd=3
+type=1
+config=4
+
+# PERF_TYPE_SOFTWARE / PERF_COUNT_SW_PAGE_FAULTS
+[event4:base-stat]
+fd=4
+type=1
+config=2
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES
+[event5:base-stat]
+fd=5
+type=0
+config=0
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+[event6:base-stat]
+fd=6
+type=0
+config=7
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+[event7:base-stat]
+fd=7
+type=0
+config=8
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_INSTRUCTIONS
+[event8:base-stat]
+fd=8
+type=0
+config=1
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_INSTRUCTIONS
+[event9:base-stat]
+fd=9
+type=0
+config=4
+optional=1
+
+# PERF_TYPE_HARDWARE / PERF_COUNT_HW_BRANCH_MISSES
+[event10:base-stat]
+fd=10
+type=0
+config=5
+optional=1
+
+# PERF_TYPE_RAW / slots (0x400)
+[event11:base-stat]
+fd=11
+group_fd=-1
+type=4
+config=1024
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-retiring (0x8000)
+[event12:base-stat]
+fd=12
+group_fd=11
+type=4
+config=32768
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+[event13:base-stat]
+fd=13
+group_fd=11
+type=4
+config=33024
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+[event14:base-stat]
+fd=14
+group_fd=11
+type=4
+config=33280
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+[event15:base-stat]
+fd=15
+group_fd=11
+type=4
+config=33536
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+[event16:base-stat]
+fd=16
+group_fd=11
+type=4
+config=33792
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+[event17:base-stat]
+fd=17
+group_fd=11
+type=4
+config=34048
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+[event18:base-stat]
+fd=18
+group_fd=11
+type=4
+config=34304
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+[event19:base-stat]
+fd=19
+group_fd=11
+type=4
+config=34560
+disabled=0
+enable_on_exec=0
+read_format=15
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event20:base-stat]
+fd=20
+type=3
+config=0
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event21:base-stat]
+fd=21
+type=3
+config=65536
+optional=1
+
+# PERF_TYPE_HW_CACHE /
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event22:base-stat]
+fd=22
+type=3
+config=2
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_LL << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event23:base-stat]
+fd=23
+type=3
+config=65538
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_L1I << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event24:base-stat]
+fd=24
+type=3
+config=1
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_L1I << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event25:base-stat]
+fd=25
+type=3
+config=65537
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_DTLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event26:base-stat]
+fd=26
+type=3
+config=3
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_DTLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event27:base-stat]
+fd=27
+type=3
+config=65539
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_ITLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event28:base-stat]
+fd=28
+type=3
+config=4
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_ITLB << 0 |
+# (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event29:base-stat]
+fd=29
+type=3
+config=65540
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16)
+[event30:base-stat]
+fd=30
+type=3
+config=512
+optional=1
+
+# PERF_TYPE_HW_CACHE,
+# PERF_COUNT_HW_CACHE_L1D << 0 |
+# (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
+# (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
+[event31:base-stat]
+fd=31
+type=3
+config=66048
+optional=1
diff --git a/tools/perf/tests/attr/test-stat-group b/tools/perf/tests/attr/test-stat-group
new file mode 100644
index 000000000..e15d6946e
--- /dev/null
+++ b/tools/perf/tests/attr/test-stat-group
@@ -0,0 +1,17 @@
+[config]
+command = stat
+args = --group -e cycles,instructions kill >/dev/null 2>&1
+ret = 1
+
+[event-1:base-stat]
+fd=1
+group_fd=-1
+read_format=3|15
+
+[event-2:base-stat]
+fd=2
+group_fd=1
+config=1
+disabled=0
+enable_on_exec=0
+read_format=3|15
diff --git a/tools/perf/tests/attr/test-stat-group1 b/tools/perf/tests/attr/test-stat-group1
new file mode 100644
index 000000000..174675112
--- /dev/null
+++ b/tools/perf/tests/attr/test-stat-group1
@@ -0,0 +1,17 @@
+[config]
+command = stat
+args = -e '{cycles,instructions}' kill >/dev/null 2>&1
+ret = 1
+
+[event-1:base-stat]
+fd=1
+group_fd=-1
+read_format=3|15
+
+[event-2:base-stat]
+fd=2
+group_fd=1
+config=1
+disabled=0
+enable_on_exec=0
+read_format=3|15
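
read_format is given as 3|15, which the attr harness presumably treats as a list of acceptable alternatives. Decoded against the PERF_FORMAT_* flags from the perf_event_open(2) uapi header (illustration only, not part of the patch), the two alternatives are:

    #include <stdio.h>
    #include <linux/perf_event.h>

    int main(void)
    {
            int base = PERF_FORMAT_TOTAL_TIME_ENABLED |
                       PERF_FORMAT_TOTAL_TIME_RUNNING;               /* 3  */
            int grouped = base | PERF_FORMAT_ID | PERF_FORMAT_GROUP; /* 15 */

            printf("%d %d\n", base, grouped);
            return 0;
    }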
diff --git a/tools/perf/tests/attr/test-stat-no-inherit b/tools/perf/tests/attr/test-stat-no-inherit
new file mode 100644
index 000000000..924fbb930
--- /dev/null
+++ b/tools/perf/tests/attr/test-stat-no-inherit
@@ -0,0 +1,8 @@
+[config]
+command = stat
+args = -i -e cycles kill >/dev/null 2>&1
+ret = 1
+
+[event:base-stat]
+inherit=0
+optional=1
diff --git a/tools/perf/tests/backward-ring-buffer.c b/tools/perf/tests/backward-ring-buffer.c
new file mode 100644
index 000000000..79a980b1e
--- /dev/null
+++ b/tools/perf/tests/backward-ring-buffer.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test backward bit in event attribute, read ring buffer from end to
+ * beginning
+ */
+
+#include <evlist.h>
+#include <sys/prctl.h>
+#include "record.h"
+#include "tests.h"
+#include "debug.h"
+#include "parse-events.h"
+#include "util/mmap.h"
+#include <errno.h>
+#include <linux/string.h>
+#include <perf/mmap.h>
+
+#define NR_ITERS 111
+
+static void testcase(void)
+{
+ int i;
+
+ for (i = 0; i < NR_ITERS; i++) {
+ char proc_name[15];
+
+ snprintf(proc_name, sizeof(proc_name), "p:%d\n", i);
+ prctl(PR_SET_NAME, proc_name);
+ }
+}
+
+static int count_samples(struct evlist *evlist, int *sample_count,
+ int *comm_count)
+{
+ int i;
+
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ struct mmap *map = &evlist->overwrite_mmap[i];
+ union perf_event *event;
+
+ perf_mmap__read_init(&map->core);
+ while ((event = perf_mmap__read_event(&map->core)) != NULL) {
+ const u32 type = event->header.type;
+
+ switch (type) {
+ case PERF_RECORD_SAMPLE:
+ (*sample_count)++;
+ break;
+ case PERF_RECORD_COMM:
+ (*comm_count)++;
+ break;
+ default:
+ pr_err("Unexpected record of type %d\n", type);
+ return TEST_FAIL;
+ }
+ }
+ perf_mmap__read_done(&map->core);
+ }
+ return TEST_OK;
+}
+
+static int do_test(struct evlist *evlist, int mmap_pages,
+ int *sample_count, int *comm_count)
+{
+ int err;
+ char sbuf[STRERR_BUFSIZE];
+
+ err = evlist__mmap(evlist, mmap_pages);
+ if (err < 0) {
+ pr_debug("evlist__mmap: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ return TEST_FAIL;
+ }
+
+ evlist__enable(evlist);
+ testcase();
+ evlist__disable(evlist);
+
+ err = count_samples(evlist, sample_count, comm_count);
+ evlist__munmap(evlist);
+ return err;
+}
+
+
+static int test__backward_ring_buffer(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int ret = TEST_SKIP, err, sample_count = 0, comm_count = 0;
+ char pid[16], sbuf[STRERR_BUFSIZE];
+ struct evlist *evlist;
+ struct evsel *evsel __maybe_unused;
+ struct parse_events_error parse_error;
+ struct record_opts opts = {
+ .target = {
+ .uid = UINT_MAX,
+ .uses_mmap = true,
+ },
+ .freq = 0,
+ .mmap_pages = 256,
+ .default_interval = 1,
+ };
+
+ snprintf(pid, sizeof(pid), "%d", getpid());
+ pid[sizeof(pid) - 1] = '\0';
+ opts.target.tid = opts.target.pid = pid;
+
+ evlist = evlist__new();
+ if (!evlist) {
+ pr_debug("Not enough memory to create evlist\n");
+ return TEST_FAIL;
+ }
+
+ err = evlist__create_maps(evlist, &opts.target);
+ if (err < 0) {
+ pr_debug("Not enough memory to create thread/cpu maps\n");
+ goto out_delete_evlist;
+ }
+
+ parse_events_error__init(&parse_error);
+ /*
+	 * Set the backward bit so the ring buffer is written from the end.
+	 * Record it in the aux evlist.
+ */
+ err = parse_events(evlist, "syscalls:sys_enter_prctl/overwrite/", &parse_error);
+ parse_events_error__exit(&parse_error);
+ if (err) {
+ pr_debug("Failed to parse tracepoint event, try use root\n");
+ ret = TEST_SKIP;
+ goto out_delete_evlist;
+ }
+
+ evlist__config(evlist, &opts, NULL);
+
+ err = evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ ret = TEST_FAIL;
+ err = do_test(evlist, opts.mmap_pages, &sample_count,
+ &comm_count);
+ if (err != TEST_OK)
+ goto out_delete_evlist;
+
+ if ((sample_count != NR_ITERS) || (comm_count != NR_ITERS)) {
+ pr_err("Unexpected counter: sample_count=%d, comm_count=%d\n",
+ sample_count, comm_count);
+ goto out_delete_evlist;
+ }
+
+ evlist__close(evlist);
+
+ err = evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ err = do_test(evlist, 1, &sample_count, &comm_count);
+ if (err != TEST_OK)
+ goto out_delete_evlist;
+
+ ret = TEST_OK;
+out_delete_evlist:
+ evlist__delete(evlist);
+ return ret;
+}
+
+DEFINE_SUITE("Read backward ring buffer", backward_ring_buffer);
diff --git a/tools/perf/tests/bitmap.c b/tools/perf/tests/bitmap.c
new file mode 100644
index 000000000..4965dd666
--- /dev/null
+++ b/tools/perf/tests/bitmap.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <linux/bitmap.h>
+#include <perf/cpumap.h>
+#include <internal/cpumap.h>
+#include "tests.h"
+#include "debug.h"
+
+#define NBITS 100
+
+static unsigned long *get_bitmap(const char *str, int nbits)
+{
+ struct perf_cpu_map *map = perf_cpu_map__new(str);
+ unsigned long *bm = NULL;
+ int i;
+
+ bm = bitmap_zalloc(nbits);
+
+ if (map && bm) {
+ for (i = 0; i < perf_cpu_map__nr(map); i++)
+ set_bit(perf_cpu_map__cpu(map, i).cpu, bm);
+ }
+
+ if (map)
+ perf_cpu_map__put(map);
+ return bm;
+}
+
+static int test_bitmap(const char *str)
+{
+ unsigned long *bm = get_bitmap(str, NBITS);
+ char buf[100];
+ int ret;
+
+ bitmap_scnprintf(bm, NBITS, buf, sizeof(buf));
+ pr_debug("bitmap: %s\n", buf);
+
+ ret = !strcmp(buf, str);
+ free(bm);
+ return ret;
+}
+
+static int test__bitmap_print(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,5"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,3,5,7,9,11,13,15,17,19,21-40"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("2-5"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,3-6,8-10,24,35-37"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1,3-6,8-10,24,35-37"));
+ TEST_ASSERT_VAL("failed to convert map", test_bitmap("1-10,12-20,22-30,32-40"));
+ return 0;
+}
+
+DEFINE_SUITE("Print bitmap", bitmap_print);
diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
new file mode 100644
index 000000000..6f921db33
--- /dev/null
+++ b/tools/perf/tests/bp_account.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
+ * 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
+ */
+#define __SANE_USERSPACE_TYPES__
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <linux/hw_breakpoint.h>
+
+#include "tests.h"
+#include "debug.h"
+#include "event.h"
+#include "../perf-sys.h"
+#include "cloexec.h"
+
+/*
+ * PowerPC and S390 do not support creation of instruction breakpoints using the
+ * perf_event interface.
+ *
+ * Just disable the test for these architectures until these issues are
+ * resolved.
+ */
+#if defined(__powerpc__) || defined(__s390x__)
+#define BP_ACCOUNT_IS_SUPPORTED 0
+#else
+#define BP_ACCOUNT_IS_SUPPORTED 1
+#endif
+
+static volatile long the_var;
+
+static noinline int test_function(void)
+{
+ return 0;
+}
+
+static int __event(bool is_x, void *addr, struct perf_event_attr *attr)
+{
+ int fd;
+
+ memset(attr, 0, sizeof(struct perf_event_attr));
+ attr->type = PERF_TYPE_BREAKPOINT;
+ attr->size = sizeof(struct perf_event_attr);
+
+ attr->config = 0;
+ attr->bp_type = is_x ? HW_BREAKPOINT_X : HW_BREAKPOINT_W;
+ attr->bp_addr = (unsigned long) addr;
+ attr->bp_len = sizeof(long);
+
+ attr->sample_period = 1;
+ attr->sample_type = PERF_SAMPLE_IP;
+
+ attr->exclude_kernel = 1;
+ attr->exclude_hv = 1;
+
+ fd = sys_perf_event_open(attr, -1, 0, -1,
+ perf_event_open_cloexec_flag());
+ if (fd < 0) {
+ pr_debug("failed opening event %llx\n", attr->config);
+ return TEST_FAIL;
+ }
+
+ return fd;
+}
+
+static int wp_event(void *addr, struct perf_event_attr *attr)
+{
+ return __event(false, addr, attr);
+}
+
+static int bp_event(void *addr, struct perf_event_attr *attr)
+{
+ return __event(true, addr, attr);
+}
+
+static int bp_accounting(int wp_cnt, int share)
+{
+ struct perf_event_attr attr, attr_mod, attr_new;
+ int i, fd[wp_cnt], fd_wp, ret;
+
+ for (i = 0; i < wp_cnt; i++) {
+ fd[i] = wp_event((void *)&the_var, &attr);
+ TEST_ASSERT_VAL("failed to create wp\n", fd[i] != -1);
+ pr_debug("wp %d created\n", i);
+ }
+
+ attr_mod = attr;
+ attr_mod.bp_type = HW_BREAKPOINT_X;
+ attr_mod.bp_addr = (unsigned long) test_function;
+
+ ret = ioctl(fd[0], PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr_mod);
+ TEST_ASSERT_VAL("failed to modify wp\n", ret == 0);
+
+ pr_debug("wp 0 modified to bp\n");
+
+ if (!share) {
+ fd_wp = wp_event((void *)&the_var, &attr_new);
+ TEST_ASSERT_VAL("failed to create max wp\n", fd_wp != -1);
+ pr_debug("wp max created\n");
+ }
+
+ for (i = 0; i < wp_cnt; i++)
+ close(fd[i]);
+
+ return 0;
+}
+
+static int detect_cnt(bool is_x)
+{
+ struct perf_event_attr attr;
+ void *addr = is_x ? (void *)test_function : (void *)&the_var;
+ int fd[100], cnt = 0, i;
+
+ while (1) {
+ if (cnt == 100) {
+ pr_debug("way too many debug registers, fix the test\n");
+ return 0;
+ }
+ fd[cnt] = __event(is_x, addr, &attr);
+
+ if (fd[cnt] < 0)
+ break;
+ cnt++;
+ }
+
+ for (i = 0; i < cnt; i++)
+ close(fd[i]);
+
+ return cnt;
+}
+
+static int detect_ioctl(void)
+{
+ struct perf_event_attr attr;
+ int fd, ret = 1;
+
+ fd = wp_event((void *) &the_var, &attr);
+ if (fd > 0) {
+ ret = ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &attr);
+ close(fd);
+ }
+
+ return ret ? 0 : 1;
+}
+
+static int detect_share(int wp_cnt, int bp_cnt)
+{
+ struct perf_event_attr attr;
+ int i, *fd = NULL, ret = -1;
+
+ if (wp_cnt + bp_cnt == 0)
+ return 0;
+
+ fd = malloc(sizeof(int) * (wp_cnt + bp_cnt));
+ if (!fd)
+ return -1;
+
+ for (i = 0; i < wp_cnt; i++) {
+ fd[i] = wp_event((void *)&the_var, &attr);
+ if (fd[i] == -1) {
+ pr_err("failed to create wp\n");
+ goto out;
+ }
+ }
+
+ for (; i < (bp_cnt + wp_cnt); i++) {
+ fd[i] = bp_event((void *)test_function, &attr);
+ if (fd[i] == -1)
+ break;
+ }
+
+ ret = i != (bp_cnt + wp_cnt);
+
+out:
+ while (i--)
+ close(fd[i]);
+
+ free(fd);
+ return ret;
+}
+
+/*
+ * This test does the following:
+ *   - detects the number of watch/break-points,
+ *     skips the test if either is missing
+ *   - detects the PERF_EVENT_IOC_MODIFY_ATTRIBUTES ioctl,
+ *     skips the test if it's missing
+ *   - detects whether watchpoints and breakpoints share
+ *     the same slots
+ *   - creates all possible watchpoints on cpu 0
+ *   - changes one of them to a breakpoint
+ *   - in case wp and bp do not share slots,
+ *     creates another watchpoint to ensure
+ *     the slot accounting is correct
+ */
+static int test__bp_accounting(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int has_ioctl = detect_ioctl();
+ int wp_cnt = detect_cnt(false);
+ int bp_cnt = detect_cnt(true);
+ int share = detect_share(wp_cnt, bp_cnt);
+
+ if (!BP_ACCOUNT_IS_SUPPORTED) {
+ pr_debug("Test not supported on this architecture");
+ return TEST_SKIP;
+ }
+
+ pr_debug("watchpoints count %d, breakpoints count %d, has_ioctl %d, share %d\n",
+ wp_cnt, bp_cnt, has_ioctl, share);
+
+ if (!wp_cnt || !bp_cnt || !has_ioctl)
+ return TEST_SKIP;
+
+ return bp_accounting(wp_cnt, share);
+}
+
+DEFINE_SUITE("Breakpoint accounting", bp_accounting);
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
new file mode 100644
index 000000000..1f2908f02
--- /dev/null
+++ b/tools/perf/tests/bp_signal.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Inspired by breakpoint overflow test done by
+ * Vince Weaver <vincent.weaver@maine.edu> for perf_event_tests
+ * (git://github.com/deater/perf_event_tests)
+ */
+
+/*
+ * Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
+ * 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
+ */
+#define __SANE_USERSPACE_TYPES__
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <time.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <linux/compiler.h>
+#include <linux/hw_breakpoint.h>
+
+#include "tests.h"
+#include "debug.h"
+#include "event.h"
+#include "perf-sys.h"
+#include "cloexec.h"
+
+static int fd1;
+static int fd2;
+static int fd3;
+static int overflows;
+static int overflows_2;
+
+volatile long the_var;
+
+
+/*
+ * Use ASM to ensure watchpoint and breakpoint can be triggered
+ * at one instruction.
+ */
+#if defined (__x86_64__)
+extern void __test_function(volatile long *ptr);
+asm (
+ ".pushsection .text;"
+ ".globl __test_function\n"
+ ".type __test_function, @function;"
+ "__test_function:\n"
+ "incq (%rdi)\n"
+ "ret\n"
+ ".popsection\n");
+#else
+static void __test_function(volatile long *ptr)
+{
+ *ptr = 0x1234;
+}
+#endif
+
+static noinline int test_function(void)
+{
+ __test_function(&the_var);
+ the_var++;
+ return time(NULL);
+}
+
+static void sig_handler_2(int signum __maybe_unused,
+ siginfo_t *oh __maybe_unused,
+ void *uc __maybe_unused)
+{
+ overflows_2++;
+ if (overflows_2 > 10) {
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0);
+ ioctl(fd3, PERF_EVENT_IOC_DISABLE, 0);
+ }
+}
+
+static void sig_handler(int signum __maybe_unused,
+ siginfo_t *oh __maybe_unused,
+ void *uc __maybe_unused)
+{
+ overflows++;
+
+ if (overflows > 10) {
+ /*
+		 * This should be executed only once during
+		 * this test; if we get here for the 10th
+		 * time, consider it the recursion issue.
+		 *
+		 * We can get out of here by disabling the events,
+		 * so that no new SIGIO is delivered.
+ */
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0);
+ ioctl(fd3, PERF_EVENT_IOC_DISABLE, 0);
+ }
+}
+
+static int __event(bool is_x, void *addr, int sig)
+{
+ struct perf_event_attr pe;
+ int fd;
+
+ memset(&pe, 0, sizeof(struct perf_event_attr));
+ pe.type = PERF_TYPE_BREAKPOINT;
+ pe.size = sizeof(struct perf_event_attr);
+
+ pe.config = 0;
+ pe.bp_type = is_x ? HW_BREAKPOINT_X : HW_BREAKPOINT_W;
+ pe.bp_addr = (unsigned long) addr;
+ pe.bp_len = sizeof(long);
+
+ pe.sample_period = 1;
+ pe.sample_type = PERF_SAMPLE_IP;
+ pe.wakeup_events = 1;
+
+ pe.disabled = 1;
+ pe.exclude_kernel = 1;
+ pe.exclude_hv = 1;
+
+ fd = sys_perf_event_open(&pe, 0, -1, -1,
+ perf_event_open_cloexec_flag());
+ if (fd < 0) {
+ pr_debug("failed opening event %llx\n", pe.config);
+ return TEST_FAIL;
+ }
+
+ fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
+ fcntl(fd, F_SETSIG, sig);
+ fcntl(fd, F_SETOWN, getpid());
+
+ ioctl(fd, PERF_EVENT_IOC_RESET, 0);
+
+ return fd;
+}
+
+static int bp_event(void *addr, int sig)
+{
+ return __event(true, addr, sig);
+}
+
+static int wp_event(void *addr, int sig)
+{
+ return __event(false, addr, sig);
+}
+
+static long long bp_count(int fd)
+{
+ long long count;
+ int ret;
+
+ ret = read(fd, &count, sizeof(long long));
+ if (ret != sizeof(long long)) {
+ pr_debug("failed to read: %d\n", ret);
+ return TEST_FAIL;
+ }
+
+ return count;
+}
+
+static int test__bp_signal(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct sigaction sa;
+ long long count1, count2, count3;
+
+ if (!BP_SIGNAL_IS_SUPPORTED) {
+ pr_debug("Test not supported on this architecture");
+ return TEST_SKIP;
+ }
+
+ /* setup SIGIO signal handler */
+ memset(&sa, 0, sizeof(struct sigaction));
+ sa.sa_sigaction = (void *) sig_handler;
+ sa.sa_flags = SA_SIGINFO;
+
+ if (sigaction(SIGIO, &sa, NULL) < 0) {
+ pr_debug("failed setting up signal handler\n");
+ return TEST_FAIL;
+ }
+
+ sa.sa_sigaction = (void *) sig_handler_2;
+ if (sigaction(SIGUSR1, &sa, NULL) < 0) {
+ pr_debug("failed setting up signal handler 2\n");
+ return TEST_FAIL;
+ }
+
+ /*
+ * We create following events:
+ *
+ * fd1 - breakpoint event on __test_function with SIGIO
+ * signal configured. We should get signal
+ * notification each time the breakpoint is hit
+ *
+ * fd2 - breakpoint event on sig_handler with SIGUSR1
+ * configured. We should get SIGUSR1 each time when
+ * breakpoint is hit
+ *
+ * fd3 - watchpoint event on __test_function with SIGIO
+ * configured.
+ *
+ * Following processing should happen:
+ * Exec: Action: Result:
+ * incq (%rdi) - fd1 event breakpoint hit -> count1 == 1
+ * - SIGIO is delivered
+ * sig_handler - fd2 event breakpoint hit -> count2 == 1
+ * - SIGUSR1 is delivered
+ * sig_handler_2 -> overflows_2 == 1 (nested signal)
+ * sys_rt_sigreturn - return from sig_handler_2
+ * overflows++ -> overflows = 1
+ * sys_rt_sigreturn - return from sig_handler
+ * incq (%rdi) - fd3 event watchpoint hit -> count3 == 1 (wp and bp in one insn)
+ * - SIGIO is delivered
+ * sig_handler - fd2 event breakpoint hit -> count2 == 2
+ * - SIGUSR1 is delivered
+ * sig_handler_2 -> overflows_2 == 2 (nested signal)
+ * sys_rt_sigreturn - return from sig_handler_2
+ * overflows++ -> overflows = 2
+ * sys_rt_sigreturn - return from sig_handler
+ * the_var++ - fd3 event watchpoint hit -> count3 == 2 (standalone watchpoint)
+ * - SIGIO is delivered
+ * sig_handler - fd2 event breakpoint hit -> count2 == 3
+ * - SIGUSR1 is delivered
+ * sig_handler_2 -> overflows_2 == 3 (nested signal)
+ * sys_rt_sigreturn - return from sig_handler_2
+ * overflows++ -> overflows == 3
+ * sys_rt_sigreturn - return from sig_handler
+ *
+	 * The test case checks the following error conditions:
+ * - we get stuck in signal handler because of debug
+ * exception being triggered recursively due to
+ * the wrong RF EFLAG management
+ *
+ * - we never trigger the sig_handler breakpoint due
+ * to the wrong RF EFLAG management
+ *
+ */
+
+ fd1 = bp_event(__test_function, SIGIO);
+ fd2 = bp_event(sig_handler, SIGUSR1);
+ fd3 = wp_event((void *)&the_var, SIGIO);
+
+ ioctl(fd1, PERF_EVENT_IOC_ENABLE, 0);
+ ioctl(fd2, PERF_EVENT_IOC_ENABLE, 0);
+ ioctl(fd3, PERF_EVENT_IOC_ENABLE, 0);
+
+ /*
+ * Kick off the test by triggering 'fd1'
+ * breakpoint.
+ */
+ test_function();
+
+ ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);
+ ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0);
+ ioctl(fd3, PERF_EVENT_IOC_DISABLE, 0);
+
+ count1 = bp_count(fd1);
+ count2 = bp_count(fd2);
+ count3 = bp_count(fd3);
+
+ close(fd1);
+ close(fd2);
+ close(fd3);
+
+ pr_debug("count1 %lld, count2 %lld, count3 %lld, overflow %d, overflows_2 %d\n",
+ count1, count2, count3, overflows, overflows_2);
+
+ if (count1 != 1) {
+ if (count1 == 11)
+ pr_debug("failed: RF EFLAG recursion issue detected\n");
+ else
+ pr_debug("failed: wrong count for bp1: %lld, expected 1\n", count1);
+ }
+
+ if (overflows != 3)
+ pr_debug("failed: wrong overflow (%d) hit, expected 3\n", overflows);
+
+ if (overflows_2 != 3)
+ pr_debug("failed: wrong overflow_2 (%d) hit, expected 3\n", overflows_2);
+
+ if (count2 != 3)
+ pr_debug("failed: wrong count for bp2 (%lld), expected 3\n", count2);
+
+ if (count3 != 2)
+ pr_debug("failed: wrong count for bp3 (%lld), expected 2\n", count3);
+
+ return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ?
+ TEST_OK : TEST_FAIL;
+}
+
+DEFINE_SUITE("Breakpoint overflow signal handler", bp_signal);
diff --git a/tools/perf/tests/bp_signal_overflow.c b/tools/perf/tests/bp_signal_overflow.c
new file mode 100644
index 000000000..4e897c2cf
--- /dev/null
+++ b/tools/perf/tests/bp_signal_overflow.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Originally done by Vince Weaver <vincent.weaver@maine.edu> for
+ * perf_event_tests (git://github.com/deater/perf_event_tests)
+ */
+
+/*
+ * Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
+ * 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
+ */
+#define __SANE_USERSPACE_TYPES__
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <time.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <linux/compiler.h>
+#include <linux/hw_breakpoint.h>
+
+#include "tests.h"
+#include "debug.h"
+#include "event.h"
+#include "../perf-sys.h"
+#include "cloexec.h"
+
+static int overflows;
+
+static noinline int test_function(void)
+{
+ return time(NULL);
+}
+
+static void sig_handler(int signum __maybe_unused,
+ siginfo_t *oh __maybe_unused,
+ void *uc __maybe_unused)
+{
+ overflows++;
+}
+
+static long long bp_count(int fd)
+{
+ long long count;
+ int ret;
+
+ ret = read(fd, &count, sizeof(long long));
+ if (ret != sizeof(long long)) {
+ pr_debug("failed to read: %d\n", ret);
+ return TEST_FAIL;
+ }
+
+ return count;
+}
+
+#define EXECUTIONS 10000
+#define THRESHOLD 100
+
+static int test__bp_signal_overflow(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct perf_event_attr pe;
+ struct sigaction sa;
+ long long count;
+ int fd, i, fails = 0;
+
+ if (!BP_SIGNAL_IS_SUPPORTED) {
+ pr_debug("Test not supported on this architecture");
+ return TEST_SKIP;
+ }
+
+ /* setup SIGIO signal handler */
+ memset(&sa, 0, sizeof(struct sigaction));
+ sa.sa_sigaction = (void *) sig_handler;
+ sa.sa_flags = SA_SIGINFO;
+
+ if (sigaction(SIGIO, &sa, NULL) < 0) {
+ pr_debug("failed setting up signal handler\n");
+ return TEST_FAIL;
+ }
+
+ memset(&pe, 0, sizeof(struct perf_event_attr));
+ pe.type = PERF_TYPE_BREAKPOINT;
+ pe.size = sizeof(struct perf_event_attr);
+
+ pe.config = 0;
+ pe.bp_type = HW_BREAKPOINT_X;
+ pe.bp_addr = (unsigned long) test_function;
+ pe.bp_len = sizeof(long);
+
+ pe.sample_period = THRESHOLD;
+ pe.sample_type = PERF_SAMPLE_IP;
+ pe.wakeup_events = 1;
+
+ pe.disabled = 1;
+ pe.exclude_kernel = 1;
+ pe.exclude_hv = 1;
+
+ fd = sys_perf_event_open(&pe, 0, -1, -1,
+ perf_event_open_cloexec_flag());
+ if (fd < 0) {
+ pr_debug("failed opening event %llx\n", pe.config);
+ return TEST_FAIL;
+ }
+
+ fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
+ fcntl(fd, F_SETSIG, SIGIO);
+ fcntl(fd, F_SETOWN, getpid());
+
+ ioctl(fd, PERF_EVENT_IOC_RESET, 0);
+ ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
+
+ for (i = 0; i < EXECUTIONS; i++)
+ test_function();
+
+ ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
+
+ count = bp_count(fd);
+
+ close(fd);
+
+ pr_debug("count %lld, overflow %d\n",
+ count, overflows);
+
+ if (count != EXECUTIONS) {
+ pr_debug("\tWrong number of executions %lld != %d\n",
+ count, EXECUTIONS);
+ fails++;
+ }
+
+ if (overflows != EXECUTIONS / THRESHOLD) {
+ pr_debug("\tWrong number of overflows %d != %d\n",
+ overflows, EXECUTIONS / THRESHOLD);
+ fails++;
+ }
+
+ return fails ? TEST_FAIL : TEST_OK;
+}
+
+DEFINE_SUITE("Breakpoint overflow sampling", bp_signal_overflow);
diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c
new file mode 100644
index 000000000..7981c69ed
--- /dev/null
+++ b/tools/perf/tests/bpf-script-example.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bpf-script-example.c
+ * Test basic LLVM building
+ */
+#ifndef LINUX_VERSION_CODE
+# error Need LINUX_VERSION_CODE
+# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
+#endif
+#define BPF_ANY 0
+#define BPF_MAP_TYPE_ARRAY 2
+#define BPF_FUNC_map_lookup_elem 1
+#define BPF_FUNC_map_update_elem 2
+
+static void *(*bpf_map_lookup_elem)(void *map, void *key) =
+ (void *) BPF_FUNC_map_lookup_elem;
+static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) =
+ (void *) BPF_FUNC_map_update_elem;
+
+/*
+ * The following macros are taken from tools/lib/bpf/bpf_helpers.h,
+ * and are used to create BTF-defined maps. It is easier to copy
+ * these 2 simple macros than to include the above header at
+ * runtime.
+ *
+ * __uint - defines an integer attribute of a BTF map definition.
+ * Such attributes are represented using a pointer to an array,
+ * in which the dimensionality of the array encodes the specified
+ * integer value.
+ *
+ * __type - defines a pointer variable with typeof(val) type for
+ * attributes like key or value, which will be defined by the
+ * size of the type.
+ */
+#define __uint(name, val) int (*name)[val]
+#define __type(name, val) typeof(val) *name
+
+#define SEC(NAME) __attribute__((section(NAME), used))
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} flip_table SEC(".maps");
+
+SEC("func=do_epoll_wait")
+int bpf_func__SyS_epoll_pwait(void *ctx)
+{
+	int ind = 0;
+ int *flag = bpf_map_lookup_elem(&flip_table, &ind);
+ int new_flag;
+ if (!flag)
+ return 0;
+ /* flip flag and store back */
+ new_flag = !*flag;
+ bpf_map_update_elem(&flip_table, &ind, &new_flag, BPF_ANY);
+ return new_flag;
+}
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = LINUX_VERSION_CODE;
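
The #error text above points at the llvm section of ~/.perfconfig. A minimal example of such a section, using exactly the option quoted in the error message (the 0x40200 version code is only the 4.2 example from that message; adjust it for the running kernel):

    [llvm]
            clang-opt = "-DLINUX_VERSION_CODE=0x40200"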
diff --git a/tools/perf/tests/bpf-script-test-kbuild.c b/tools/perf/tests/bpf-script-test-kbuild.c
new file mode 100644
index 000000000..219673aa2
--- /dev/null
+++ b/tools/perf/tests/bpf-script-test-kbuild.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bpf-script-test-kbuild.c
+ * Test include from kernel header
+ */
+#ifndef LINUX_VERSION_CODE
+# error Need LINUX_VERSION_CODE
+# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
+#endif
+#define SEC(NAME) __attribute__((section(NAME), used))
+
+#include <uapi/linux/fs.h>
+
+SEC("func=vfs_llseek")
+int bpf_func__vfs_llseek(void *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/perf/tests/bpf-script-test-prologue.c b/tools/perf/tests/bpf-script-test-prologue.c
new file mode 100644
index 000000000..bd83d364c
--- /dev/null
+++ b/tools/perf/tests/bpf-script-test-prologue.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bpf-script-test-prologue.c
+ * Test BPF prologue
+ */
+#ifndef LINUX_VERSION_CODE
+# error Need LINUX_VERSION_CODE
+# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
+#endif
+#define SEC(NAME) __attribute__((section(NAME), used))
+
+#include <uapi/linux/fs.h>
+
+/*
+ * If CONFIG_PROFILE_ALL_BRANCHES is selected,
+ * 'if' is redefined after including the kernel header.
+ * Recover 'if' for BPF object code.
+ */
+#ifdef if
+# undef if
+#endif
+
+#define FMODE_READ 0x1
+#define FMODE_WRITE 0x2
+
+static void (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
+ (void *) 6;
+
+SEC("func=null_lseek file->f_mode offset orig")
+int bpf_func__null_lseek(void *ctx, int err, unsigned long _f_mode,
+ unsigned long offset, unsigned long orig)
+{
+ fmode_t f_mode = (fmode_t)_f_mode;
+
+ if (err)
+ return 0;
+ if (f_mode & FMODE_WRITE)
+ return 0;
+ if (offset & 1)
+ return 0;
+ if (orig == SEEK_CUR)
+ return 0;
+ return 1;
+}
+
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/perf/tests/bpf-script-test-relocation.c b/tools/perf/tests/bpf-script-test-relocation.c
new file mode 100644
index 000000000..74006e4b2
--- /dev/null
+++ b/tools/perf/tests/bpf-script-test-relocation.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bpf-script-test-relocation.c
+ * Test BPF loader checking relocation
+ */
+#ifndef LINUX_VERSION_CODE
+# error Need LINUX_VERSION_CODE
+# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
+#endif
+#define BPF_ANY 0
+#define BPF_MAP_TYPE_ARRAY 2
+#define BPF_FUNC_map_lookup_elem 1
+#define BPF_FUNC_map_update_elem 2
+
+static void *(*bpf_map_lookup_elem)(void *map, void *key) =
+ (void *) BPF_FUNC_map_lookup_elem;
+static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) =
+ (void *) BPF_FUNC_map_update_elem;
+
+struct bpf_map_def {
+ unsigned int type;
+ unsigned int key_size;
+ unsigned int value_size;
+ unsigned int max_entries;
+};
+
+#define SEC(NAME) __attribute__((section(NAME), used))
+struct bpf_map_def SEC("maps") my_table = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .max_entries = 1,
+};
+
+int this_is_a_global_val;
+
+SEC("func=sys_write")
+int bpf_func__sys_write(void *ctx)
+{
+ int key = 0;
+ int value = 0;
+
+ /*
+	 * Incorrect relocation. This program should not be allowed
+	 * to be loaded into the kernel.
+ */
+ bpf_map_update_elem(&this_is_a_global_val, &key, &value, 0);
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
new file mode 100644
index 000000000..6a4235a9c
--- /dev/null
+++ b/tools/perf/tests/bpf.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/epoll.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <util/record.h>
+#include <util/util.h>
+#include <util/bpf-loader.h>
+#include <util/evlist.h>
+#include <linux/filter.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <api/fs/fs.h>
+#include <perf/mmap.h>
+#include "tests.h"
+#include "llvm.h"
+#include "debug.h"
+#include "parse-events.h"
+#include "util/mmap.h"
+#define NR_ITERS 111
+#define PERF_TEST_BPF_PATH "/sys/fs/bpf/perf_test"
+
+#ifdef HAVE_LIBBPF_SUPPORT
+#include <linux/bpf.h>
+#include <bpf/bpf.h>
+
+static int epoll_pwait_loop(void)
+{
+ int i;
+
+ /* Should fail NR_ITERS times */
+ for (i = 0; i < NR_ITERS; i++)
+ epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
+ return 0;
+}
+
+#ifdef HAVE_BPF_PROLOGUE
+
+static int llseek_loop(void)
+{
+ int fds[2], i;
+
+ fds[0] = open("/dev/null", O_RDONLY);
+ fds[1] = open("/dev/null", O_RDWR);
+
+ if (fds[0] < 0 || fds[1] < 0)
+ return -1;
+
+ for (i = 0; i < NR_ITERS; i++) {
+ lseek(fds[i % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
+ lseek(fds[(i + 1) % 2], i, (i / 2) % 2 ? SEEK_CUR : SEEK_SET);
+ }
+ close(fds[0]);
+ close(fds[1]);
+ return 0;
+}
+
+#endif
+
+static struct {
+ enum test_llvm__testcase prog_id;
+ const char *name;
+ const char *msg_compile_fail;
+ const char *msg_load_fail;
+ int (*target_func)(void);
+ int expect_result;
+ bool pin;
+} bpf_testcase_table[] = {
+ {
+ .prog_id = LLVM_TESTCASE_BASE,
+ .name = "[basic_bpf_test]",
+ .msg_compile_fail = "fix 'perf test LLVM' first",
+ .msg_load_fail = "load bpf object failed",
+ .target_func = &epoll_pwait_loop,
+ .expect_result = (NR_ITERS + 1) / 2,
+ },
+ {
+ .prog_id = LLVM_TESTCASE_BASE,
+ .name = "[bpf_pinning]",
+ .msg_compile_fail = "fix kbuild first",
+ .msg_load_fail = "check your vmlinux setting?",
+ .target_func = &epoll_pwait_loop,
+ .expect_result = (NR_ITERS + 1) / 2,
+ .pin = true,
+ },
+#ifdef HAVE_BPF_PROLOGUE
+ {
+ .prog_id = LLVM_TESTCASE_BPF_PROLOGUE,
+ .name = "[bpf_prologue_test]",
+ .msg_compile_fail = "fix kbuild first",
+ .msg_load_fail = "check your vmlinux setting?",
+ .target_func = &llseek_loop,
+ .expect_result = (NR_ITERS + 1) / 4,
+ },
+#endif
+};
+
+static int do_test(struct bpf_object *obj, int (*func)(void),
+ int expect)
+{
+ struct record_opts opts = {
+ .target = {
+ .uid = UINT_MAX,
+ .uses_mmap = true,
+ },
+ .freq = 0,
+ .mmap_pages = 256,
+ .default_interval = 1,
+ };
+
+ char pid[16];
+ char sbuf[STRERR_BUFSIZE];
+ struct evlist *evlist;
+ int i, ret = TEST_FAIL, err = 0, count = 0;
+
+ struct parse_events_state parse_state;
+ struct parse_events_error parse_error;
+
+ parse_events_error__init(&parse_error);
+ bzero(&parse_state, sizeof(parse_state));
+ parse_state.error = &parse_error;
+ INIT_LIST_HEAD(&parse_state.list);
+
+ err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL);
+ parse_events_error__exit(&parse_error);
+ if (err == -ENODATA) {
+ pr_debug("Failed to add events selected by BPF, debuginfo package not installed\n");
+ return TEST_SKIP;
+ }
+ if (err || list_empty(&parse_state.list)) {
+ pr_debug("Failed to add events selected by BPF\n");
+ return TEST_FAIL;
+ }
+
+ snprintf(pid, sizeof(pid), "%d", getpid());
+ pid[sizeof(pid) - 1] = '\0';
+ opts.target.tid = opts.target.pid = pid;
+
+ /* Instead of evlist__new_default, don't add default events */
+ evlist = evlist__new();
+ if (!evlist) {
+ pr_debug("Not enough memory to create evlist\n");
+ return TEST_FAIL;
+ }
+
+ err = evlist__create_maps(evlist, &opts.target);
+ if (err < 0) {
+ pr_debug("Not enough memory to create thread/cpu maps\n");
+ goto out_delete_evlist;
+ }
+
+ evlist__splice_list_tail(evlist, &parse_state.list);
+ evlist->core.nr_groups = parse_state.nr_groups;
+
+ evlist__config(evlist, &opts, NULL);
+
+ err = evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ err = evlist__mmap(evlist, opts.mmap_pages);
+ if (err < 0) {
+ pr_debug("evlist__mmap: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ evlist__enable(evlist);
+ (*func)();
+ evlist__disable(evlist);
+
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ union perf_event *event;
+ struct mmap *md;
+
+ md = &evlist->mmap[i];
+ if (perf_mmap__read_init(&md->core) < 0)
+ continue;
+
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
+ const u32 type = event->header.type;
+
+ if (type == PERF_RECORD_SAMPLE)
+				count++;
+ }
+ perf_mmap__read_done(&md->core);
+ }
+
+ if (count != expect * evlist->core.nr_entries) {
+ pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect * evlist->core.nr_entries, count);
+ goto out_delete_evlist;
+ }
+
+ ret = TEST_OK;
+
+out_delete_evlist:
+ evlist__delete(evlist);
+ return ret;
+}
+
+static struct bpf_object *
+prepare_bpf(void *obj_buf, size_t obj_buf_sz, const char *name)
+{
+ struct bpf_object *obj;
+
+ obj = bpf__prepare_load_buffer(obj_buf, obj_buf_sz, name);
+ if (IS_ERR(obj)) {
+ pr_debug("Compile BPF program failed.\n");
+ return NULL;
+ }
+ return obj;
+}
+
+static int __test__bpf(int idx)
+{
+ int ret;
+ void *obj_buf;
+ size_t obj_buf_sz;
+ struct bpf_object *obj;
+
+ ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
+ bpf_testcase_table[idx].prog_id,
+ false, NULL);
+ if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
+ pr_debug("Unable to get BPF object, %s\n",
+ bpf_testcase_table[idx].msg_compile_fail);
+ if ((idx == 0) || (ret == TEST_SKIP))
+ return TEST_SKIP;
+ else
+ return TEST_FAIL;
+ }
+
+ obj = prepare_bpf(obj_buf, obj_buf_sz,
+ bpf_testcase_table[idx].name);
+ if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
+ if (!obj)
+ pr_debug("Fail to load BPF object: %s\n",
+ bpf_testcase_table[idx].msg_load_fail);
+ else
+ pr_debug("Success unexpectedly: %s\n",
+ bpf_testcase_table[idx].msg_load_fail);
+ ret = TEST_FAIL;
+ goto out;
+ }
+
+ if (obj) {
+ ret = do_test(obj,
+ bpf_testcase_table[idx].target_func,
+ bpf_testcase_table[idx].expect_result);
+ if (ret != TEST_OK)
+ goto out;
+ if (bpf_testcase_table[idx].pin) {
+ int err;
+
+ if (!bpf_fs__mount()) {
+ pr_debug("BPF filesystem not mounted\n");
+ ret = TEST_FAIL;
+ goto out;
+ }
+ err = mkdir(PERF_TEST_BPF_PATH, 0777);
+ if (err && errno != EEXIST) {
+ pr_debug("Failed to make perf_test dir: %s\n",
+ strerror(errno));
+ ret = TEST_FAIL;
+ goto out;
+ }
+ if (bpf_object__pin(obj, PERF_TEST_BPF_PATH))
+ ret = TEST_FAIL;
+ if (rm_rf(PERF_TEST_BPF_PATH))
+ ret = TEST_FAIL;
+ }
+ }
+
+out:
+ free(obj_buf);
+ bpf__clear();
+ return ret;
+}
+
+static int check_env(void)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ int err;
+ char license[] = "GPL";
+
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ };
+
+ err = fetch_kernel_version(&opts.kern_version, NULL, 0);
+ if (err) {
+ pr_debug("Unable to get kernel version\n");
+ return err;
+ }
+ err = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, license, insns,
+ ARRAY_SIZE(insns), &opts);
+ if (err < 0) {
+ pr_err("Missing basic BPF support, skip this test: %s\n",
+ strerror(errno));
+ return err;
+ }
+ close(err);
+
+ return 0;
+}
+
+static int test__bpf(int i)
+{
+ int err;
+
+ if (i < 0 || i >= (int)ARRAY_SIZE(bpf_testcase_table))
+ return TEST_FAIL;
+
+ if (geteuid() != 0) {
+ pr_debug("Only root can run BPF test\n");
+ return TEST_SKIP;
+ }
+
+ if (check_env())
+ return TEST_SKIP;
+
+ err = __test__bpf(i);
+ return err;
+}
+#endif
+
+static int test__basic_bpf_test(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__bpf(0);
+#else
+ pr_debug("Skip BPF test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
+}
+
+static int test__bpf_pinning(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__bpf(1);
+#else
+ pr_debug("Skip BPF test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
+}
+
+static int test__bpf_prologue_test(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+#if defined(HAVE_LIBBPF_SUPPORT) && defined(HAVE_BPF_PROLOGUE)
+ return test__bpf(2);
+#else
+ pr_debug("Skip BPF test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
+}
+
+
+static struct test_case bpf_tests[] = {
+#ifdef HAVE_LIBBPF_SUPPORT
+ TEST_CASE("Basic BPF filtering", basic_bpf_test),
+ TEST_CASE_REASON("BPF pinning", bpf_pinning,
+ "clang isn't installed or environment missing BPF support"),
+#ifdef HAVE_BPF_PROLOGUE
+ TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test,
+ "clang/debuginfo isn't installed or environment missing BPF support"),
+#else
+ TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in"),
+#endif
+#else
+ TEST_CASE_REASON("Basic BPF filtering", basic_bpf_test, "not compiled in"),
+ TEST_CASE_REASON("BPF pinning", bpf_pinning, "not compiled in"),
+ TEST_CASE_REASON("BPF prologue generation", bpf_prologue_test, "not compiled in"),
+#endif
+ { .name = NULL, }
+};
+
+struct test_suite suite__bpf = {
+ .desc = "BPF filter",
+ .test_cases = bpf_tests,
+};
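
A plausible way to exercise these suites from a perf build, run as root since test__bpf() skips for unprivileged users (the suite names are the ones defined above; 'perf test LLVM' is the prerequisite named in msg_compile_fail):

    # perf test -v LLVM
    # perf test -v "BPF filter"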
diff --git a/tools/perf/tests/builtin-test-list.c b/tools/perf/tests/builtin-test-list.c
new file mode 100644
index 000000000..a65b9e547
--- /dev/null
+++ b/tools/perf/tests/builtin-test-list.c
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <subcmd/exec-cmd.h>
+#include <subcmd/parse-options.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include "builtin.h"
+#include "builtin-test-list.h"
+#include "color.h"
+#include "debug.h"
+#include "hist.h"
+#include "intlist.h"
+#include "string2.h"
+#include "symbol.h"
+#include "tests.h"
+#include "util/rlimit.h"
+
+
+/*
+ * As this is a singleton built once for the run of the process, there is
+ * no value in trying to free it; just let it stay around until the process
+ * exits, when it is cleaned up.
+ */
+static size_t files_num = 0;
+static struct script_file *files = NULL;
+static int files_max_width = 0;
+
+static const char *shell_tests__dir(char *path, size_t size)
+{
+ const char *devel_dirs[] = { "./tools/perf/tests", "./tests", };
+ char *exec_path;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(devel_dirs); ++i) {
+ struct stat st;
+
+ if (!lstat(devel_dirs[i], &st)) {
+ scnprintf(path, size, "%s/shell", devel_dirs[i]);
+ if (!lstat(devel_dirs[i], &st))
+ return path;
+ }
+ }
+
+ /* Then installed path. */
+ exec_path = get_argv_exec_path();
+ scnprintf(path, size, "%s/tests/shell", exec_path);
+ free(exec_path);
+ return path;
+}
+
+static const char *shell_test__description(char *description, size_t size,
+ const char *path, const char *name)
+{
+ FILE *fp;
+ char filename[PATH_MAX];
+ int ch;
+
+ path__join(filename, sizeof(filename), path, name);
+ fp = fopen(filename, "r");
+ if (!fp)
+ return NULL;
+
+ /* Skip first line - should be #!/bin/sh Shebang */
+ do {
+ ch = fgetc(fp);
+ } while (ch != EOF && ch != '\n');
+
+ description = fgets(description, size, fp);
+ fclose(fp);
+
+	/* Assume the first char on the line is a comment char ('#'); everything after it is the description */
+ return description ? strim(description + 1) : NULL;
+}
+
+/* Is this full file path a shell script */
+static bool is_shell_script(const char *path)
+{
+ const char *ext;
+
+ ext = strrchr(path, '.');
+ if (!ext)
+ return false;
+ if (!strcmp(ext, ".sh")) { /* Has .sh extension */
+ if (access(path, R_OK | X_OK) == 0) /* Is executable */
+ return true;
+ }
+ return false;
+}
+
+/* Is this file in this dir a shell script (for test purposes) */
+static bool is_test_script(const char *path, const char *name)
+{
+ char filename[PATH_MAX];
+
+ path__join(filename, sizeof(filename), path, name);
+	return is_shell_script(filename);
+}
+
+/* Duplicate a string and fall over and die if we run out of memory */
+static char *strdup_check(const char *str)
+{
+ char *newstr;
+
+ newstr = strdup(str);
+ if (!newstr) {
+ pr_err("Out of memory while duplicating test script string\n");
+ abort();
+ }
+ return newstr;
+}
+
+static void append_script(const char *dir, const char *file, const char *desc)
+{
+ struct script_file *files_tmp;
+ size_t files_num_tmp;
+ int width;
+
+ files_num_tmp = files_num + 1;
+ if (files_num_tmp >= SIZE_MAX) {
+ pr_err("Too many script files\n");
+ abort();
+ }
+ /* Realloc is good enough, though we could realloc by chunks, not that
+ * anyone will ever measure performance here */
+ files_tmp = realloc(files,
+ (files_num_tmp + 1) * sizeof(struct script_file));
+ if (files_tmp == NULL) {
+ pr_err("Out of memory while building test list\n");
+ abort();
+ }
+ /* Add file to end and NULL terminate the struct array */
+ files = files_tmp;
+ files_num = files_num_tmp;
+ files[files_num - 1].dir = strdup_check(dir);
+ files[files_num - 1].file = strdup_check(file);
+ files[files_num - 1].desc = strdup_check(desc);
+ files[files_num].dir = NULL;
+ files[files_num].file = NULL;
+ files[files_num].desc = NULL;
+
+ width = strlen(desc); /* Track max width of desc */
+ if (width > files_max_width)
+ files_max_width = width;
+}
+
+static void append_scripts_in_dir(const char *path)
+{
+ struct dirent **entlist;
+ struct dirent *ent;
+ int n_dirs, i;
+ char filename[PATH_MAX];
+
+ /* List files, sorted by alpha */
+ n_dirs = scandir(path, &entlist, NULL, alphasort);
+ if (n_dirs == -1)
+ return;
+ for (i = 0; i < n_dirs && (ent = entlist[i]); i++) {
+ if (ent->d_name[0] == '.')
+ continue; /* Skip hidden files */
+ if (is_test_script(path, ent->d_name)) { /* It's a test */
+ char bf[256];
+ const char *desc = shell_test__description
+ (bf, sizeof(bf), path, ent->d_name);
+
+ if (desc) /* It has a desc line - valid script */
+ append_script(path, ent->d_name, desc);
+ } else if (is_directory(path, ent)) { /* Scan the subdir */
+ path__join(filename, sizeof(filename),
+ path, ent->d_name);
+ append_scripts_in_dir(filename);
+ }
+ }
+ for (i = 0; i < n_dirs; i++) /* Clean up */
+ zfree(&entlist[i]);
+ free(entlist);
+}
+
+const struct script_file *list_script_files(void)
+{
+ char path_dir[PATH_MAX];
+ const char *path;
+
+ if (files)
+ return files; /* Singleton - we already know our list */
+
+ path = shell_tests__dir(path_dir, sizeof(path_dir)); /* Walk dir */
+ append_scripts_in_dir(path);
+
+ return files;
+}
+
+int list_script_max_width(void)
+{
+ list_script_files(); /* Ensure we have scanned all scripts */
+ return files_max_width;
+}
diff --git a/tools/perf/tests/builtin-test-list.h b/tools/perf/tests/builtin-test-list.h
new file mode 100644
index 000000000..eb81f3aa6
--- /dev/null
+++ b/tools/perf/tests/builtin-test-list.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+struct script_file {
+ char *dir;
+ char *file;
+ char *desc;
+};
+
+/* List available script tests to run - singleton - never freed */
+const struct script_file *list_script_files(void);
+/* Get maximum width of description string */
+int list_script_max_width(void);
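
A minimal sketch (not part of the patch) of how a consumer walks this list, mirroring the loops in builtin-test.c below: list_script_files() returns the singleton array, the entry whose dir is NULL terminates it, and list_script_max_width() gives the padding needed to line the descriptions up.

#include <stdio.h>
#include "builtin-test-list.h"

/* Print every discovered shell test, padded to the widest description. */
static void print_shell_tests(void)
{
	const struct script_file *file;
	int width = list_script_max_width();

	for (file = list_script_files(); file && file->dir; file++)
		printf("%-*s  (%s/%s)\n", width, file->desc, file->dir, file->file);
}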
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
new file mode 100644
index 000000000..7122eae1d
--- /dev/null
+++ b/tools/perf/tests/builtin-test.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * builtin-test.c
+ *
+ * Builtin regression testing command: an ever-growing number of sanity tests
+ */
+#include <fcntl.h>
+#include <errno.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include "builtin.h"
+#include "hist.h"
+#include "intlist.h"
+#include "tests.h"
+#include "debug.h"
+#include "color.h"
+#include <subcmd/parse-options.h>
+#include "string2.h"
+#include "symbol.h"
+#include "util/rlimit.h"
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <subcmd/exec-cmd.h>
+#include <linux/zalloc.h>
+
+#include "builtin-test-list.h"
+
+static bool dont_fork;
+
+struct test_suite *__weak arch_tests[] = {
+ NULL,
+};
+
+static struct test_suite *generic_tests[] = {
+ &suite__vmlinux_matches_kallsyms,
+ &suite__openat_syscall_event,
+ &suite__openat_syscall_event_on_all_cpus,
+ &suite__basic_mmap,
+ &suite__mem,
+ &suite__parse_events,
+ &suite__expr,
+ &suite__PERF_RECORD,
+ &suite__pmu,
+ &suite__pmu_events,
+ &suite__dso_data,
+ &suite__dso_data_cache,
+ &suite__dso_data_reopen,
+ &suite__perf_evsel__roundtrip_name_test,
+ &suite__perf_evsel__tp_sched_test,
+ &suite__syscall_openat_tp_fields,
+ &suite__attr,
+ &suite__hists_link,
+ &suite__python_use,
+ &suite__bp_signal,
+ &suite__bp_signal_overflow,
+ &suite__bp_accounting,
+ &suite__wp,
+ &suite__task_exit,
+ &suite__sw_clock_freq,
+ &suite__code_reading,
+ &suite__sample_parsing,
+ &suite__keep_tracking,
+ &suite__parse_no_sample_id_all,
+ &suite__hists_filter,
+ &suite__mmap_thread_lookup,
+ &suite__thread_maps_share,
+ &suite__hists_output,
+ &suite__hists_cumulate,
+ &suite__switch_tracking,
+ &suite__fdarray__filter,
+ &suite__fdarray__add,
+ &suite__kmod_path__parse,
+ &suite__thread_map,
+ &suite__llvm,
+ &suite__session_topology,
+ &suite__bpf,
+ &suite__thread_map_synthesize,
+ &suite__thread_map_remove,
+ &suite__cpu_map_synthesize,
+ &suite__synthesize_stat_config,
+ &suite__synthesize_stat,
+ &suite__synthesize_stat_round,
+ &suite__event_update,
+ &suite__event_times,
+ &suite__backward_ring_buffer,
+ &suite__cpu_map_print,
+ &suite__cpu_map_merge,
+ &suite__sdt_event,
+ &suite__is_printable_array,
+ &suite__bitmap_print,
+ &suite__perf_hooks,
+ &suite__clang,
+ &suite__unit_number__scnprint,
+ &suite__mem2node,
+ &suite__time_utils,
+ &suite__jit_write_elf,
+ &suite__pfm,
+ &suite__api_io,
+ &suite__maps__merge_in,
+ &suite__demangle_java,
+ &suite__demangle_ocaml,
+ &suite__parse_metric,
+ &suite__pe_file_parsing,
+ &suite__expand_cgroup_events,
+ &suite__perf_time_to_tsc,
+ &suite__dlfilter,
+ &suite__sigtrap,
+ NULL,
+};
+
+static struct test_suite **tests[] = {
+ generic_tests,
+ arch_tests,
+};
+
+static int num_subtests(const struct test_suite *t)
+{
+ int num;
+
+ if (!t->test_cases)
+ return 0;
+
+ num = 0;
+ while (t->test_cases[num].name)
+ num++;
+
+ return num;
+}
+
+static bool has_subtests(const struct test_suite *t)
+{
+ return num_subtests(t) > 1;
+}
+
+static const char *skip_reason(const struct test_suite *t, int subtest)
+{
+ if (!t->test_cases)
+ return NULL;
+
+ return t->test_cases[subtest >= 0 ? subtest : 0].skip_reason;
+}
+
+static const char *test_description(const struct test_suite *t, int subtest)
+{
+ if (t->test_cases && subtest >= 0)
+ return t->test_cases[subtest].desc;
+
+ return t->desc;
+}
+
+static test_fnptr test_function(const struct test_suite *t, int subtest)
+{
+ if (subtest <= 0)
+ return t->test_cases[0].run_case;
+
+ return t->test_cases[subtest].run_case;
+}
+
+static bool perf_test__matches(const char *desc, int curr, int argc, const char *argv[])
+{
+ int i;
+
+ if (argc == 0)
+ return true;
+
+ for (i = 0; i < argc; ++i) {
+ char *end;
+ long nr = strtoul(argv[i], &end, 10);
+
+ if (*end == '\0') {
+ if (nr == curr + 1)
+ return true;
+ continue;
+ }
+
+ if (strcasestr(desc, argv[i]))
+ return true;
+ }
+
+ return false;
+}
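
/*
 * Illustration of the matching rules implemented above (a sketch, not part of
 * the patch): an argument that parses fully as a number selects the test whose
 * 1-based index equals it, while any other argument is a case-insensitive
 * substring match against the description.
 */
static void matching_example(void)
{
	const char *argv[] = { "BPF", "3" };
	bool m;

	/* "BPF" is not numeric, so it is a case-insensitive substring match. */
	m = perf_test__matches("BPF filter", /*curr=*/40, 2, argv);		/* true */
	/* "3" is numeric and selects the test with 1-based index 3 (curr == 2). */
	m = perf_test__matches("Object code reading", /*curr=*/2, 2, argv);	/* true */
	/* Neither argument matches this description or its index. */
	m = perf_test__matches("Synthesize cpu map", /*curr=*/0, 2, argv);	/* false */
	(void)m;
}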
+
+static int run_test(struct test_suite *test, int subtest)
+{
+ int status, err = -1, child = dont_fork ? 0 : fork();
+ char sbuf[STRERR_BUFSIZE];
+
+ if (child < 0) {
+ pr_err("failed to fork test: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ return -1;
+ }
+
+ if (!child) {
+ if (!dont_fork) {
+ pr_debug("test child forked, pid %d\n", getpid());
+
+ if (verbose <= 0) {
+ int nullfd = open("/dev/null", O_WRONLY);
+
+ if (nullfd >= 0) {
+ close(STDERR_FILENO);
+ close(STDOUT_FILENO);
+
+ dup2(nullfd, STDOUT_FILENO);
+ dup2(STDOUT_FILENO, STDERR_FILENO);
+ close(nullfd);
+ }
+ } else {
+ signal(SIGSEGV, sighandler_dump_stack);
+ signal(SIGFPE, sighandler_dump_stack);
+ }
+ }
+
+ err = test_function(test, subtest)(test, subtest);
+ if (!dont_fork)
+ exit(err);
+ }
+
+ if (!dont_fork) {
+ wait(&status);
+
+ if (WIFEXITED(status)) {
+ err = (signed char)WEXITSTATUS(status);
+ pr_debug("test child finished with %d\n", err);
+ } else if (WIFSIGNALED(status)) {
+ err = -1;
+ pr_debug("test child interrupted\n");
+ }
+ }
+
+ return err;
+}
+
+#define for_each_test(j, k, t) \
+ for (j = 0; j < ARRAY_SIZE(tests); j++) \
+ for (k = 0, t = tests[j][k]; tests[j][k]; k++, t = tests[j][k])
+
+static int test_and_print(struct test_suite *t, int subtest)
+{
+ int err;
+
+ pr_debug("\n--- start ---\n");
+ err = run_test(t, subtest);
+ pr_debug("---- end ----\n");
+
+ if (!has_subtests(t))
+ pr_debug("%s:", t->desc);
+ else
+ pr_debug("%s subtest %d:", t->desc, subtest + 1);
+
+ switch (err) {
+ case TEST_OK:
+ pr_info(" Ok\n");
+ break;
+ case TEST_SKIP: {
+ const char *reason = skip_reason(t, subtest);
+
+ if (reason)
+ color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
+ else
+ color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
+ }
+ break;
+ case TEST_FAIL:
+ default:
+ color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
+ break;
+ }
+
+ return err;
+}
+
+struct shell_test {
+ const char *dir;
+ const char *file;
+};
+
+static int shell_test__run(struct test_suite *test, int subdir __maybe_unused)
+{
+ int err;
+ char script[PATH_MAX];
+ struct shell_test *st = test->priv;
+
+ path__join(script, sizeof(script) - 3, st->dir, st->file);
+
+ if (verbose)
+ strncat(script, " -v", sizeof(script) - strlen(script) - 1);
+
+ err = system(script);
+ if (!err)
+ return TEST_OK;
+
+ return WEXITSTATUS(err) == 2 ? TEST_SKIP : TEST_FAIL;
+}
+
+static int run_shell_tests(int argc, const char *argv[], int i, int width,
+ struct intlist *skiplist)
+{
+ struct shell_test st;
+ const struct script_file *files, *file;
+
+ files = list_script_files();
+ if (!files)
+ return 0;
+ for (file = files; file->dir; file++) {
+ int curr = i++;
+ struct test_case test_cases[] = {
+ {
+ .desc = file->desc,
+ .run_case = shell_test__run,
+ },
+ { .name = NULL, }
+ };
+ struct test_suite test_suite = {
+ .desc = test_cases[0].desc,
+ .test_cases = test_cases,
+ .priv = &st,
+ };
+ st.dir = file->dir;
+
+ if (test_suite.desc == NULL ||
+ !perf_test__matches(test_suite.desc, curr, argc, argv))
+ continue;
+
+ st.file = file->file;
+ pr_info("%3d: %-*s:", i, width, test_suite.desc);
+
+ if (intlist__find(skiplist, i)) {
+ color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
+ continue;
+ }
+
+ test_and_print(&test_suite, 0);
+ }
+ return 0;
+}
+
+static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
+{
+ struct test_suite *t;
+ unsigned int j, k;
+ int i = 0;
+ int width = list_script_max_width();
+
+ for_each_test(j, k, t) {
+ int len = strlen(test_description(t, -1));
+
+ if (width < len)
+ width = len;
+ }
+
+ for_each_test(j, k, t) {
+ int curr = i++;
+ int subi;
+
+ if (!perf_test__matches(test_description(t, -1), curr, argc, argv)) {
+ bool skip = true;
+ int subn;
+
+ subn = num_subtests(t);
+
+ for (subi = 0; subi < subn; subi++) {
+ if (perf_test__matches(test_description(t, subi),
+ curr, argc, argv))
+ skip = false;
+ }
+
+ if (skip)
+ continue;
+ }
+
+ pr_info("%3d: %-*s:", i, width, test_description(t, -1));
+
+ if (intlist__find(skiplist, i)) {
+ color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
+ continue;
+ }
+
+ if (!has_subtests(t)) {
+ test_and_print(t, -1);
+ } else {
+ int subn = num_subtests(t);
+ /*
+			 * Subtract 2 to align with normal test cases.
+			 * For a subtest we print an additional '.x' in the number,
+			 * for example:
+ *
+ * 35: Test LLVM searching and compiling :
+ * 35.1: Basic BPF llvm compiling test : Ok
+ */
+ int subw = width > 2 ? width - 2 : width;
+
+ if (subn <= 0) {
+ color_fprintf(stderr, PERF_COLOR_YELLOW,
+ " Skip (not compiled in)\n");
+ continue;
+ }
+ pr_info("\n");
+
+ for (subi = 0; subi < subn; subi++) {
+ int len = strlen(test_description(t, subi));
+
+ if (subw < len)
+ subw = len;
+ }
+
+ for (subi = 0; subi < subn; subi++) {
+ if (!perf_test__matches(test_description(t, subi),
+ curr, argc, argv))
+ continue;
+
+ pr_info("%3d.%1d: %-*s:", i, subi + 1, subw,
+ test_description(t, subi));
+ test_and_print(t, subi);
+ }
+ }
+ }
+
+ return run_shell_tests(argc, argv, i, width, skiplist);
+}
+
+static int perf_test__list_shell(int argc, const char **argv, int i)
+{
+ const struct script_file *files, *file;
+
+ files = list_script_files();
+ if (!files)
+ return 0;
+ for (file = files; file->dir; file++) {
+ int curr = i++;
+ struct test_suite t = {
+ .desc = file->desc
+ };
+
+ if (!perf_test__matches(t.desc, curr, argc, argv))
+ continue;
+
+ pr_info("%3d: %s\n", i, t.desc);
+ }
+ return 0;
+}
+
+static int perf_test__list(int argc, const char **argv)
+{
+ unsigned int j, k;
+ struct test_suite *t;
+ int i = 0;
+
+ for_each_test(j, k, t) {
+ int curr = i++;
+
+ if (!perf_test__matches(test_description(t, -1), curr, argc, argv))
+ continue;
+
+ pr_info("%3d: %s\n", i, test_description(t, -1));
+
+ if (has_subtests(t)) {
+ int subn = num_subtests(t);
+ int subi;
+
+ for (subi = 0; subi < subn; subi++)
+ pr_info("%3d:%1d: %s\n", i, subi + 1,
+ test_description(t, subi));
+ }
+ }
+
+ perf_test__list_shell(argc, argv, i);
+
+ return 0;
+}
+
+int cmd_test(int argc, const char **argv)
+{
+ const char *test_usage[] = {
+ "perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
+ NULL,
+ };
+ const char *skip = NULL;
+ const struct option test_options[] = {
+ OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('F', "dont-fork", &dont_fork,
+ "Do not fork for testcase"),
+ OPT_END()
+ };
+ const char * const test_subcommands[] = { "list", NULL };
+ struct intlist *skiplist = NULL;
+ int ret = hists__init();
+
+ if (ret < 0)
+ return ret;
+
+ /* Unbuffered output */
+ setvbuf(stdout, NULL, _IONBF, 0);
+
+ argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
+ if (argc >= 1 && !strcmp(argv[0], "list"))
+ return perf_test__list(argc - 1, argv + 1);
+
+ symbol_conf.priv_size = sizeof(int);
+ symbol_conf.sort_by_name = true;
+ symbol_conf.try_vmlinux_path = true;
+
+ if (symbol__init(NULL) < 0)
+ return -1;
+
+ if (skip != NULL)
+ skiplist = intlist__new(skip);
+ /*
+ * Tests that create BPF maps, for instance, need more than the 64K
+ * default:
+ */
+ rlimit__bump_memlock();
+
+ return __cmd_test(argc, argv, skiplist);
+}
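
Tying the registration together: the single-case suites later in this patch (code-reading.c, cpumap.c, the demangle tests, ...) are built with the DEFINE_SUITE() helper and only become runnable once their suite__* symbol is listed in generic_tests[] above. A hedged sketch with a purely illustrative name:

/* In a new tools/perf/tests/foo.c: */
static int test__foo(struct test_suite *test __maybe_unused,
		     int subtest __maybe_unused)
{
	return TEST_OK;
}

DEFINE_SUITE("foo sanity check", foo);

/*
 * Then declare "extern struct test_suite suite__foo;" alongside the other
 * suite declarations and add &suite__foo to generic_tests[] before the
 * terminating NULL.
 */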
diff --git a/tools/perf/tests/clang.c b/tools/perf/tests/clang.c
new file mode 100644
index 000000000..a7111005d
--- /dev/null
+++ b/tools/perf/tests/clang.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "tests.h"
+#include "c++/clang-c.h"
+#include <linux/kernel.h>
+
+#ifndef HAVE_LIBCLANGLLVM_SUPPORT
+static int test__clang_to_IR(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ return TEST_SKIP;
+}
+
+static int test__clang_to_obj(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ return TEST_SKIP;
+}
+#endif
+
+static struct test_case clang_tests[] = {
+ TEST_CASE_REASON("builtin clang compile C source to IR", clang_to_IR,
+ "not compiled in"),
+ TEST_CASE_REASON("builtin clang compile C source to ELF object",
+ clang_to_obj,
+ "not compiled in"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__clang = {
+ .desc = "builtin clang support",
+ .test_cases = clang_tests,
+};
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
new file mode 100644
index 000000000..95feb6ef3
--- /dev/null
+++ b/tools/perf/tests/code-reading.c
@@ -0,0 +1,747 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/param.h>
+#include <perf/cpumap.h>
+#include <perf/evlist.h>
+#include <perf/mmap.h>
+
+#include "debug.h"
+#include "dso.h"
+#include "env.h"
+#include "parse-events.h"
+#include "trace-event.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "thread_map.h"
+#include "machine.h"
+#include "map.h"
+#include "symbol.h"
+#include "event.h"
+#include "record.h"
+#include "util/mmap.h"
+#include "util/string2.h"
+#include "util/synthetic-events.h"
+#include "thread.h"
+
+#include "tests.h"
+
+#include <linux/ctype.h>
+
+#define BUFSZ 1024
+#define READLEN 128
+
+struct state {
+ u64 done[1024];
+ size_t done_cnt;
+};
+
+static size_t read_objdump_chunk(const char **line, unsigned char **buf,
+ size_t *buf_len)
+{
+ size_t bytes_read = 0;
+ unsigned char *chunk_start = *buf;
+
+ /* Read bytes */
+ while (*buf_len > 0) {
+ char c1, c2;
+
+ /* Get 2 hex digits */
+ c1 = *(*line)++;
+ if (!isxdigit(c1))
+ break;
+ c2 = *(*line)++;
+ if (!isxdigit(c2))
+ break;
+
+ /* Store byte and advance buf */
+ **buf = (hex(c1) << 4) | hex(c2);
+ (*buf)++;
+ (*buf_len)--;
+ bytes_read++;
+
+ /* End of chunk? */
+ if (isspace(**line))
+ break;
+ }
+
+ /*
+ * objdump will display raw insn as LE if code endian
+ * is LE and bytes_per_chunk > 1. In that case reverse
+ * the chunk we just read.
+ *
+	 * See disassemble_bytes() in binutils/objdump.c for details on
+	 * how objdump chooses the display endianness.
+ */
+ if (bytes_read > 1 && !bigendian()) {
+ unsigned char *chunk_end = chunk_start + bytes_read - 1;
+ unsigned char tmp;
+
+ while (chunk_start < chunk_end) {
+ tmp = *chunk_start;
+ *chunk_start = *chunk_end;
+ *chunk_end = tmp;
+ chunk_start++;
+ chunk_end--;
+ }
+ }
+
+ return bytes_read;
+}
+
+static size_t read_objdump_line(const char *line, unsigned char *buf,
+ size_t buf_len)
+{
+ const char *p;
+ size_t ret, bytes_read = 0;
+
+ /* Skip to a colon */
+ p = strchr(line, ':');
+ if (!p)
+ return 0;
+ p++;
+
+ /* Skip initial spaces */
+ while (*p) {
+ if (!isspace(*p))
+ break;
+ p++;
+ }
+
+ do {
+ ret = read_objdump_chunk(&p, &buf, &buf_len);
+ bytes_read += ret;
+ p++;
+ } while (ret > 0);
+
+ /* return number of successfully read bytes */
+ return bytes_read;
+}
+
+static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
+{
+ char *line = NULL;
+ size_t line_len, off_last = 0;
+ ssize_t ret;
+ int err = 0;
+ u64 addr, last_addr = start_addr;
+
+ while (off_last < *len) {
+ size_t off, read_bytes, written_bytes;
+ unsigned char tmp[BUFSZ];
+
+ ret = getline(&line, &line_len, f);
+ if (feof(f))
+ break;
+ if (ret < 0) {
+ pr_debug("getline failed\n");
+ err = -1;
+ break;
+ }
+
+ /* read objdump data into temporary buffer */
+ read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
+ if (!read_bytes)
+ continue;
+
+ if (sscanf(line, "%"PRIx64, &addr) != 1)
+ continue;
+ if (addr < last_addr) {
+ pr_debug("addr going backwards, read beyond section?\n");
+ break;
+ }
+ last_addr = addr;
+
+		/* Copy it from the temporary buffer to 'buf' according to
+		 * the address on the current objdump line. */
+ off = addr - start_addr;
+ if (off >= *len)
+ break;
+ written_bytes = MIN(read_bytes, *len - off);
+ memcpy(buf + off, tmp, written_bytes);
+ off_last = off + written_bytes;
+ }
+
+ /* len returns number of bytes that could not be read */
+ *len -= off_last;
+
+ free(line);
+
+ return err;
+}
+
+static int read_via_objdump(const char *filename, u64 addr, void *buf,
+ size_t len)
+{
+ char cmd[PATH_MAX * 2];
+ const char *fmt;
+ FILE *f;
+ int ret;
+
+ fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
+ ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
+ filename);
+ if (ret <= 0 || (size_t)ret >= sizeof(cmd))
+ return -1;
+
+ pr_debug("Objdump command is: %s\n", cmd);
+
+ /* Ignore objdump errors */
+ strcat(cmd, " 2>/dev/null");
+
+ f = popen(cmd, "r");
+ if (!f) {
+ pr_debug("popen failed\n");
+ return -1;
+ }
+
+ ret = read_objdump_output(f, buf, &len, addr);
+ if (len) {
+ pr_debug("objdump read too few bytes: %zd\n", len);
+ if (!ret)
+ ret = len;
+ }
+
+ pclose(f);
+
+ return ret;
+}
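
/*
 * For a concrete (hypothetical) set of arguments, the command built above
 * looks like this; read_objdump_output() then parses the hex byte columns of
 * its output back into the caller's buffer:
 *
 *	read_via_objdump("/usr/bin/ls", 0x5000, buf, 128) runs
 *	objdump -z -d --start-address=0x5000 --stop-address=0x5080 /usr/bin/ls 2>/dev/null
 */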
+
+static void dump_buf(unsigned char *buf, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ pr_debug("0x%02x ", buf[i]);
+ if (i % 16 == 15)
+ pr_debug("\n");
+ }
+ pr_debug("\n");
+}
+
+static int read_object_code(u64 addr, size_t len, u8 cpumode,
+ struct thread *thread, struct state *state)
+{
+ struct addr_location al;
+ unsigned char buf1[BUFSZ] = {0};
+ unsigned char buf2[BUFSZ] = {0};
+ size_t ret_len;
+ u64 objdump_addr;
+ const char *objdump_name;
+ char decomp_name[KMOD_DECOMP_LEN];
+ bool decomp = false;
+ int ret;
+
+ pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
+
+ if (!thread__find_map(thread, cpumode, addr, &al) || !al.map->dso) {
+ if (cpumode == PERF_RECORD_MISC_HYPERVISOR) {
+ pr_debug("Hypervisor address can not be resolved - skipping\n");
+ return 0;
+ }
+
+ pr_debug("thread__find_map failed\n");
+ return -1;
+ }
+
+ pr_debug("File is: %s\n", al.map->dso->long_name);
+
+ if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
+ !dso__is_kcore(al.map->dso)) {
+ pr_debug("Unexpected kernel address - skipping\n");
+ return 0;
+ }
+
+ pr_debug("On file address is: %#"PRIx64"\n", al.addr);
+
+ if (len > BUFSZ)
+ len = BUFSZ;
+
+ /* Do not go off the map */
+ if (addr + len > al.map->end)
+ len = al.map->end - addr;
+
+ /* Read the object code using perf */
+ ret_len = dso__data_read_offset(al.map->dso, thread->maps->machine,
+ al.addr, buf1, len);
+ if (ret_len != len) {
+ pr_debug("dso__data_read_offset failed\n");
+ return -1;
+ }
+
+ /*
+ * Converting addresses for use by objdump requires more information.
+ * map__load() does that. See map__rip_2objdump() for details.
+ */
+ if (map__load(al.map))
+ return -1;
+
+ /* objdump struggles with kcore - try each map only once */
+ if (dso__is_kcore(al.map->dso)) {
+ size_t d;
+
+ for (d = 0; d < state->done_cnt; d++) {
+ if (state->done[d] == al.map->start) {
+ pr_debug("kcore map tested already");
+ pr_debug(" - skipping\n");
+ return 0;
+ }
+ }
+ if (state->done_cnt >= ARRAY_SIZE(state->done)) {
+ pr_debug("Too many kcore maps - skipping\n");
+ return 0;
+ }
+ state->done[state->done_cnt++] = al.map->start;
+ }
+
+ objdump_name = al.map->dso->long_name;
+ if (dso__needs_decompress(al.map->dso)) {
+ if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
+ decomp_name,
+ sizeof(decomp_name)) < 0) {
+ pr_debug("decompression failed\n");
+ return -1;
+ }
+
+ decomp = true;
+ objdump_name = decomp_name;
+ }
+
+ /* Read the object code using objdump */
+ objdump_addr = map__rip_2objdump(al.map, al.addr);
+ ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
+
+ if (decomp)
+ unlink(objdump_name);
+
+ if (ret > 0) {
+ /*
+ * The kernel maps are inaccurate - assume objdump is right in
+ * that case.
+ */
+ if (cpumode == PERF_RECORD_MISC_KERNEL ||
+ cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
+ len -= ret;
+ if (len) {
+ pr_debug("Reducing len to %zu\n", len);
+ } else if (dso__is_kcore(al.map->dso)) {
+ /*
+ * objdump cannot handle very large segments
+ * that may be found in kcore.
+ */
+ pr_debug("objdump failed for kcore");
+ pr_debug(" - skipping\n");
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+ }
+ if (ret < 0) {
+ pr_debug("read_via_objdump failed\n");
+ return -1;
+ }
+
+ /* The results should be identical */
+ if (memcmp(buf1, buf2, len)) {
+ pr_debug("Bytes read differ from those read by objdump\n");
+ pr_debug("buf1 (dso):\n");
+ dump_buf(buf1, len);
+ pr_debug("buf2 (objdump):\n");
+ dump_buf(buf2, len);
+ return -1;
+ }
+ pr_debug("Bytes read match those read by objdump\n");
+
+ return 0;
+}
+
+static int process_sample_event(struct machine *machine,
+ struct evlist *evlist,
+ union perf_event *event, struct state *state)
+{
+ struct perf_sample sample;
+ struct thread *thread;
+ int ret;
+
+ if (evlist__parse_sample(evlist, event, &sample)) {
+ pr_debug("evlist__parse_sample failed\n");
+ return -1;
+ }
+
+ thread = machine__findnew_thread(machine, sample.pid, sample.tid);
+ if (!thread) {
+ pr_debug("machine__findnew_thread failed\n");
+ return -1;
+ }
+
+ ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state);
+ thread__put(thread);
+ return ret;
+}
+
+static int process_event(struct machine *machine, struct evlist *evlist,
+ union perf_event *event, struct state *state)
+{
+ if (event->header.type == PERF_RECORD_SAMPLE)
+ return process_sample_event(machine, evlist, event, state);
+
+ if (event->header.type == PERF_RECORD_THROTTLE ||
+ event->header.type == PERF_RECORD_UNTHROTTLE)
+ return 0;
+
+ if (event->header.type < PERF_RECORD_MAX) {
+ int ret;
+
+ ret = machine__process_event(machine, event, NULL);
+ if (ret < 0)
+ pr_debug("machine__process_event failed, event type %u\n",
+ event->header.type);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int process_events(struct machine *machine, struct evlist *evlist,
+ struct state *state)
+{
+ union perf_event *event;
+ struct mmap *md;
+ int i, ret;
+
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ md = &evlist->mmap[i];
+ if (perf_mmap__read_init(&md->core) < 0)
+ continue;
+
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
+ ret = process_event(machine, evlist, event, state);
+ perf_mmap__consume(&md->core);
+ if (ret < 0)
+ return ret;
+ }
+ perf_mmap__read_done(&md->core);
+ }
+ return 0;
+}
+
+static int comp(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static void do_sort_something(void)
+{
+ int buf[40960], i;
+
+ for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
+ buf[i] = ARRAY_SIZE(buf) - i - 1;
+
+ qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);
+
+ for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
+ if (buf[i] != i) {
+ pr_debug("qsort failed\n");
+ break;
+ }
+ }
+}
+
+static void sort_something(void)
+{
+ int i;
+
+ for (i = 0; i < 10; i++)
+ do_sort_something();
+}
+
+static void syscall_something(void)
+{
+ int pipefd[2];
+ int i;
+
+ for (i = 0; i < 1000; i++) {
+ if (pipe(pipefd) < 0) {
+ pr_debug("pipe failed\n");
+ break;
+ }
+ close(pipefd[1]);
+ close(pipefd[0]);
+ }
+}
+
+static void fs_something(void)
+{
+ const char *test_file_name = "temp-perf-code-reading-test-file--";
+ FILE *f;
+ int i;
+
+ for (i = 0; i < 1000; i++) {
+ f = fopen(test_file_name, "w+");
+ if (f) {
+ fclose(f);
+ unlink(test_file_name);
+ }
+ }
+}
+
+#ifdef __s390x__
+#include "header.h" // for get_cpuid()
+#endif
+
+static const char *do_determine_event(bool excl_kernel)
+{
+ const char *event = excl_kernel ? "cycles:u" : "cycles";
+
+#ifdef __s390x__
+ char cpuid[128], model[16], model_c[16], cpum_cf_v[16];
+ unsigned int family;
+ int ret, cpum_cf_a;
+
+ if (get_cpuid(cpuid, sizeof(cpuid)))
+ goto out_clocks;
+ ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%x", &family, model_c,
+ model, cpum_cf_v, &cpum_cf_a);
+ if (ret != 5) /* Not available */
+ goto out_clocks;
+ if (excl_kernel && (cpum_cf_a & 4))
+ return event;
+ if (!excl_kernel && (cpum_cf_a & 2))
+ return event;
+
+ /* Fall through: missing authorization */
+out_clocks:
+ event = excl_kernel ? "cpu-clock:u" : "cpu-clock";
+
+#endif
+ return event;
+}
+
+static void do_something(void)
+{
+ fs_something();
+
+ sort_something();
+
+ syscall_something();
+}
+
+enum {
+ TEST_CODE_READING_OK,
+ TEST_CODE_READING_NO_VMLINUX,
+ TEST_CODE_READING_NO_KCORE,
+ TEST_CODE_READING_NO_ACCESS,
+ TEST_CODE_READING_NO_KERNEL_OBJ,
+};
+
+static int do_test_code_reading(bool try_kcore)
+{
+ struct machine *machine;
+ struct thread *thread;
+ struct record_opts opts = {
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .freq = 500,
+ .target = {
+ .uses_mmap = true,
+ },
+ };
+ struct state state = {
+ .done_cnt = 0,
+ };
+ struct perf_thread_map *threads = NULL;
+ struct perf_cpu_map *cpus = NULL;
+ struct evlist *evlist = NULL;
+ struct evsel *evsel = NULL;
+ int err = -1, ret;
+ pid_t pid;
+ struct map *map;
+ bool have_vmlinux, have_kcore, excl_kernel = false;
+
+ pid = getpid();
+
+ machine = machine__new_host();
+ machine->env = &perf_env;
+
+ ret = machine__create_kernel_maps(machine);
+ if (ret < 0) {
+ pr_debug("machine__create_kernel_maps failed\n");
+ goto out_err;
+ }
+
+ /* Force the use of kallsyms instead of vmlinux to try kcore */
+ if (try_kcore)
+ symbol_conf.kallsyms_name = "/proc/kallsyms";
+
+ /* Load kernel map */
+ map = machine__kernel_map(machine);
+ ret = map__load(map);
+ if (ret < 0) {
+ pr_debug("map__load failed\n");
+ goto out_err;
+ }
+ have_vmlinux = dso__is_vmlinux(map->dso);
+ have_kcore = dso__is_kcore(map->dso);
+
+ /* 2nd time through we just try kcore */
+ if (try_kcore && !have_kcore)
+ return TEST_CODE_READING_NO_KCORE;
+
+ /* No point getting kernel events if there is no kernel object */
+ if (!have_vmlinux && !have_kcore)
+ excl_kernel = true;
+
+ threads = thread_map__new_by_tid(pid);
+ if (!threads) {
+ pr_debug("thread_map__new_by_tid failed\n");
+ goto out_err;
+ }
+
+ ret = perf_event__synthesize_thread_map(NULL, threads,
+ perf_event__process, machine,
+ true, false);
+ if (ret < 0) {
+ pr_debug("perf_event__synthesize_thread_map failed\n");
+ goto out_err;
+ }
+
+ thread = machine__findnew_thread(machine, pid, pid);
+ if (!thread) {
+ pr_debug("machine__findnew_thread failed\n");
+ goto out_put;
+ }
+
+ cpus = perf_cpu_map__new(NULL);
+ if (!cpus) {
+ pr_debug("perf_cpu_map__new failed\n");
+ goto out_put;
+ }
+
+ while (1) {
+ const char *str;
+
+ evlist = evlist__new();
+ if (!evlist) {
+ pr_debug("evlist__new failed\n");
+ goto out_put;
+ }
+
+ perf_evlist__set_maps(&evlist->core, cpus, threads);
+
+ str = do_determine_event(excl_kernel);
+ pr_debug("Parsing event '%s'\n", str);
+ ret = parse_event(evlist, str);
+ if (ret < 0) {
+ pr_debug("parse_events failed\n");
+ goto out_put;
+ }
+
+ evlist__config(evlist, &opts, NULL);
+
+ evsel = evlist__first(evlist);
+
+ evsel->core.attr.comm = 1;
+ evsel->core.attr.disabled = 1;
+ evsel->core.attr.enable_on_exec = 0;
+
+ ret = evlist__open(evlist);
+ if (ret < 0) {
+ if (!excl_kernel) {
+ excl_kernel = true;
+ /*
+				 * Both cpus and threads are now owned by evlist
+				 * and will be freed by the following perf_evlist__set_maps()
+				 * call. Get a reference to keep them alive.
+ */
+ perf_cpu_map__get(cpus);
+ perf_thread_map__get(threads);
+ perf_evlist__set_maps(&evlist->core, NULL, NULL);
+ evlist__delete(evlist);
+ evlist = NULL;
+ continue;
+ }
+
+ if (verbose > 0) {
+ char errbuf[512];
+ evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
+ pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
+ }
+
+ goto out_put;
+ }
+ break;
+ }
+
+ ret = evlist__mmap(evlist, UINT_MAX);
+ if (ret < 0) {
+ pr_debug("evlist__mmap failed\n");
+ goto out_put;
+ }
+
+ evlist__enable(evlist);
+
+ do_something();
+
+ evlist__disable(evlist);
+
+ ret = process_events(machine, evlist, &state);
+ if (ret < 0)
+ goto out_put;
+
+ if (!have_vmlinux && !have_kcore && !try_kcore)
+ err = TEST_CODE_READING_NO_KERNEL_OBJ;
+ else if (!have_vmlinux && !try_kcore)
+ err = TEST_CODE_READING_NO_VMLINUX;
+ else if (excl_kernel)
+ err = TEST_CODE_READING_NO_ACCESS;
+ else
+ err = TEST_CODE_READING_OK;
+out_put:
+ thread__put(thread);
+out_err:
+ evlist__delete(evlist);
+ perf_cpu_map__put(cpus);
+ perf_thread_map__put(threads);
+ machine__delete_threads(machine);
+ machine__delete(machine);
+
+ return err;
+}
+
+static int test__code_reading(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int ret;
+
+ ret = do_test_code_reading(false);
+ if (!ret)
+ ret = do_test_code_reading(true);
+
+ switch (ret) {
+ case TEST_CODE_READING_OK:
+ return 0;
+ case TEST_CODE_READING_NO_VMLINUX:
+ pr_debug("no vmlinux\n");
+ return 0;
+ case TEST_CODE_READING_NO_KCORE:
+ pr_debug("no kcore\n");
+ return 0;
+ case TEST_CODE_READING_NO_ACCESS:
+ pr_debug("no access\n");
+ return 0;
+ case TEST_CODE_READING_NO_KERNEL_OBJ:
+ pr_debug("no kernel obj\n");
+ return 0;
+ default:
+ return -1;
+ };
+}
+
+DEFINE_SUITE("Object code reading", code_reading);
diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c
new file mode 100644
index 000000000..7c873c6ae
--- /dev/null
+++ b/tools/perf/tests/cpumap.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "tests.h"
+#include <stdio.h>
+#include "cpumap.h"
+#include "event.h"
+#include "util/synthetic-events.h"
+#include <string.h>
+#include <linux/bitops.h>
+#include <perf/cpumap.h>
+#include "debug.h"
+
+struct machine;
+
+static int process_event_mask(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_record_cpu_map *map_event = &event->cpu_map;
+ struct perf_record_cpu_map_data *data;
+ struct perf_cpu_map *map;
+ unsigned int long_size;
+
+ data = &map_event->data;
+
+ TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__MASK);
+
+ long_size = data->mask32_data.long_size;
+
+ TEST_ASSERT_VAL("wrong long_size", long_size == 4 || long_size == 8);
+
+ TEST_ASSERT_VAL("wrong nr", data->mask32_data.nr == 1);
+
+ TEST_ASSERT_VAL("wrong cpu", perf_record_cpu_map_data__test_bit(0, data));
+ TEST_ASSERT_VAL("wrong cpu", !perf_record_cpu_map_data__test_bit(1, data));
+ for (int i = 2; i <= 20; i++)
+ TEST_ASSERT_VAL("wrong cpu", perf_record_cpu_map_data__test_bit(i, data));
+
+ map = cpu_map__new_data(data);
+ TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 20);
+
+ TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 0);
+ for (int i = 2; i <= 20; i++)
+ TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, i - 1).cpu == i);
+
+ perf_cpu_map__put(map);
+ return 0;
+}
+
+static int process_event_cpus(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_record_cpu_map *map_event = &event->cpu_map;
+ struct perf_record_cpu_map_data *data;
+ struct perf_cpu_map *map;
+
+ data = &map_event->data;
+
+ TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__CPUS);
+
+ TEST_ASSERT_VAL("wrong nr", data->cpus_data.nr == 2);
+ TEST_ASSERT_VAL("wrong cpu", data->cpus_data.cpu[0] == 1);
+ TEST_ASSERT_VAL("wrong cpu", data->cpus_data.cpu[1] == 256);
+
+ map = cpu_map__new_data(data);
+ TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 2);
+ TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 1);
+ TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 1).cpu == 256);
+ TEST_ASSERT_VAL("wrong refcnt", refcount_read(&map->refcnt) == 1);
+ perf_cpu_map__put(map);
+ return 0;
+}
+
+static int process_event_range_cpus(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_record_cpu_map *map_event = &event->cpu_map;
+ struct perf_record_cpu_map_data *data;
+ struct perf_cpu_map *map;
+
+ data = &map_event->data;
+
+ TEST_ASSERT_VAL("wrong type", data->type == PERF_CPU_MAP__RANGE_CPUS);
+
+ TEST_ASSERT_VAL("wrong any_cpu", data->range_cpu_data.any_cpu == 0);
+ TEST_ASSERT_VAL("wrong start_cpu", data->range_cpu_data.start_cpu == 1);
+ TEST_ASSERT_VAL("wrong end_cpu", data->range_cpu_data.end_cpu == 256);
+
+ map = cpu_map__new_data(data);
+ TEST_ASSERT_VAL("wrong nr", perf_cpu_map__nr(map) == 256);
+ TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__cpu(map, 0).cpu == 1);
+ TEST_ASSERT_VAL("wrong cpu", perf_cpu_map__max(map).cpu == 256);
+ TEST_ASSERT_VAL("wrong refcnt", refcount_read(&map->refcnt) == 1);
+ perf_cpu_map__put(map);
+ return 0;
+}
+
+
+static int test__cpu_map_synthesize(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct perf_cpu_map *cpus;
+
+ /* This one is better stored in a mask. */
+ cpus = perf_cpu_map__new("0,2-20");
+
+ TEST_ASSERT_VAL("failed to synthesize map",
+ !perf_event__synthesize_cpu_map(NULL, cpus, process_event_mask, NULL));
+
+ perf_cpu_map__put(cpus);
+
+ /* This one is better stored in cpu values. */
+ cpus = perf_cpu_map__new("1,256");
+
+ TEST_ASSERT_VAL("failed to synthesize map",
+ !perf_event__synthesize_cpu_map(NULL, cpus, process_event_cpus, NULL));
+
+ perf_cpu_map__put(cpus);
+
+ /* This one is better stored as a range. */
+ cpus = perf_cpu_map__new("1-256");
+
+ TEST_ASSERT_VAL("failed to synthesize map",
+ !perf_event__synthesize_cpu_map(NULL, cpus, process_event_range_cpus, NULL));
+
+ perf_cpu_map__put(cpus);
+ return 0;
+}
+
+static int cpu_map_print(const char *str)
+{
+ struct perf_cpu_map *map = perf_cpu_map__new(str);
+ char buf[100];
+
+ if (!map)
+ return -1;
+
+ cpu_map__snprint(map, buf, sizeof(buf));
+ perf_cpu_map__put(map);
+
+ return !strcmp(buf, str);
+}
+
+static int test__cpu_map_print(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1"));
+ TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,5"));
+ TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3,5,7,9,11,13,15,17,19,21-40"));
+ TEST_ASSERT_VAL("failed to convert map", cpu_map_print("2-5"));
+ TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3-6,8-10,24,35-37"));
+ TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3-6,8-10,24,35-37"));
+ TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1-10,12-20,22-30,32-40"));
+ return 0;
+}
+
+static int test__cpu_map_merge(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct perf_cpu_map *a = perf_cpu_map__new("4,2,1");
+ struct perf_cpu_map *b = perf_cpu_map__new("4,5,7");
+ struct perf_cpu_map *c = perf_cpu_map__merge(a, b);
+ char buf[100];
+
+ TEST_ASSERT_VAL("failed to merge map: bad nr", perf_cpu_map__nr(c) == 5);
+ cpu_map__snprint(c, buf, sizeof(buf));
+ TEST_ASSERT_VAL("failed to merge map: bad result", !strcmp(buf, "1-2,4-5,7"));
+ perf_cpu_map__put(b);
+ perf_cpu_map__put(c);
+ return 0;
+}
+
+DEFINE_SUITE("Synthesize cpu map", cpu_map_synthesize);
+DEFINE_SUITE("Print cpu map", cpu_map_print);
+DEFINE_SUITE("Merge cpu map", cpu_map_merge);
diff --git a/tools/perf/tests/demangle-java-test.c b/tools/perf/tests/demangle-java-test.c
new file mode 100644
index 000000000..44d1be303
--- /dev/null
+++ b/tools/perf/tests/demangle-java-test.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "tests.h"
+#include "session.h"
+#include "debug.h"
+#include "demangle-java.h"
+
+static int test__demangle_java(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int ret = TEST_OK;
+ char *buf = NULL;
+ size_t i;
+
+ struct {
+ const char *mangled, *demangled;
+ } test_cases[] = {
+ { "Ljava/lang/StringLatin1;equals([B[B)Z",
+ "boolean java.lang.StringLatin1.equals(byte[], byte[])" },
+ { "Ljava/util/zip/ZipUtils;CENSIZ([BI)J",
+ "long java.util.zip.ZipUtils.CENSIZ(byte[], int)" },
+ { "Ljava/util/regex/Pattern$BmpCharProperty;match(Ljava/util/regex/Matcher;ILjava/lang/CharSequence;)Z",
+ "boolean java.util.regex.Pattern$BmpCharProperty.match(java.util.regex.Matcher, int, java.lang.CharSequence)" },
+ { "Ljava/lang/AbstractStringBuilder;appendChars(Ljava/lang/String;II)V",
+ "void java.lang.AbstractStringBuilder.appendChars(java.lang.String, int, int)" },
+ { "Ljava/lang/Object;<init>()V",
+ "void java.lang.Object<init>()" },
+ };
+
+ for (i = 0; i < sizeof(test_cases) / sizeof(test_cases[0]); i++) {
+ buf = java_demangle_sym(test_cases[i].mangled, 0);
+ if (strcmp(buf, test_cases[i].demangled)) {
+ pr_debug("FAILED: %s: %s != %s\n", test_cases[i].mangled,
+ buf, test_cases[i].demangled);
+ ret = TEST_FAIL;
+ }
+ free(buf);
+ }
+
+ return ret;
+}
+
+DEFINE_SUITE("Demangle Java", demangle_java);
diff --git a/tools/perf/tests/demangle-ocaml-test.c b/tools/perf/tests/demangle-ocaml-test.c
new file mode 100644
index 000000000..90a4285e2
--- /dev/null
+++ b/tools/perf/tests/demangle-ocaml-test.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "tests.h"
+#include "session.h"
+#include "debug.h"
+#include "demangle-ocaml.h"
+
+static int test__demangle_ocaml(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int ret = TEST_OK;
+ char *buf = NULL;
+ size_t i;
+
+ struct {
+ const char *mangled, *demangled;
+ } test_cases[] = {
+ { "main",
+ NULL },
+ { "camlStdlib__array__map_154",
+ "Stdlib.array.map_154" },
+ { "camlStdlib__anon_fn$5bstdlib$2eml$3a334$2c0$2d$2d54$5d_1453",
+ "Stdlib.anon_fn[stdlib.ml:334,0--54]_1453" },
+ { "camlStdlib__bytes__$2b$2b_2205",
+ "Stdlib.bytes.++_2205" },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
+ buf = ocaml_demangle_sym(test_cases[i].mangled);
+ if ((buf == NULL && test_cases[i].demangled != NULL)
+ || (buf != NULL && test_cases[i].demangled == NULL)
+ || (buf != NULL && strcmp(buf, test_cases[i].demangled))) {
+ pr_debug("FAILED: %s: %s != %s\n", test_cases[i].mangled,
+ buf == NULL ? "(null)" : buf,
+ test_cases[i].demangled == NULL ? "(null)" : test_cases[i].demangled);
+ ret = TEST_FAIL;
+ }
+ free(buf);
+ }
+
+ return ret;
+}
+
+DEFINE_SUITE("Demangle OCaml", demangle_ocaml);
diff --git a/tools/perf/tests/dlfilter-test.c b/tools/perf/tests/dlfilter-test.c
new file mode 100644
index 000000000..84352d553
--- /dev/null
+++ b/tools/perf/tests/dlfilter-test.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test dlfilter C API. A perf.data file is synthesized and then processed
+ * by perf script with a dlfilter named dlfilter-test-api-v0.so. Also a C file
+ * is compiled to provide a dso to match the synthesized perf.data file.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/perf_event.h>
+#include <internal/lib.h>
+#include <subcmd/exec-cmd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <libgen.h>
+#include <string.h>
+#include <errno.h>
+#include "debug.h"
+#include "tool.h"
+#include "event.h"
+#include "header.h"
+#include "machine.h"
+#include "dso.h"
+#include "map.h"
+#include "symbol.h"
+#include "synthetic-events.h"
+#include "util.h"
+#include "archinsn.h"
+#include "dlfilter.h"
+#include "tests.h"
+
+#define MAP_START 0x400000
+
+struct test_data {
+ struct perf_tool tool;
+ struct machine *machine;
+ int fd;
+ u64 foo;
+ u64 bar;
+ u64 ip;
+ u64 addr;
+ char perf[PATH_MAX];
+ char perf_data_file_name[PATH_MAX];
+ char c_file_name[PATH_MAX];
+ char prog_file_name[PATH_MAX];
+ char dlfilters[PATH_MAX];
+};
+
+static int test_result(const char *msg, int ret)
+{
+ pr_debug("%s\n", msg);
+ return ret;
+}
+
+static int process(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct test_data *td = container_of(tool, struct test_data, tool);
+ int fd = td->fd;
+
+ if (writen(fd, event, event->header.size) != event->header.size)
+ return -1;
+
+ return 0;
+}
+
+#define MAXCMD 4096
+#define REDIRECT_TO_DEV_NULL " >/dev/null 2>&1"
+
+static __printf(1, 2) int system_cmd(const char *fmt, ...)
+{
+ char cmd[MAXCMD + sizeof(REDIRECT_TO_DEV_NULL)];
+ int ret;
+
+ va_list args;
+
+ va_start(args, fmt);
+ ret = vsnprintf(cmd, MAXCMD, fmt, args);
+ va_end(args);
+
+ if (ret <= 0 || ret >= MAXCMD)
+ return -1;
+
+ if (!verbose)
+ strcat(cmd, REDIRECT_TO_DEV_NULL);
+
+ pr_debug("Command: %s\n", cmd);
+ ret = system(cmd);
+ if (ret)
+ pr_debug("Failed with return value %d\n", ret);
+
+ return ret;
+}
+
+static bool have_gcc(void)
+{
+ pr_debug("Checking for gcc\n");
+ return !system_cmd("gcc --version");
+}
+
+static int write_attr(struct test_data *td, u64 sample_type, u64 *id)
+{
+ struct perf_event_attr attr = {
+ .size = sizeof(attr),
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+ .sample_type = sample_type,
+ .sample_period = 1,
+ };
+
+ return perf_event__synthesize_attr(&td->tool, &attr, 1, id, process);
+}
+
+static int write_comm(int fd, pid_t pid, pid_t tid, const char *comm_str)
+{
+ struct perf_record_comm comm;
+ ssize_t sz = sizeof(comm);
+
+ comm.header.type = PERF_RECORD_COMM;
+ comm.header.misc = PERF_RECORD_MISC_USER;
+ comm.header.size = sz;
+
+ comm.pid = pid;
+ comm.tid = tid;
+ strncpy(comm.comm, comm_str, 16);
+
+ if (writen(fd, &comm, sz) != sz) {
+ pr_debug("%s failed\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int write_mmap(int fd, pid_t pid, pid_t tid, u64 start, u64 len, u64 pgoff,
+ const char *filename)
+{
+ char buf[PERF_SAMPLE_MAX_SIZE];
+ struct perf_record_mmap *mmap = (struct perf_record_mmap *)buf;
+ size_t fsz = roundup(strlen(filename) + 1, 8);
+ ssize_t sz = sizeof(*mmap) - sizeof(mmap->filename) + fsz;
+
+ mmap->header.type = PERF_RECORD_MMAP;
+ mmap->header.misc = PERF_RECORD_MISC_USER;
+ mmap->header.size = sz;
+
+ mmap->pid = pid;
+ mmap->tid = tid;
+ mmap->start = start;
+ mmap->len = len;
+ mmap->pgoff = pgoff;
+ strncpy(mmap->filename, filename, sizeof(mmap->filename));
+
+ if (writen(fd, mmap, sz) != sz) {
+ pr_debug("%s failed\n", __func__);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int write_sample(struct test_data *td, u64 sample_type, u64 id, pid_t pid, pid_t tid)
+{
+ char buf[PERF_SAMPLE_MAX_SIZE];
+ union perf_event *event = (union perf_event *)buf;
+ struct perf_sample sample = {
+ .ip = td->ip,
+ .addr = td->addr,
+ .id = id,
+ .time = 1234567890,
+ .cpu = 31,
+ .pid = pid,
+ .tid = tid,
+ .period = 543212345,
+ .stream_id = 101,
+ };
+ int err;
+
+ event->header.type = PERF_RECORD_SAMPLE;
+ event->header.misc = PERF_RECORD_MISC_USER;
+ event->header.size = perf_event__sample_event_size(&sample, sample_type, 0);
+ err = perf_event__synthesize_sample(event, sample_type, 0, &sample);
+ if (err)
+ return test_result("perf_event__synthesize_sample() failed", TEST_FAIL);
+
+ err = process(&td->tool, event, &sample, td->machine);
+ if (err)
+ return test_result("Failed to write sample", TEST_FAIL);
+
+ return TEST_OK;
+}
+
+static void close_fd(int fd)
+{
+ if (fd >= 0)
+ close(fd);
+}
+
+static const char *prog = "int bar(){};int foo(){bar();};int main(){foo();return 0;}";
+
+static int write_prog(char *file_name)
+{
+ int fd = creat(file_name, 0644);
+ ssize_t n = strlen(prog);
+ bool err = fd < 0 || writen(fd, prog, n) != n;
+
+ close_fd(fd);
+ return err ? -1 : 0;
+}
+
+static int get_dlfilters_path(char *buf, size_t sz)
+{
+ char perf[PATH_MAX];
+ char path[PATH_MAX];
+ char *perf_path;
+ char *exec_path;
+
+ perf_exe(perf, sizeof(perf));
+ perf_path = dirname(perf);
+ snprintf(path, sizeof(path), "%s/dlfilters/dlfilter-test-api-v0.so", perf_path);
+ if (access(path, R_OK)) {
+ exec_path = get_argv_exec_path();
+ if (!exec_path)
+ return -1;
+ snprintf(path, sizeof(path), "%s/dlfilters/dlfilter-test-api-v0.so", exec_path);
+ free(exec_path);
+ if (access(path, R_OK))
+ return -1;
+ }
+ strlcpy(buf, dirname(path), sz);
+ return 0;
+}
+
+static int check_filter_desc(struct test_data *td)
+{
+ char *long_desc = NULL;
+ char *desc = NULL;
+ int ret;
+
+ if (get_filter_desc(td->dlfilters, "dlfilter-test-api-v0.so", &desc, &long_desc) &&
+ long_desc && !strcmp(long_desc, "Filter used by the 'dlfilter C API' perf test") &&
+ desc && !strcmp(desc, "dlfilter to test v0 C API"))
+ ret = 0;
+ else
+ ret = -1;
+
+ free(desc);
+ free(long_desc);
+ return ret;
+}
+
+static int get_ip_addr(struct test_data *td)
+{
+ struct map *map;
+ struct symbol *sym;
+
+ map = dso__new_map(td->prog_file_name);
+ if (!map)
+ return -1;
+
+ sym = map__find_symbol_by_name(map, "foo");
+ if (sym)
+ td->foo = sym->start;
+
+ sym = map__find_symbol_by_name(map, "bar");
+ if (sym)
+ td->bar = sym->start;
+
+ map__put(map);
+
+ td->ip = MAP_START + td->foo;
+ td->addr = MAP_START + td->bar;
+
+ return td->foo && td->bar ? 0 : -1;
+}
+
+static int do_run_perf_script(struct test_data *td, int do_early)
+{
+ return system_cmd("%s script -i %s "
+ "--dlfilter %s/dlfilter-test-api-v0.so "
+ "--dlarg first "
+ "--dlarg %d "
+ "--dlarg %" PRIu64 " "
+ "--dlarg %" PRIu64 " "
+ "--dlarg %d "
+ "--dlarg last",
+ td->perf, td->perf_data_file_name, td->dlfilters,
+ verbose, td->ip, td->addr, do_early);
+}
+
+static int run_perf_script(struct test_data *td)
+{
+ int do_early;
+ int err;
+
+ for (do_early = 0; do_early < 3; do_early++) {
+ err = do_run_perf_script(td, do_early);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+#define TEST_SAMPLE_TYPE (PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
+ PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_TIME | \
+ PERF_SAMPLE_ADDR | PERF_SAMPLE_CPU | \
+ PERF_SAMPLE_PERIOD | PERF_SAMPLE_STREAM_ID)
+
+static int test__dlfilter_test(struct test_data *td)
+{
+ u64 sample_type = TEST_SAMPLE_TYPE;
+ pid_t pid = 12345;
+ pid_t tid = 12346;
+ u64 id = 99;
+ int err;
+
+ if (get_dlfilters_path(td->dlfilters, PATH_MAX))
+ return test_result("dlfilters not found", TEST_SKIP);
+
+ if (check_filter_desc(td))
+ return test_result("Failed to get expected filter description", TEST_FAIL);
+
+ if (!have_gcc())
+ return test_result("gcc not found", TEST_SKIP);
+
+ pr_debug("dlfilters path: %s\n", td->dlfilters);
+
+ if (write_prog(td->c_file_name))
+ return test_result("Failed to write test C file", TEST_FAIL);
+
+ if (verbose > 1)
+ system_cmd("cat %s ; echo", td->c_file_name);
+
+ if (system_cmd("gcc -g -o %s %s", td->prog_file_name, td->c_file_name))
+ return TEST_FAIL;
+
+ if (verbose > 2)
+ system_cmd("objdump -x -dS %s", td->prog_file_name);
+
+ if (get_ip_addr(td))
+ return test_result("Failed to find program symbols", TEST_FAIL);
+
+ pr_debug("Creating new host machine structure\n");
+ td->machine = machine__new_host();
+ td->machine->env = &perf_env;
+
+ td->fd = creat(td->perf_data_file_name, 0644);
+ if (td->fd < 0)
+ return test_result("Failed to create test perf.data file", TEST_FAIL);
+
+ err = perf_header__write_pipe(td->fd);
+ if (err < 0)
+ return test_result("perf_header__write_pipe() failed", TEST_FAIL);
+
+ err = write_attr(td, sample_type, &id);
+ if (err)
+ return test_result("perf_event__synthesize_attr() failed", TEST_FAIL);
+
+ if (write_comm(td->fd, pid, tid, "test-prog"))
+ return TEST_FAIL;
+
+ if (write_mmap(td->fd, pid, tid, MAP_START, 0x10000, 0, td->prog_file_name))
+ return TEST_FAIL;
+
+ if (write_sample(td, sample_type, id, pid, tid) != TEST_OK)
+ return TEST_FAIL;
+
+ if (verbose > 1)
+ system_cmd("%s script -i %s -D", td->perf, td->perf_data_file_name);
+
+ err = run_perf_script(td);
+ if (err)
+ return TEST_FAIL;
+
+ return TEST_OK;
+}
+
+static void unlink_path(const char *path)
+{
+ if (*path)
+ unlink(path);
+}
+
+static void test_data__free(struct test_data *td)
+{
+ machine__delete(td->machine);
+ close_fd(td->fd);
+ if (verbose <= 2) {
+ unlink_path(td->c_file_name);
+ unlink_path(td->prog_file_name);
+ unlink_path(td->perf_data_file_name);
+ }
+}
+
+static int test__dlfilter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct test_data td = {.fd = -1};
+ int pid = getpid();
+ int err;
+
+ perf_exe(td.perf, sizeof(td.perf));
+
+ snprintf(td.perf_data_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-perf-data", pid);
+ snprintf(td.c_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-prog.c", pid);
+ snprintf(td.prog_file_name, PATH_MAX, "/tmp/dlfilter-test-%u-prog", pid);
+
+ err = test__dlfilter_test(&td);
+ test_data__free(&td);
+ return err;
+}
+
+DEFINE_SUITE("dlfilter C API", dlfilter);
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
new file mode 100644
index 000000000..3419a4ab5
--- /dev/null
+++ b/tools/perf/tests/dso-data.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <dirent.h>
+#include <stdlib.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <api/fs/fs.h>
+#include "dso.h"
+#include "machine.h"
+#include "symbol.h"
+#include "tests.h"
+#include "debug.h"
+
+static char *test_file(int size)
+{
+#define TEMPL "/tmp/perf-test-XXXXXX"
+ static char buf_templ[sizeof(TEMPL)];
+ char *templ = buf_templ;
+ int fd, i;
+ unsigned char *buf;
+
+ strcpy(buf_templ, TEMPL);
+#undef TEMPL
+
+ fd = mkstemp(templ);
+ if (fd < 0) {
+ perror("mkstemp failed");
+ return NULL;
+ }
+
+ buf = malloc(size);
+ if (!buf) {
+ close(fd);
+ return NULL;
+ }
+
+ for (i = 0; i < size; i++)
+ buf[i] = (unsigned char) ((int) i % 10);
+
+ if (size != write(fd, buf, size))
+ templ = NULL;
+
+ free(buf);
+ close(fd);
+ return templ;
+}
+
+#define TEST_FILE_SIZE (DSO__DATA_CACHE_SIZE * 20)
+
+struct test_data_offset {
+ off_t offset;
+ u8 data[10];
+ int size;
+};
+
+struct test_data_offset offsets[] = {
+ /* Fill first cache page. */
+ {
+ .offset = 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Read first cache page. */
+ {
+ .offset = 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Fill cache boundary pages. */
+ {
+ .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Read cache boundary pages. */
+ {
+ .offset = DSO__DATA_CACHE_SIZE - DSO__DATA_CACHE_SIZE % 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Fill final cache page. */
+ {
+ .offset = TEST_FILE_SIZE - 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Read final cache page. */
+ {
+ .offset = TEST_FILE_SIZE - 10,
+ .data = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 },
+ .size = 10,
+ },
+ /* Read final cache page. */
+ {
+ .offset = TEST_FILE_SIZE - 3,
+ .data = { 7, 8, 9, 0, 0, 0, 0, 0, 0, 0 },
+ .size = 3,
+ },
+};
+
+/* Moved from util/dso.c for compatibility. */
+static int dso__data_fd(struct dso *dso, struct machine *machine)
+{
+ int fd = dso__data_get_fd(dso, machine);
+
+ if (fd >= 0)
+ dso__data_put_fd(dso);
+
+ return fd;
+}
+
+static int test__dso_data(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct machine machine;
+ struct dso *dso;
+ char *file = test_file(TEST_FILE_SIZE);
+ size_t i;
+
+ TEST_ASSERT_VAL("No test file", file);
+
+ memset(&machine, 0, sizeof(machine));
+
+ dso = dso__new((const char *)file);
+
+ TEST_ASSERT_VAL("Failed to access to dso",
+ dso__data_fd(dso, &machine) >= 0);
+
+ /* Basic 10 bytes tests. */
+ for (i = 0; i < ARRAY_SIZE(offsets); i++) {
+ struct test_data_offset *data = &offsets[i];
+ ssize_t size;
+ u8 buf[10];
+
+ memset(buf, 0, 10);
+ size = dso__data_read_offset(dso, &machine, data->offset,
+ buf, 10);
+
+ TEST_ASSERT_VAL("Wrong size", size == data->size);
+ TEST_ASSERT_VAL("Wrong data", !memcmp(buf, data->data, 10));
+ }
+
+ /* Read cross multiple cache pages. */
+ {
+ ssize_t size;
+ int c;
+ u8 *buf;
+
+ buf = malloc(TEST_FILE_SIZE);
+ TEST_ASSERT_VAL("ENOMEM\n", buf);
+
+ /* First iteration to fill caches, second one to read them. */
+ for (c = 0; c < 2; c++) {
+ memset(buf, 0, TEST_FILE_SIZE);
+ size = dso__data_read_offset(dso, &machine, 10,
+ buf, TEST_FILE_SIZE);
+
+ TEST_ASSERT_VAL("Wrong size",
+ size == (TEST_FILE_SIZE - 10));
+
+ for (i = 0; i < (size_t)size; i++)
+ TEST_ASSERT_VAL("Wrong data",
+ buf[i] == (i % 10));
+ }
+
+ free(buf);
+ }
+
+ dso__put(dso);
+ unlink(file);
+ return 0;
+}
+
+static long open_files_cnt(void)
+{
+ char path[PATH_MAX];
+ struct dirent *dent;
+ DIR *dir;
+ long nr = 0;
+
+ scnprintf(path, PATH_MAX, "%s/self/fd", procfs__mountpoint());
+ pr_debug("fd path: %s\n", path);
+
+ dir = opendir(path);
+ TEST_ASSERT_VAL("failed to open fd directory", dir);
+
+ while ((dent = readdir(dir)) != NULL) {
+ if (!strcmp(dent->d_name, ".") ||
+ !strcmp(dent->d_name, ".."))
+ continue;
+
+ nr++;
+ }
+
+ closedir(dir);
+ return nr - 1;
+}
+
+static struct dso **dsos;
+
+static int dsos__create(int cnt, int size)
+{
+ int i;
+
+ dsos = malloc(sizeof(*dsos) * cnt);
+ TEST_ASSERT_VAL("failed to alloc dsos array", dsos);
+
+ for (i = 0; i < cnt; i++) {
+ char *file;
+
+ file = test_file(size);
+ TEST_ASSERT_VAL("failed to get dso file", file);
+
+ dsos[i] = dso__new(file);
+ TEST_ASSERT_VAL("failed to get dso", dsos[i]);
+ }
+
+ return 0;
+}
+
+static void dsos__delete(int cnt)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct dso *dso = dsos[i];
+
+ unlink(dso->name);
+ dso__put(dso);
+ }
+
+ free(dsos);
+}
+
+static int set_fd_limit(int n)
+{
+ struct rlimit rlim;
+
+ if (getrlimit(RLIMIT_NOFILE, &rlim))
+ return -1;
+
+ pr_debug("file limit %ld, new %d\n", (long) rlim.rlim_cur, n);
+
+ rlim.rlim_cur = n;
+ return setrlimit(RLIMIT_NOFILE, &rlim);
+}
+
+static int test__dso_data_cache(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct machine machine;
+ long nr_end, nr = open_files_cnt();
+ int dso_cnt, limit, i, fd;
+
+ /* Reset the internal dso open counter limit. */
+ reset_fd_limit();
+
+ memset(&machine, 0, sizeof(machine));
+
+ /* set as system limit */
+ limit = nr * 4;
+ TEST_ASSERT_VAL("failed to set file limit", !set_fd_limit(limit));
+
+ /* and this is now our dso open FDs limit */
+ dso_cnt = limit / 2;
+ TEST_ASSERT_VAL("failed to create dsos\n",
+ !dsos__create(dso_cnt, TEST_FILE_SIZE));
+
+ for (i = 0; i < (dso_cnt - 1); i++) {
+ struct dso *dso = dsos[i];
+
+ /*
+ * Open dsos via dso__data_fd(); it opens the data
+ * file and keeps it open until the open file limit is hit.
+ */
+ fd = dso__data_fd(dso, &machine);
+ TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+ if (i % 2) {
+ #define BUFSIZE 10
+ u8 buf[BUFSIZE];
+ ssize_t n;
+
+ n = dso__data_read_offset(dso, &machine, 0, buf, BUFSIZE);
+ TEST_ASSERT_VAL("failed to read dso", n == BUFSIZE);
+ }
+ }
+
+ /* verify the first one is already open */
+ TEST_ASSERT_VAL("dsos[0] is not open", dsos[0]->data.fd != -1);
+
+ /* open +1 dso to reach the allowed limit */
+ fd = dso__data_fd(dsos[i], &machine);
+ TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+ /* should force the first one to be closed */
+ TEST_ASSERT_VAL("failed to close dsos[0]", dsos[0]->data.fd == -1);
+
+ /* cleanup everything */
+ dsos__delete(dso_cnt);
+
+ /* Make sure we did not leak any file descriptor. */
+ nr_end = open_files_cnt();
+ pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
+ TEST_ASSERT_VAL("failed leaking files", nr == nr_end);
+ return 0;
+}
+
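+/*
+ * Recursively open count + 1 descriptors, return the number handed to the
+ * deepest one and close them all again: used as an RLIMIT_NOFILE value
+ * that leaves room for exactly 'count' more opens from the current state.
+ */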
+static long new_limit(int count)
+{
+ int fd = open("/dev/null", O_RDONLY);
+ long ret = fd;
+ if (count > 0)
+ ret = new_limit(--count);
+ close(fd);
+ return ret;
+}
+
+static int test__dso_data_reopen(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct machine machine;
+ long nr_end, nr = open_files_cnt(), lim = new_limit(3);
+ int fd, fd_extra;
+
+#define dso_0 (dsos[0])
+#define dso_1 (dsos[1])
+#define dso_2 (dsos[2])
+
+ /* Reset the internal dso open counter limit. */
+ reset_fd_limit();
+
+ memset(&machine, 0, sizeof(machine));
+
+ /*
+ * Test scenario:
+ * - create 3 dso objects
+ * - set process file descriptor limit to current
+ * files count + 3
+ * - test that the first dso gets closed when we
+ * reach the files count limit
+ */
+
+ /* Make sure we are able to open 3 fds anyway */
+ TEST_ASSERT_VAL("failed to set file limit",
+ !set_fd_limit(lim));
+
+ TEST_ASSERT_VAL("failed to create dsos\n", !dsos__create(3, TEST_FILE_SIZE));
+
+ /* open dso_0 */
+ fd = dso__data_fd(dso_0, &machine);
+ TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+ /* open dso_1 */
+ fd = dso__data_fd(dso_1, &machine);
+ TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+ /*
+ * Open an extra file descriptor so that we have just
+ * reached the open files count limit.
+ */
+ fd_extra = open("/dev/null", O_RDONLY);
+ TEST_ASSERT_VAL("failed to open extra fd", fd_extra > 0);
+
+ /* open dso_2 */
+ fd = dso__data_fd(dso_2, &machine);
+ TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+ /*
+ * dso_0 should get closed, because we reached
+ * the file descriptor limit
+ */
+ TEST_ASSERT_VAL("failed to close dso_0", dso_0->data.fd == -1);
+
+ /* open dso_0 */
+ fd = dso__data_fd(dso_0, &machine);
+ TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+ /*
+ * dso_1 should get closed, because we reached
+ * the file descriptor limit
+ */
+ TEST_ASSERT_VAL("failed to close dso_1", dso_1->data.fd == -1);
+
+ /* cleanup everything */
+ close(fd_extra);
+ dsos__delete(3);
+
+ /* Make sure we did not leak any file descriptor. */
+ nr_end = open_files_cnt();
+ pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
+ TEST_ASSERT_VAL("failed leaking files", nr == nr_end);
+ return 0;
+}
+
+DEFINE_SUITE("DSO data read", dso_data);
+DEFINE_SUITE("DSO data cache", dso_data_cache);
+DEFINE_SUITE("DSO data reopen", dso_data_reopen);
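The cache and reopen tests above drive dso__data_fd() against RLIMIT_NOFILE. Below is a minimal standalone sketch of the same getrlimit()/setrlimit() pattern that set_fd_limit() wraps (plain POSIX, not part of the patch; the limit value 64 is arbitrary):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rlim;

	/* Read the current soft/hard limits on open file descriptors. */
	if (getrlimit(RLIMIT_NOFILE, &rlim))
		return 1;
	printf("soft %llu hard %llu\n",
	       (unsigned long long)rlim.rlim_cur,
	       (unsigned long long)rlim.rlim_max);

	/*
	 * Lower only the soft limit, as set_fd_limit() does; an
	 * unprivileged process may later raise it again up to rlim_max.
	 */
	rlim.rlim_cur = 64;
	return setrlimit(RLIMIT_NOFILE, &rlim) ? 1 : 0;
}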
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
new file mode 100644
index 000000000..afdca7f29
--- /dev/null
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/zalloc.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <unistd.h>
+#include "tests.h"
+#include "debug.h"
+#include "machine.h"
+#include "event.h"
+#include "../util/unwind.h"
+#include "perf_regs.h"
+#include "map.h"
+#include "symbol.h"
+#include "thread.h"
+#include "callchain.h"
+#include "util/synthetic-events.h"
+
+/* For bsearch. We try to unwind functions in a shared object. */
+#include <stdlib.h>
+
+/*
+ * The test will assert that frames are on the stack, but tail call optimizations lose
+ * the frame of the caller. Clang can disable this optimization on a called
+ * function but GCC currently (11/2020) lacks this attribute. The barrier is
+ * used to inhibit tail calls in these cases.
+ */
+#ifdef __has_attribute
+#if __has_attribute(disable_tail_calls)
+#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
+#define NO_TAIL_CALL_BARRIER
+#endif
+#endif
+#ifndef NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
+#endif
+
+static int mmap_handler(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return machine__process_mmap2_event(machine, event, sample);
+}
+
+static int init_live_machine(struct machine *machine)
+{
+ union perf_event event;
+ pid_t pid = getpid();
+
+ memset(&event, 0, sizeof(event));
+ return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
+ mmap_handler, machine, true);
+}
+
+/*
+ * We need to keep these functions global, despite the
+ * fact that they are used only locally in this object,
+ * in order to keep them around even if the binary is
+ * stripped. If they are gone, the unwind check for
+ * symbol fails.
+ */
+int test_dwarf_unwind__thread(struct thread *thread);
+int test_dwarf_unwind__compare(void *p1, void *p2);
+int test_dwarf_unwind__krava_3(struct thread *thread);
+int test_dwarf_unwind__krava_2(struct thread *thread);
+int test_dwarf_unwind__krava_1(struct thread *thread);
+
+#define MAX_STACK 8
+
+static int unwind_entry(struct unwind_entry *entry, void *arg)
+{
+ unsigned long *cnt = (unsigned long *) arg;
+ char *symbol = entry->ms.sym ? entry->ms.sym->name : NULL;
+ static const char *funcs[MAX_STACK] = {
+ "test__arch_unwind_sample",
+ "test_dwarf_unwind__thread",
+ "test_dwarf_unwind__compare",
+ "bsearch",
+ "test_dwarf_unwind__krava_3",
+ "test_dwarf_unwind__krava_2",
+ "test_dwarf_unwind__krava_1",
+ "test__dwarf_unwind"
+ };
+ /*
+ * Index into the funcs[MAX_STACK] array, based on the
+ * configured callchain order.
+ */
+ int idx = callchain_param.order == ORDER_CALLER ?
+ MAX_STACK - *cnt - 1 : *cnt;
+
+ if (*cnt >= MAX_STACK) {
+ pr_debug("failed: crossed the max stack value %d\n", MAX_STACK);
+ return -1;
+ }
+
+ if (!symbol) {
+ pr_debug("failed: got unresolved address 0x%" PRIx64 "\n",
+ entry->ip);
+ return -1;
+ }
+
+ (*cnt)++;
+ pr_debug("got: %s 0x%" PRIx64 ", expecting %s\n",
+ symbol, entry->ip, funcs[idx]);
+ return strcmp((const char *) symbol, funcs[idx]);
+}
+
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
+{
+ struct perf_sample sample;
+ unsigned long cnt = 0;
+ int err = -1;
+
+ memset(&sample, 0, sizeof(sample));
+
+ if (test__arch_unwind_sample(&sample, thread)) {
+ pr_debug("failed to get unwind sample\n");
+ goto out;
+ }
+
+ err = unwind__get_entries(unwind_entry, &cnt, thread,
+ &sample, MAX_STACK, false);
+ if (err)
+ pr_debug("unwind failed\n");
+ else if (cnt != MAX_STACK) {
+ pr_debug("got wrong number of stack entries %lu != %d\n",
+ cnt, MAX_STACK);
+ err = -1;
+ }
+
+ out:
+ zfree(&sample.user_stack.data);
+ zfree(&sample.user_regs.regs);
+ return err;
+}
+
+static int global_unwind_retval = -INT_MAX;
+
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)
+{
+ /* Both the key and every array element point to 'thread', so any value works. */
+ struct thread *thread = *(struct thread **)p1;
+
+ if (global_unwind_retval == -INT_MAX) {
+ /* Call unwinder twice for both callchain orders. */
+ callchain_param.order = ORDER_CALLER;
+
+ global_unwind_retval = test_dwarf_unwind__thread(thread);
+ if (!global_unwind_retval) {
+ callchain_param.order = ORDER_CALLEE;
+ global_unwind_retval = test_dwarf_unwind__thread(thread);
+ }
+ }
+
+ return p1 - p2;
+}
+
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
+{
+ struct thread *array[2] = {thread, thread};
+ void *fp = &bsearch;
+ /*
+ * Make _bsearch a volatile function pointer to
+ * prevent a potential optimization that would expand
+ * bsearch and call compare directly from this function,
+ * instead of going through the libc shared object.
+ */
+ void *(*volatile _bsearch)(void *, void *, size_t,
+ size_t, int (*)(void *, void *));
+
+ _bsearch = fp;
+ _bsearch(array, &thread, 2, sizeof(struct thread **),
+ test_dwarf_unwind__compare);
+ return global_unwind_retval;
+}
+
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
+{
+ int ret;
+
+ ret = test_dwarf_unwind__krava_3(thread);
+ NO_TAIL_CALL_BARRIER;
+ return ret;
+}
+
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
+{
+ int ret;
+
+ ret = test_dwarf_unwind__krava_2(thread);
+ NO_TAIL_CALL_BARRIER;
+ return ret;
+}
+
+static int test__dwarf_unwind(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct machine *machine;
+ struct thread *thread;
+ int err = -1;
+
+ machine = machine__new_host();
+ if (!machine) {
+ pr_err("Could not get machine\n");
+ return -1;
+ }
+
+ if (machine__create_kernel_maps(machine)) {
+ pr_err("Failed to create kernel maps\n");
+ return -1;
+ }
+
+ callchain_param.record_mode = CALLCHAIN_DWARF;
+ dwarf_callchain_users = true;
+
+ if (init_live_machine(machine)) {
+ pr_err("Could not init machine\n");
+ goto out;
+ }
+
+ if (verbose > 1)
+ machine__fprintf(machine, stderr);
+
+ thread = machine__find_thread(machine, getpid(), getpid());
+ if (!thread) {
+ pr_err("Could not get thread\n");
+ goto out;
+ }
+
+ err = test_dwarf_unwind__krava_1(thread);
+ thread__put(thread);
+
+ out:
+ machine__delete_threads(machine);
+ machine__delete(machine);
+ return err;
+}
+
+DEFINE_SUITE("Test dwarf unwind", dwarf_unwind);
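A minimal sketch of the volatile function pointer idiom that test_dwarf_unwind__krava_3() relies on, shown here with the standard bsearch() prototype (standalone, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

static int cmp_int(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

int main(void)
{
	int keys[] = { 1, 3, 5, 7 };
	int key = 5;
	/*
	 * Calling through a volatile function pointer keeps the compiler
	 * from expanding bsearch() inline, so the call really goes through
	 * the libc shared object and its frame can show up when unwinding.
	 */
	void *(*volatile run_bsearch)(const void *, const void *, size_t, size_t,
				      int (*)(const void *, const void *)) = bsearch;
	int *found = run_bsearch(&key, keys, sizeof(keys) / sizeof(keys[0]),
				 sizeof(keys[0]), cmp_int);

	printf("found %d\n", found ? *found : -1);
	return 0;
}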
diff --git a/tools/perf/tests/event-times.c b/tools/perf/tests/event-times.c
new file mode 100644
index 000000000..e155f0e0e
--- /dev/null
+++ b/tools/perf/tests/event-times.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <string.h>
+#include <sys/wait.h>
+#include <perf/cpumap.h>
+#include "tests.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "debug.h"
+#include "parse-events.h"
+#include "thread_map.h"
+#include "target.h"
+
+static int attach__enable_on_exec(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__last(evlist);
+ struct target target = {
+ .uid = UINT_MAX,
+ };
+ const char *argv[] = { "true", NULL, };
+ char sbuf[STRERR_BUFSIZE];
+ int err;
+
+ pr_debug("attaching to spawned child, enable on exec\n");
+
+ err = evlist__create_maps(evlist, &target);
+ if (err < 0) {
+ pr_debug("Not enough memory to create thread/cpu maps\n");
+ return err;
+ }
+
+ err = evlist__prepare_workload(evlist, &target, argv, false, NULL);
+ if (err < 0) {
+ pr_debug("Couldn't run the workload!\n");
+ return err;
+ }
+
+ evsel->core.attr.enable_on_exec = 1;
+
+ err = evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ return err;
+ }
+
+ return evlist__start_workload(evlist) == 1 ? TEST_OK : TEST_FAIL;
+}
+
+static int detach__enable_on_exec(struct evlist *evlist)
+{
+ waitpid(evlist->workload.pid, NULL, 0);
+ return 0;
+}
+
+static int attach__current_disabled(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__last(evlist);
+ struct perf_thread_map *threads;
+ int err;
+
+ pr_debug("attaching to current thread as disabled\n");
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ if (threads == NULL) {
+ pr_debug("thread_map__new\n");
+ return -1;
+ }
+
+ evsel->core.attr.disabled = 1;
+
+ err = evsel__open_per_thread(evsel, threads);
+ if (err) {
+ pr_debug("Failed to open event cpu-clock:u\n");
+ return err;
+ }
+
+ perf_thread_map__put(threads);
+ return evsel__enable(evsel) == 0 ? TEST_OK : TEST_FAIL;
+}
+
+static int attach__current_enabled(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__last(evlist);
+ struct perf_thread_map *threads;
+ int err;
+
+ pr_debug("attaching to current thread as enabled\n");
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ if (threads == NULL) {
+ pr_debug("failed to call thread_map__new\n");
+ return -1;
+ }
+
+ err = evsel__open_per_thread(evsel, threads);
+
+ perf_thread_map__put(threads);
+ return err == 0 ? TEST_OK : TEST_FAIL;
+}
+
+static int detach__disable(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__last(evlist);
+
+ return evsel__enable(evsel);
+}
+
+static int attach__cpu_disabled(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__last(evlist);
+ struct perf_cpu_map *cpus;
+ int err;
+
+ pr_debug("attaching to CPU 0 as disabled\n");
+
+ cpus = perf_cpu_map__new("0");
+ if (cpus == NULL) {
+ pr_debug("failed to call perf_cpu_map__new\n");
+ return -1;
+ }
+
+ evsel->core.attr.disabled = 1;
+
+ err = evsel__open_per_cpu(evsel, cpus, -1);
+ if (err) {
+ if (err == -EACCES)
+ return TEST_SKIP;
+
+ pr_debug("Failed to open event cpu-clock:u\n");
+ return err;
+ }
+
+ perf_cpu_map__put(cpus);
+ return evsel__enable(evsel);
+}
+
+static int attach__cpu_enabled(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__last(evlist);
+ struct perf_cpu_map *cpus;
+ int err;
+
+ pr_debug("attaching to CPU 0 as enabled\n");
+
+ cpus = perf_cpu_map__new("0");
+ if (cpus == NULL) {
+ pr_debug("failed to call perf_cpu_map__new\n");
+ return -1;
+ }
+
+ err = evsel__open_per_cpu(evsel, cpus, -1);
+ if (err == -EACCES)
+ return TEST_SKIP;
+
+ perf_cpu_map__put(cpus);
+ return err ? TEST_FAIL : TEST_OK;
+}
+
+static int test_times(int (attach)(struct evlist *),
+ int (detach)(struct evlist *))
+{
+ struct perf_counts_values count;
+ struct evlist *evlist = NULL;
+ struct evsel *evsel;
+ int err = -1, i;
+
+ evlist = evlist__new();
+ if (!evlist) {
+ pr_debug("failed to create event list\n");
+ goto out_err;
+ }
+
+ err = parse_event(evlist, "cpu-clock:u");
+ if (err) {
+ pr_debug("failed to parse event cpu-clock:u\n");
+ goto out_err;
+ }
+
+ evsel = evlist__last(evlist);
+ evsel->core.attr.read_format |=
+ PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
+
+ err = attach(evlist);
+ if (err == TEST_SKIP) {
+ pr_debug(" SKIP : not enough rights\n");
+ return err;
+ }
+
+ TEST_ASSERT_VAL("failed to attach", !err);
+
+ for (i = 0; i < 100000000; i++) { }
+
+ TEST_ASSERT_VAL("failed to detach", !detach(evlist));
+
+ perf_evsel__read(&evsel->core, 0, 0, &count);
+
+ err = !(count.ena == count.run);
+
+ pr_debug(" %s: ena %" PRIu64", run %" PRIu64"\n",
+ !err ? "OK " : "FAILED",
+ count.ena, count.run);
+
+out_err:
+ evlist__delete(evlist);
+ return !err ? TEST_OK : TEST_FAIL;
+}
+
+/*
+ * This test creates the software event 'cpu-clock',
+ * attaches it in several ways (explained below)
+ * and checks that the enabled and running times
+ * match.
+ */
+static int test__event_times(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int err, ret = 0;
+
+#define _T(attach, detach) \
+ err = test_times(attach, detach); \
+ if (err && (ret == TEST_OK || ret == TEST_SKIP)) \
+ ret = err;
+
+ /* attach on newly spawned process after exec */
+ _T(attach__enable_on_exec, detach__enable_on_exec)
+ /* attach on current process as enabled */
+ _T(attach__current_enabled, detach__disable)
+ /* attach on current process as disabled */
+ _T(attach__current_disabled, detach__disable)
+ /* attach on cpu as disabled */
+ _T(attach__cpu_disabled, detach__disable)
+ /* attach on cpu as enabled */
+ _T(attach__cpu_enabled, detach__disable)
+
+#undef _T
+ return ret;
+}
+
+DEFINE_SUITE("Event times", event_times);
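The enabled/running accounting this test verifies can also be seen with the raw syscall interface. A hedged standalone sketch (not part of the patch) that opens the same cpu-clock:u event and reads both time fields:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	struct { uint64_t value, ena, run; } rf;
	volatile unsigned long i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.exclude_kernel = 1;	/* the :u modifier used by the test */
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	/* Count the current thread on any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	for (i = 0; i < 100000000; i++)
		;

	if (read(fd, &rf, sizeof(rf)) != sizeof(rf))
		return 1;
	/* With a single uncontended event, ena and run should match. */
	printf("count %llu ena %llu run %llu\n",
	       (unsigned long long)rf.value,
	       (unsigned long long)rf.ena,
	       (unsigned long long)rf.run);
	close(fd);
	return 0;
}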
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
new file mode 100644
index 000000000..d093a9b87
--- /dev/null
+++ b/tools/perf/tests/event_update.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <perf/cpumap.h>
+#include <string.h>
+#include "cpumap.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "header.h"
+#include "machine.h"
+#include "util/synthetic-events.h"
+#include "tool.h"
+#include "tests.h"
+#include "debug.h"
+
+static int process_event_unit(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_record_event_update *ev = (struct perf_record_event_update *)event;
+
+ TEST_ASSERT_VAL("wrong id", ev->id == 123);
+ TEST_ASSERT_VAL("wrong type", ev->type == PERF_EVENT_UPDATE__UNIT);
+ TEST_ASSERT_VAL("wrong unit", !strcmp(ev->unit, "KRAVA"));
+ return 0;
+}
+
+static int process_event_scale(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_record_event_update *ev = (struct perf_record_event_update *)event;
+
+ TEST_ASSERT_VAL("wrong id", ev->id == 123);
+ TEST_ASSERT_VAL("wrong type", ev->type == PERF_EVENT_UPDATE__SCALE);
+ TEST_ASSERT_VAL("wrong scale", ev->scale.scale == 0.123);
+ return 0;
+}
+
+struct event_name {
+ struct perf_tool tool;
+ const char *name;
+};
+
+static int process_event_name(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct event_name *tmp = container_of(tool, struct event_name, tool);
+ struct perf_record_event_update *ev = (struct perf_record_event_update *)event;
+
+ TEST_ASSERT_VAL("wrong id", ev->id == 123);
+ TEST_ASSERT_VAL("wrong type", ev->type == PERF_EVENT_UPDATE__NAME);
+ TEST_ASSERT_VAL("wrong name", !strcmp(ev->name, tmp->name));
+ return 0;
+}
+
+static int process_event_cpus(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_record_event_update *ev = (struct perf_record_event_update *)event;
+ struct perf_cpu_map *map;
+
+ map = cpu_map__new_data(&ev->cpus.cpus);
+
+ TEST_ASSERT_VAL("wrong id", ev->id == 123);
+ TEST_ASSERT_VAL("wrong type", ev->type == PERF_EVENT_UPDATE__CPUS);
+ TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__nr(map) == 3);
+ TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 0).cpu == 1);
+ TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 1).cpu == 2);
+ TEST_ASSERT_VAL("wrong cpus", perf_cpu_map__cpu(map, 2).cpu == 3);
+ perf_cpu_map__put(map);
+ return 0;
+}
+
+static int test__event_update(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct evsel *evsel;
+ struct event_name tmp;
+ struct evlist *evlist = evlist__new_default();
+
+ TEST_ASSERT_VAL("failed to get evlist", evlist);
+
+ evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("failed to allocate ids",
+ !perf_evsel__alloc_id(&evsel->core, 1, 1));
+
+ perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123);
+
+ free((char *)evsel->unit);
+ evsel->unit = strdup("KRAVA");
+
+ TEST_ASSERT_VAL("failed to synthesize attr update unit",
+ !perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit));
+
+ evsel->scale = 0.123;
+
+ TEST_ASSERT_VAL("failed to synthesize attr update scale",
+ !perf_event__synthesize_event_update_scale(NULL, evsel, process_event_scale));
+
+ tmp.name = evsel__name(evsel);
+
+ TEST_ASSERT_VAL("failed to synthesize attr update name",
+ !perf_event__synthesize_event_update_name(&tmp.tool, evsel, process_event_name));
+
+ evsel->core.own_cpus = perf_cpu_map__new("1,2,3");
+
+ TEST_ASSERT_VAL("failed to synthesize attr update cpus",
+ !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
+
+ evlist__delete(evlist);
+ return 0;
+}
+
+DEFINE_SUITE("Synthesize attr update", event_update);
diff --git a/tools/perf/tests/evsel-roundtrip-name.c b/tools/perf/tests/evsel-roundtrip-name.c
new file mode 100644
index 000000000..e94fed901
--- /dev/null
+++ b/tools/perf/tests/evsel-roundtrip-name.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "evlist.h"
+#include "evsel.h"
+#include "parse-events.h"
+#include "tests.h"
+#include "debug.h"
+#include "pmu.h"
+#include "pmu-hybrid.h"
+#include <errno.h>
+#include <linux/kernel.h>
+
+static int perf_evsel__roundtrip_cache_name_test(void)
+{
+ char name[128];
+ int type, op, err = 0, ret = 0, i, idx;
+ struct evsel *evsel;
+ struct evlist *evlist = evlist__new();
+
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+ for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+ /* skip invalid cache type/op combinations */
+ if (!evsel__is_cache_op_valid(type, op))
+ continue;
+
+ for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
+ err = parse_event(evlist, name);
+ if (err)
+ ret = err;
+ }
+ }
+ }
+
+ idx = 0;
+ evsel = evlist__first(evlist);
+
+ for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
+ for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
+ /* skip invalid cache type/op combinations */
+ if (!evsel__is_cache_op_valid(type, op))
+ continue;
+
+ for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
+ __evsel__hw_cache_type_op_res_name(type, op, i, name, sizeof(name));
+ if (evsel->core.idx != idx)
+ continue;
+
+ ++idx;
+
+ if (strcmp(evsel__name(evsel), name)) {
+ pr_debug("%s != %s\n", evsel__name(evsel), name);
+ ret = -1;
+ }
+
+ evsel = evsel__next(evsel);
+ }
+ }
+ }
+
+ evlist__delete(evlist);
+ return ret;
+}
+
+static int __perf_evsel__name_array_test(const char *const names[], int nr_names,
+ int distance)
+{
+ int i, err;
+ struct evsel *evsel;
+ struct evlist *evlist = evlist__new();
+
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_names; ++i) {
+ err = parse_event(evlist, names[i]);
+ if (err) {
+ pr_debug("failed to parse event '%s', err %d\n",
+ names[i], err);
+ goto out_delete_evlist;
+ }
+ }
+
+ err = 0;
+ evlist__for_each_entry(evlist, evsel) {
+ if (strcmp(evsel__name(evsel), names[evsel->core.idx / distance])) {
+ --err;
+ pr_debug("%s != %s\n", evsel__name(evsel), names[evsel->core.idx / distance]);
+ }
+ }
+
+out_delete_evlist:
+ evlist__delete(evlist);
+ return err;
+}
+
+#define perf_evsel__name_array_test(names, distance) \
+ __perf_evsel__name_array_test(names, ARRAY_SIZE(names), distance)
+
+static int test__perf_evsel__roundtrip_name_test(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ int err = 0, ret = 0;
+
+ if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom"))
+ return perf_evsel__name_array_test(evsel__hw_names, 2);
+
+ err = perf_evsel__name_array_test(evsel__hw_names, 1);
+ if (err)
+ ret = err;
+
+ err = __perf_evsel__name_array_test(evsel__sw_names, PERF_COUNT_SW_DUMMY + 1, 1);
+ if (err)
+ ret = err;
+
+ err = perf_evsel__roundtrip_cache_name_test();
+ if (err)
+ ret = err;
+
+ return ret;
+}
+
+DEFINE_SUITE("Roundtrip evsel->name", perf_evsel__roundtrip_name_test);
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
new file mode 100644
index 000000000..cf4da3d74
--- /dev/null
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/err.h>
+#include <traceevent/event-parse.h>
+#include "evsel.h"
+#include "tests.h"
+#include "debug.h"
+
+static int evsel__test_field(struct evsel *evsel, const char *name, int size, bool should_be_signed)
+{
+ struct tep_format_field *field = evsel__field(evsel, name);
+ int is_signed;
+ int ret = 0;
+
+ if (field == NULL) {
+ pr_debug("%s: \"%s\" field not found!\n", evsel->name, name);
+ return -1;
+ }
+
+ is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED);
+ if (should_be_signed && !is_signed) {
+ pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
+ evsel->name, name, is_signed, should_be_signed);
+ ret = -1;
+ }
+
+ if (field->size != size) {
+ pr_debug("%s: \"%s\" size (%d) should be %d!\n",
+ evsel->name, name, field->size, size);
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int test__perf_evsel__tp_sched_test(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct evsel *evsel = evsel__newtp("sched", "sched_switch");
+ int ret = 0;
+
+ if (IS_ERR(evsel)) {
+ pr_debug("evsel__newtp failed with %ld\n", PTR_ERR(evsel));
+ return -1;
+ }
+
+ if (evsel__test_field(evsel, "prev_comm", 16, false))
+ ret = -1;
+
+ if (evsel__test_field(evsel, "prev_pid", 4, true))
+ ret = -1;
+
+ if (evsel__test_field(evsel, "prev_prio", 4, true))
+ ret = -1;
+
+ if (evsel__test_field(evsel, "prev_state", sizeof(long), true))
+ ret = -1;
+
+ if (evsel__test_field(evsel, "next_comm", 16, false))
+ ret = -1;
+
+ if (evsel__test_field(evsel, "next_pid", 4, true))
+ ret = -1;
+
+ if (evsel__test_field(evsel, "next_prio", 4, true))
+ ret = -1;
+
+ evsel__delete(evsel);
+
+ evsel = evsel__newtp("sched", "sched_wakeup");
+
+ if (IS_ERR(evsel)) {
+ pr_debug("evsel__newtp failed with %ld\n", PTR_ERR(evsel));
+ return -1;
+ }
+
+ if (evsel__test_field(evsel, "comm", 16, false))
+ ret = -1;
+
+ if (evsel__test_field(evsel, "pid", 4, true))
+ ret = -1;
+
+ if (evsel__test_field(evsel, "prio", 4, true))
+ ret = -1;
+
+ if (evsel__test_field(evsel, "target_cpu", 4, true))
+ ret = -1;
+
+ evsel__delete(evsel);
+ return ret;
+}
+
+DEFINE_SUITE("Parse sched tracepoints fields", perf_evsel__tp_sched_test);
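The field names, sizes and signedness checked above come from the tracepoint format files exported by the kernel. A small sketch that dumps one of them (assuming tracefs is mounted at /sys/kernel/tracing; not part of the patch):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/tracing/events/sched/sched_switch/format";
	FILE *f = fopen(path, "r");
	char line[512];

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Each "field:" line carries the name, size and signed flag. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}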
diff --git a/tools/perf/tests/expand-cgroup.c b/tools/perf/tests/expand-cgroup.c
new file mode 100644
index 000000000..51fb5f34c
--- /dev/null
+++ b/tools/perf/tests/expand-cgroup.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "tests.h"
+#include "debug.h"
+#include "evlist.h"
+#include "cgroup.h"
+#include "rblist.h"
+#include "metricgroup.h"
+#include "parse-events.h"
+#include "pmu-events/pmu-events.h"
+#include "pfm.h"
+#include <subcmd/parse-options.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static int test_expand_events(struct evlist *evlist,
+ struct rblist *metric_events)
+{
+ int i, ret = TEST_FAIL;
+ int nr_events;
+ bool was_group_event;
+ int nr_members; /* for the first evsel only */
+ const char cgrp_str[] = "A,B,C";
+ const char *cgrp_name[] = { "A", "B", "C" };
+ int nr_cgrps = ARRAY_SIZE(cgrp_name);
+ char **ev_name;
+ struct evsel *evsel;
+
+ TEST_ASSERT_VAL("evlist is empty", !evlist__empty(evlist));
+
+ nr_events = evlist->core.nr_entries;
+ ev_name = calloc(nr_events, sizeof(*ev_name));
+ if (ev_name == NULL) {
+ pr_debug("memory allocation failure\n");
+ return TEST_FAIL;
+ }
+ i = 0;
+ evlist__for_each_entry(evlist, evsel) {
+ ev_name[i] = strdup(evsel->name);
+ if (ev_name[i] == NULL) {
+ pr_debug("memory allocation failure\n");
+ goto out;
+ }
+ i++;
+ }
+ /* remember grouping info */
+ was_group_event = evsel__is_group_event(evlist__first(evlist));
+ nr_members = evlist__first(evlist)->core.nr_members;
+
+ ret = evlist__expand_cgroup(evlist, cgrp_str, metric_events, false);
+ if (ret < 0) {
+ pr_debug("failed to expand events for cgroups\n");
+ goto out;
+ }
+
+ ret = TEST_FAIL;
+ if (evlist->core.nr_entries != nr_events * nr_cgrps) {
+ pr_debug("event count doesn't match\n");
+ goto out;
+ }
+
+ i = 0;
+ evlist__for_each_entry(evlist, evsel) {
+ if (strcmp(evsel->name, ev_name[i % nr_events])) {
+ pr_debug("event name doesn't match:\n");
+ pr_debug(" evsel[%d]: %s\n expected: %s\n",
+ i, evsel->name, ev_name[i % nr_events]);
+ goto out;
+ }
+ if (strcmp(evsel->cgrp->name, cgrp_name[i / nr_events])) {
+ pr_debug("cgroup name doesn't match:\n");
+ pr_debug(" evsel[%d]: %s\n expected: %s\n",
+ i, evsel->cgrp->name, cgrp_name[i / nr_events]);
+ goto out;
+ }
+
+ if ((i % nr_events) == 0) {
+ if (evsel__is_group_event(evsel) != was_group_event) {
+ pr_debug("event group doesn't match: got %s, expect %s\n",
+ evsel__is_group_event(evsel) ? "true" : "false",
+ was_group_event ? "true" : "false");
+ goto out;
+ }
+ if (evsel->core.nr_members != nr_members) {
+ pr_debug("event group member doesn't match: %d vs %d\n",
+ evsel->core.nr_members, nr_members);
+ goto out;
+ }
+ }
+ i++;
+ }
+ ret = TEST_OK;
+
+out: for (i = 0; i < nr_events; i++)
+ free(ev_name[i]);
+ free(ev_name);
+ return ret;
+}
+
+static int expand_default_events(void)
+{
+ int ret;
+ struct rblist metric_events;
+ struct evlist *evlist = evlist__new_default();
+
+ TEST_ASSERT_VAL("failed to get evlist", evlist);
+
+ rblist__init(&metric_events);
+ ret = test_expand_events(evlist, &metric_events);
+ evlist__delete(evlist);
+ return ret;
+}
+
+static int expand_group_events(void)
+{
+ int ret;
+ struct evlist *evlist;
+ struct rblist metric_events;
+ struct parse_events_error err;
+ const char event_str[] = "{cycles,instructions}";
+
+ symbol_conf.event_group = true;
+
+ evlist = evlist__new();
+ TEST_ASSERT_VAL("failed to get evlist", evlist);
+
+ parse_events_error__init(&err);
+ ret = parse_events(evlist, event_str, &err);
+ if (ret < 0) {
+ pr_debug("failed to parse event '%s', err %d, str '%s'\n",
+ event_str, ret, err.str);
+ parse_events_error__print(&err, event_str);
+ goto out;
+ }
+
+ rblist__init(&metric_events);
+ ret = test_expand_events(evlist, &metric_events);
+out:
+ parse_events_error__exit(&err);
+ evlist__delete(evlist);
+ return ret;
+}
+
+static int expand_libpfm_events(void)
+{
+ int ret;
+ struct evlist *evlist;
+ struct rblist metric_events;
+ const char event_str[] = "CYCLES";
+ struct option opt = {
+ .value = &evlist,
+ };
+
+ symbol_conf.event_group = true;
+
+ evlist = evlist__new();
+ TEST_ASSERT_VAL("failed to get evlist", evlist);
+
+ ret = parse_libpfm_events_option(&opt, event_str, 0);
+ if (ret < 0) {
+ pr_debug("failed to parse libpfm event '%s', err %d\n",
+ event_str, ret);
+ goto out;
+ }
+ if (evlist__empty(evlist)) {
+ pr_debug("libpfm was not enabled\n");
+ goto out;
+ }
+
+ rblist__init(&metric_events);
+ ret = test_expand_events(evlist, &metric_events);
+out:
+ evlist__delete(evlist);
+ return ret;
+}
+
+static int expand_metric_events(void)
+{
+ int ret;
+ struct evlist *evlist;
+ struct rblist metric_events;
+ const char metric_str[] = "CPI";
+ const struct pmu_events_table *pme_test;
+
+ evlist = evlist__new();
+ TEST_ASSERT_VAL("failed to get evlist", evlist);
+
+ rblist__init(&metric_events);
+ pme_test = find_core_events_table("testarch", "testcpu");
+ ret = metricgroup__parse_groups_test(evlist, pme_test, metric_str,
+ false, false, &metric_events);
+ if (ret < 0) {
+ pr_debug("failed to parse '%s' metric\n", metric_str);
+ goto out;
+ }
+
+ ret = test_expand_events(evlist, &metric_events);
+
+out:
+ metricgroup__rblist_exit(&metric_events);
+ evlist__delete(evlist);
+ return ret;
+}
+
+static int test__expand_cgroup_events(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ int ret;
+
+ ret = expand_default_events();
+ TEST_ASSERT_EQUAL("failed to expand default events", ret, 0);
+
+ ret = expand_group_events();
+ TEST_ASSERT_EQUAL("failed to expand event group", ret, 0);
+
+ ret = expand_libpfm_events();
+ TEST_ASSERT_EQUAL("failed to expand libpfm events", ret, 0);
+
+ ret = expand_metric_events();
+ TEST_ASSERT_EQUAL("failed to expand metric events", ret, 0);
+
+ return ret;
+}
+
+DEFINE_SUITE("Event expansion for cgroups", expand_cgroup_events);
diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c
new file mode 100644
index 000000000..6512f5e22
--- /dev/null
+++ b/tools/perf/tests/expr.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/cputopo.h"
+#include "util/debug.h"
+#include "util/expr.h"
+#include "util/header.h"
+#include "util/smt.h"
+#include "tests.h"
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/zalloc.h>
+
+static int test_ids_union(void)
+{
+ struct hashmap *ids1, *ids2;
+
+ /* Empty union. */
+ ids1 = ids__new();
+ TEST_ASSERT_VAL("ids__new", ids1);
+ ids2 = ids__new();
+ TEST_ASSERT_VAL("ids__new", ids2);
+
+ ids1 = ids__union(ids1, ids2);
+ TEST_ASSERT_EQUAL("union", (int)hashmap__size(ids1), 0);
+
+ /* Union {foo, bar} against {}. */
+ ids2 = ids__new();
+ TEST_ASSERT_VAL("ids__new", ids2);
+
+ TEST_ASSERT_EQUAL("ids__insert", ids__insert(ids1, strdup("foo")), 0);
+ TEST_ASSERT_EQUAL("ids__insert", ids__insert(ids1, strdup("bar")), 0);
+
+ ids1 = ids__union(ids1, ids2);
+ TEST_ASSERT_EQUAL("union", (int)hashmap__size(ids1), 2);
+
+ /* Union {foo, bar} against {foo}. */
+ ids2 = ids__new();
+ TEST_ASSERT_VAL("ids__new", ids2);
+ TEST_ASSERT_EQUAL("ids__insert", ids__insert(ids2, strdup("foo")), 0);
+
+ ids1 = ids__union(ids1, ids2);
+ TEST_ASSERT_EQUAL("union", (int)hashmap__size(ids1), 2);
+
+ /* Union {foo, bar} against {bar,baz}. */
+ ids2 = ids__new();
+ TEST_ASSERT_VAL("ids__new", ids2);
+ TEST_ASSERT_EQUAL("ids__insert", ids__insert(ids2, strdup("bar")), 0);
+ TEST_ASSERT_EQUAL("ids__insert", ids__insert(ids2, strdup("baz")), 0);
+
+ ids1 = ids__union(ids1, ids2);
+ TEST_ASSERT_EQUAL("union", (int)hashmap__size(ids1), 3);
+
+ ids__free(ids1);
+
+ return 0;
+}
+
+static int test(struct expr_parse_ctx *ctx, const char *e, double val2)
+{
+ double val;
+
+ if (expr__parse(&val, ctx, e))
+ TEST_ASSERT_VAL("parse test failed", 0);
+ TEST_ASSERT_VAL("unexpected value", val == val2);
+ return 0;
+}
+
+static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
+{
+ struct expr_id_data *val_ptr;
+ const char *p;
+ double val, num_cpus, num_cores, num_dies, num_packages;
+ int ret;
+ struct expr_parse_ctx *ctx;
+ bool is_intel = false;
+ char buf[128];
+
+ if (!get_cpuid(buf, sizeof(buf)))
+ is_intel = strstr(buf, "Intel") != NULL;
+
+ TEST_ASSERT_EQUAL("ids_union", test_ids_union(), 0);
+
+ ctx = expr__ctx_new();
+ TEST_ASSERT_VAL("expr__ctx_new", ctx);
+ expr__add_id_val(ctx, strdup("FOO"), 1);
+ expr__add_id_val(ctx, strdup("BAR"), 2);
+
+ ret = test(ctx, "1+1", 2);
+ ret |= test(ctx, "FOO+BAR", 3);
+ ret |= test(ctx, "(BAR/2)%2", 1);
+ ret |= test(ctx, "1 - -4", 5);
+ ret |= test(ctx, "(FOO-1)*2 + (BAR/2)%2 - -4", 5);
+ ret |= test(ctx, "1-1 | 1", 1);
+ ret |= test(ctx, "1-1 & 1", 0);
+ ret |= test(ctx, "min(1,2) + 1", 2);
+ ret |= test(ctx, "max(1,2) + 1", 3);
+ ret |= test(ctx, "1+1 if 3*4 else 0", 2);
+ ret |= test(ctx, "100 if 1 else 200 if 1 else 300", 100);
+ ret |= test(ctx, "100 if 0 else 200 if 1 else 300", 200);
+ ret |= test(ctx, "100 if 1 else 200 if 0 else 300", 100);
+ ret |= test(ctx, "100 if 0 else 200 if 0 else 300", 300);
+ ret |= test(ctx, "1.1 + 2.1", 3.2);
+ ret |= test(ctx, ".1 + 2.", 2.1);
+ ret |= test(ctx, "d_ratio(1, 2)", 0.5);
+ ret |= test(ctx, "d_ratio(2.5, 0)", 0);
+ ret |= test(ctx, "1.1 < 2.2", 1);
+ ret |= test(ctx, "2.2 > 1.1", 1);
+ ret |= test(ctx, "1.1 < 1.1", 0);
+ ret |= test(ctx, "2.2 > 2.2", 0);
+ ret |= test(ctx, "2.2 < 1.1", 0);
+ ret |= test(ctx, "1.1 > 2.2", 0);
+ ret |= test(ctx, "1.1e10 < 1.1e100", 1);
+ ret |= test(ctx, "1.1e2 > 1.1e-2", 1);
+
+ if (ret) {
+ expr__ctx_free(ctx);
+ return ret;
+ }
+
+ p = "FOO/0";
+ ret = expr__parse(&val, ctx, p);
+ TEST_ASSERT_VAL("division by zero", ret == -1);
+
+ p = "BAR/";
+ ret = expr__parse(&val, ctx, p);
+ TEST_ASSERT_VAL("missing operand", ret == -1);
+
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("find ids",
+ expr__find_ids("FOO + BAR + BAZ + BOZO", "FOO",
+ ctx) == 0);
+ TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 3);
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAR",
+ (void **)&val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAZ",
+ (void **)&val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BOZO",
+ (void **)&val_ptr));
+
+ expr__ctx_clear(ctx);
+ ctx->sctx.runtime = 3;
+ TEST_ASSERT_VAL("find ids",
+ expr__find_ids("EVENT1\\,param\\=?@ + EVENT2\\,param\\=?@",
+ NULL, ctx) == 0);
+ TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 2);
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT1,param=3@",
+ (void **)&val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT2,param=3@",
+ (void **)&val_ptr));
+
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("find ids",
+ expr__find_ids("dash\\-event1 - dash\\-event2",
+ NULL, ctx) == 0);
+ TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 2);
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event1",
+ (void **)&val_ptr));
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event2",
+ (void **)&val_ptr));
+
+ /* Only EVENT1 or EVENT2 need be measured depending on the value of smt_on. */
+ {
+ struct cpu_topology *topology = cpu_topology__new();
+ bool smton = smt_on(topology);
+ bool corewide = core_wide(/*system_wide=*/false,
+ /*user_requested_cpus=*/false,
+ topology);
+
+ cpu_topology__delete(topology);
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("find ids",
+ expr__find_ids("EVENT1 if #smt_on else EVENT2",
+ NULL, ctx) == 0);
+ TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 1);
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids,
+ smton ? "EVENT1" : "EVENT2",
+ (void **)&val_ptr));
+
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("find ids",
+ expr__find_ids("EVENT1 if #core_wide else EVENT2",
+ NULL, ctx) == 0);
+ TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 1);
+ TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids,
+ corewide ? "EVENT1" : "EVENT2",
+ (void **)&val_ptr));
+
+ }
+ /* The expression is a constant 1.0 without needing to evaluate EVENT1. */
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("find ids",
+ expr__find_ids("1.0 if EVENT1 > 100.0 else 1.0",
+ NULL, ctx) == 0);
+ TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 0);
+
+ /* Test that topology constants appear well ordered. */
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("#num_cpus", expr__parse(&num_cpus, ctx, "#num_cpus") == 0);
+ TEST_ASSERT_VAL("#num_cores", expr__parse(&num_cores, ctx, "#num_cores") == 0);
+ TEST_ASSERT_VAL("#num_cpus >= #num_cores", num_cpus >= num_cores);
+ TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0);
+ TEST_ASSERT_VAL("#num_cores >= #num_dies", num_cores >= num_dies);
+ TEST_ASSERT_VAL("#num_packages", expr__parse(&num_packages, ctx, "#num_packages") == 0);
+
+ if (num_dies) // Some platforms do not have CPU die support, for example s390
+ TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages);
+
+ TEST_ASSERT_VAL("#system_tsc_freq", expr__parse(&val, ctx, "#system_tsc_freq") == 0);
+ if (is_intel)
+ TEST_ASSERT_VAL("#system_tsc_freq > 0", val > 0);
+ else
+ TEST_ASSERT_VAL("#system_tsc_freq == 0", fpclassify(val) == FP_ZERO);
+
+ /*
+ * Source count returns the number of events aggregated in a leader
+ * event, including the leader itself. Check that parsing yields an id.
+ */
+ expr__ctx_clear(ctx);
+ TEST_ASSERT_VAL("source count",
+ expr__find_ids("source_count(EVENT1)",
+ NULL, ctx) == 0);
+ TEST_ASSERT_VAL("source count", hashmap__size(ctx->ids) == 1);
+ TEST_ASSERT_VAL("source count", hashmap__find(ctx->ids, "EVENT1",
+ (void **)&val_ptr));
+
+ expr__ctx_free(ctx);
+
+ return 0;
+}
+
+DEFINE_SUITE("Simple expression parser", expr);
diff --git a/tools/perf/tests/fdarray.c b/tools/perf/tests/fdarray.c
new file mode 100644
index 000000000..40983c357
--- /dev/null
+++ b/tools/perf/tests/fdarray.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <api/fd/array.h>
+#include <poll.h>
+#include "util/debug.h"
+#include "tests/tests.h"
+
+static void fdarray__init_revents(struct fdarray *fda, short revents)
+{
+ int fd;
+
+ fda->nr = fda->nr_alloc;
+
+ for (fd = 0; fd < fda->nr; ++fd) {
+ fda->entries[fd].fd = fda->nr - fd;
+ fda->entries[fd].events = revents;
+ fda->entries[fd].revents = revents;
+ }
+}
+
+static int fdarray__fprintf_prefix(struct fdarray *fda, const char *prefix, FILE *fp)
+{
+ int printed = 0;
+
+ if (verbose <= 0)
+ return 0;
+
+ printed += fprintf(fp, "\n%s: ", prefix);
+ return printed + fdarray__fprintf(fda, fp);
+}
+
+static int test__fdarray__filter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int nr_fds, err = TEST_FAIL;
+ struct fdarray *fda = fdarray__new(5, 5);
+
+ if (fda == NULL) {
+ pr_debug("\nfdarray__new() failed!");
+ goto out;
+ }
+
+ fdarray__init_revents(fda, POLLIN);
+ nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
+ if (nr_fds != fda->nr_alloc) {
+ pr_debug("\nfdarray__filter()=%d != %d shouldn't have filtered anything",
+ nr_fds, fda->nr_alloc);
+ goto out_delete;
+ }
+
+ fdarray__init_revents(fda, POLLHUP);
+ nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
+ if (nr_fds != 0) {
+ pr_debug("\nfdarray__filter()=%d != %d, should have filtered all fds",
+ nr_fds, fda->nr_alloc);
+ goto out_delete;
+ }
+
+ fdarray__init_revents(fda, POLLHUP);
+ fda->entries[2].revents = POLLIN;
+
+ pr_debug("\nfiltering all but fda->entries[2]:");
+ fdarray__fprintf_prefix(fda, "before", stderr);
+ nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
+ fdarray__fprintf_prefix(fda, " after", stderr);
+ if (nr_fds != 1) {
+ pr_debug("\nfdarray__filter()=%d != 1, should have left just one event", nr_fds);
+ goto out_delete;
+ }
+
+ fdarray__init_revents(fda, POLLHUP);
+ fda->entries[0].revents = POLLIN;
+ fda->entries[3].revents = POLLIN;
+
+ pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):");
+ fdarray__fprintf_prefix(fda, "before", stderr);
+ nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
+ fdarray__fprintf_prefix(fda, " after", stderr);
+ if (nr_fds != 2) {
+ pr_debug("\nfdarray__filter()=%d != 2, should have left just two events",
+ nr_fds);
+ goto out_delete;
+ }
+
+ pr_debug("\n");
+
+ err = 0;
+out_delete:
+ fdarray__delete(fda);
+out:
+ return err;
+}
+
+static int test__fdarray__add(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int err = TEST_FAIL;
+ struct fdarray *fda = fdarray__new(2, 2);
+
+ if (fda == NULL) {
+ pr_debug("\nfdarray__new() failed!");
+ goto out;
+ }
+
+#define FDA_CHECK(_idx, _fd, _revents) \
+ if (fda->entries[_idx].fd != _fd) { \
+ pr_debug("\n%d: fda->entries[%d](%d) != %d!", \
+ __LINE__, _idx, fda->entries[1].fd, _fd); \
+ goto out_delete; \
+ } \
+ if (fda->entries[_idx].events != (_revents)) { \
+ pr_debug("\n%d: fda->entries[%d].revents(%d) != %d!", \
+ __LINE__, _idx, fda->entries[_idx].fd, _revents); \
+ goto out_delete; \
+ }
+
+#define FDA_ADD(_idx, _fd, _revents, _nr) \
+ if (fdarray__add(fda, _fd, _revents, fdarray_flag__default) < 0) { \
+ pr_debug("\n%d: fdarray__add(fda, %d, %d) failed!", \
+ __LINE__,_fd, _revents); \
+ goto out_delete; \
+ } \
+ if (fda->nr != _nr) { \
+ pr_debug("\n%d: fdarray__add(fda, %d, %d)=%d != %d", \
+ __LINE__,_fd, _revents, fda->nr, _nr); \
+ goto out_delete; \
+ } \
+ FDA_CHECK(_idx, _fd, _revents)
+
+ FDA_ADD(0, 1, POLLIN, 1);
+ FDA_ADD(1, 2, POLLERR, 2);
+
+ fdarray__fprintf_prefix(fda, "before growing array", stderr);
+
+ FDA_ADD(2, 35, POLLHUP, 3);
+
+ if (fda->entries == NULL) {
+ pr_debug("\nfdarray__add(fda, 35, POLLHUP) should have allocated fda->pollfd!");
+ goto out_delete;
+ }
+
+ fdarray__fprintf_prefix(fda, "after 3rd add", stderr);
+
+ FDA_ADD(3, 88, POLLIN | POLLOUT, 4);
+
+ fdarray__fprintf_prefix(fda, "after 4th add", stderr);
+
+ FDA_CHECK(0, 1, POLLIN);
+ FDA_CHECK(1, 2, POLLERR);
+ FDA_CHECK(2, 35, POLLHUP);
+ FDA_CHECK(3, 88, POLLIN | POLLOUT);
+
+#undef FDA_ADD
+#undef FDA_CHECK
+
+ pr_debug("\n");
+
+ err = 0;
+out_delete:
+ fdarray__delete(fda);
+out:
+ return err;
+}
+
+DEFINE_SUITE("Filter fds with revents mask in a fdarray", fdarray__filter);
+DEFINE_SUITE("Add fd to a fdarray, making it autogrow", fdarray__add);
diff --git a/tools/perf/tests/genelf.c b/tools/perf/tests/genelf.c
new file mode 100644
index 000000000..95f3be1b6
--- /dev/null
+++ b/tools/perf/tests/genelf.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/compiler.h>
+
+#include "debug.h"
+#include "tests.h"
+
+#ifdef HAVE_JITDUMP
+#include <libelf.h>
+#include "../util/genelf.h"
+#endif
+
+#define TEMPL "/tmp/perf-test-XXXXXX"
+
+static int test__jit_write_elf(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+#ifdef HAVE_JITDUMP
+ static unsigned char x86_code[] = {
+ 0xBB, 0x2A, 0x00, 0x00, 0x00, /* movl $42, %ebx */
+ 0xB8, 0x01, 0x00, 0x00, 0x00, /* movl $1, %eax */
+ 0xCD, 0x80 /* int $0x80 */
+ };
+ char path[PATH_MAX];
+ int fd, ret;
+
+ strcpy(path, TEMPL);
+
+ fd = mkstemp(path);
+ if (fd < 0) {
+ perror("mkstemp failed");
+ return TEST_FAIL;
+ }
+
+ pr_info("Writing jit code to: %s\n", path);
+
+ ret = jit_write_elf(fd, 0, "main", x86_code, sizeof(x86_code),
+ NULL, 0, NULL, 0, 0);
+ close(fd);
+
+ unlink(path);
+
+ return ret ? TEST_FAIL : 0;
+#else
+ return TEST_SKIP;
+#endif
+}
+
+DEFINE_SUITE("Test jit_write_elf", jit_write_elf);
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
new file mode 100644
index 000000000..6f34d08b8
--- /dev/null
+++ b/tools/perf/tests/hists_common.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
+#include "util/debug.h"
+#include "util/dso.h"
+#include "util/event.h" // struct perf_sample
+#include "util/map.h"
+#include "util/symbol.h"
+#include "util/sort.h"
+#include "util/evsel.h"
+#include "util/machine.h"
+#include "util/thread.h"
+#include "tests/hists_common.h"
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+
+static struct {
+ u32 pid;
+ const char *comm;
+} fake_threads[] = {
+ { FAKE_PID_PERF1, "perf" },
+ { FAKE_PID_PERF2, "perf" },
+ { FAKE_PID_BASH, "bash" },
+};
+
+static struct {
+ u32 pid;
+ u64 start;
+ const char *filename;
+} fake_mmap_info[] = {
+ { FAKE_PID_PERF1, FAKE_MAP_PERF, "perf" },
+ { FAKE_PID_PERF1, FAKE_MAP_LIBC, "libc" },
+ { FAKE_PID_PERF1, FAKE_MAP_KERNEL, "[kernel]" },
+ { FAKE_PID_PERF2, FAKE_MAP_PERF, "perf" },
+ { FAKE_PID_PERF2, FAKE_MAP_LIBC, "libc" },
+ { FAKE_PID_PERF2, FAKE_MAP_KERNEL, "[kernel]" },
+ { FAKE_PID_BASH, FAKE_MAP_BASH, "bash" },
+ { FAKE_PID_BASH, FAKE_MAP_LIBC, "libc" },
+ { FAKE_PID_BASH, FAKE_MAP_KERNEL, "[kernel]" },
+};
+
+struct fake_sym {
+ u64 start;
+ u64 length;
+ const char *name;
+};
+
+static struct fake_sym perf_syms[] = {
+ { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "main" },
+ { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "run_command" },
+ { FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "cmd_record" },
+};
+
+static struct fake_sym bash_syms[] = {
+ { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "main" },
+ { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "xmalloc" },
+ { FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "xfree" },
+};
+
+static struct fake_sym libc_syms[] = {
+ { 700, 100, "malloc" },
+ { 800, 100, "free" },
+ { 900, 100, "realloc" },
+ { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "malloc" },
+ { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "free" },
+ { FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "realloc" },
+};
+
+static struct fake_sym kernel_syms[] = {
+ { FAKE_SYM_OFFSET1, FAKE_SYM_LENGTH, "schedule" },
+ { FAKE_SYM_OFFSET2, FAKE_SYM_LENGTH, "page_fault" },
+ { FAKE_SYM_OFFSET3, FAKE_SYM_LENGTH, "sys_perf_event_open" },
+};
+
+static struct {
+ const char *dso_name;
+ struct fake_sym *syms;
+ size_t nr_syms;
+} fake_symbols[] = {
+ { "perf", perf_syms, ARRAY_SIZE(perf_syms) },
+ { "bash", bash_syms, ARRAY_SIZE(bash_syms) },
+ { "libc", libc_syms, ARRAY_SIZE(libc_syms) },
+ { "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) },
+};
+
+struct machine *setup_fake_machine(struct machines *machines)
+{
+ struct machine *machine = machines__find(machines, HOST_KERNEL_ID);
+ size_t i;
+
+ if (machine == NULL) {
+ pr_debug("Not enough memory for machine setup\n");
+ return NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
+ struct thread *thread;
+
+ thread = machine__findnew_thread(machine, fake_threads[i].pid,
+ fake_threads[i].pid);
+ if (thread == NULL)
+ goto out;
+
+ thread__set_comm(thread, fake_threads[i].comm, 0);
+ thread__put(thread);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
+ struct perf_sample sample = {
+ .cpumode = PERF_RECORD_MISC_USER,
+ };
+ union perf_event fake_mmap_event = {
+ .mmap = {
+ .pid = fake_mmap_info[i].pid,
+ .tid = fake_mmap_info[i].pid,
+ .start = fake_mmap_info[i].start,
+ .len = FAKE_MAP_LENGTH,
+ .pgoff = 0ULL,
+ },
+ };
+
+ strcpy(fake_mmap_event.mmap.filename,
+ fake_mmap_info[i].filename);
+
+ machine__process_mmap_event(machine, &fake_mmap_event, &sample);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) {
+ size_t k;
+ struct dso *dso;
+
+ dso = machine__findnew_dso(machine, fake_symbols[i].dso_name);
+ if (dso == NULL)
+ goto out;
+
+ /* emulate dso__load() */
+ dso__set_loaded(dso);
+
+ for (k = 0; k < fake_symbols[i].nr_syms; k++) {
+ struct symbol *sym;
+ struct fake_sym *fsym = &fake_symbols[i].syms[k];
+
+ sym = symbol__new(fsym->start, fsym->length,
+ STB_GLOBAL, STT_FUNC, fsym->name);
+ if (sym == NULL) {
+ dso__put(dso);
+ goto out;
+ }
+
+ symbols__insert(&dso->symbols, sym);
+ }
+
+ dso__put(dso);
+ }
+
+ return machine;
+
+out:
+ pr_debug("Not enough memory for machine setup\n");
+ machine__delete_threads(machine);
+ return NULL;
+}
+
+void print_hists_in(struct hists *hists)
+{
+ int i = 0;
+ struct rb_root_cached *root;
+ struct rb_node *node;
+
+ if (hists__has(hists, need_collapse))
+ root = &hists->entries_collapsed;
+ else
+ root = hists->entries_in;
+
+ pr_info("----- %s --------\n", __func__);
+ node = rb_first_cached(root);
+ while (node) {
+ struct hist_entry *he;
+
+ he = rb_entry(node, struct hist_entry, rb_node_in);
+
+ if (!he->filtered) {
+ pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
+ i, thread__comm_str(he->thread),
+ he->ms.map->dso->short_name,
+ he->ms.sym->name, he->stat.period);
+ }
+
+ i++;
+ node = rb_next(node);
+ }
+}
+
+void print_hists_out(struct hists *hists)
+{
+ int i = 0;
+ struct rb_root_cached *root;
+ struct rb_node *node;
+
+ root = &hists->entries;
+
+ pr_info("----- %s --------\n", __func__);
+ node = rb_first_cached(root);
+ while (node) {
+ struct hist_entry *he;
+
+ he = rb_entry(node, struct hist_entry, rb_node);
+
+ if (!he->filtered) {
+ pr_info("%2d: entry: %8s:%5d [%-8s] %20s: period = %"PRIu64"/%"PRIu64"\n",
+ i, thread__comm_str(he->thread), he->thread->tid,
+ he->ms.map->dso->short_name,
+ he->ms.sym->name, he->stat.period,
+ he->stat_acc ? he->stat_acc->period : 0);
+ }
+
+ i++;
+ node = rb_next(node);
+ }
+}
diff --git a/tools/perf/tests/hists_common.h b/tools/perf/tests/hists_common.h
new file mode 100644
index 000000000..a2de0ff0c
--- /dev/null
+++ b/tools/perf/tests/hists_common.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_TESTS__HISTS_COMMON_H__
+#define __PERF_TESTS__HISTS_COMMON_H__
+
+struct machine;
+struct machines;
+
+#define FAKE_PID_PERF1 100
+#define FAKE_PID_PERF2 200
+#define FAKE_PID_BASH 300
+
+#define FAKE_MAP_PERF 0x400000
+#define FAKE_MAP_BASH 0x400000
+#define FAKE_MAP_LIBC 0x500000
+#define FAKE_MAP_KERNEL 0xf00000
+#define FAKE_MAP_LENGTH 0x100000
+
+#define FAKE_SYM_OFFSET1 700
+#define FAKE_SYM_OFFSET2 800
+#define FAKE_SYM_OFFSET3 900
+#define FAKE_SYM_LENGTH 100
+
+#define FAKE_IP_PERF_MAIN FAKE_MAP_PERF + FAKE_SYM_OFFSET1
+#define FAKE_IP_PERF_RUN_COMMAND FAKE_MAP_PERF + FAKE_SYM_OFFSET2
+#define FAKE_IP_PERF_CMD_RECORD FAKE_MAP_PERF + FAKE_SYM_OFFSET3
+#define FAKE_IP_BASH_MAIN FAKE_MAP_BASH + FAKE_SYM_OFFSET1
+#define FAKE_IP_BASH_XMALLOC FAKE_MAP_BASH + FAKE_SYM_OFFSET2
+#define FAKE_IP_BASH_XFREE FAKE_MAP_BASH + FAKE_SYM_OFFSET3
+#define FAKE_IP_LIBC_MALLOC FAKE_MAP_LIBC + FAKE_SYM_OFFSET1
+#define FAKE_IP_LIBC_FREE FAKE_MAP_LIBC + FAKE_SYM_OFFSET2
+#define FAKE_IP_LIBC_REALLOC FAKE_MAP_LIBC + FAKE_SYM_OFFSET3
+#define FAKE_IP_KERNEL_SCHEDULE FAKE_MAP_KERNEL + FAKE_SYM_OFFSET1
+#define FAKE_IP_KERNEL_PAGE_FAULT FAKE_MAP_KERNEL + FAKE_SYM_OFFSET2
+#define FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN FAKE_MAP_KERNEL + FAKE_SYM_OFFSET3
+
+/*
+ * The setup_fake_machine() provides a test environment which consists
+ * of 3 processes that have 3 mappings and in turn, have 3 symbols
+ * respectively. See below table:
+ *
+ * Command: Pid Shared Object Symbol
+ * ............. ............. ...................
+ * perf: 100 perf main
+ * perf: 100 perf run_command
+ * perf: 100 perf cmd_record
+ * perf: 100 libc malloc
+ * perf: 100 libc free
+ * perf: 100 libc realloc
+ * perf: 100 [kernel] schedule
+ * perf: 100 [kernel] page_fault
+ * perf: 100 [kernel] sys_perf_event_open
+ * perf: 200 perf main
+ * perf: 200 perf run_command
+ * perf: 200 perf cmd_record
+ * perf: 200 libc malloc
+ * perf: 200 libc free
+ * perf: 200 libc realloc
+ * perf: 200 [kernel] schedule
+ * perf: 200 [kernel] page_fault
+ * perf: 200 [kernel] sys_perf_event_open
+ * bash: 300 bash main
+ * bash: 300 bash xmalloc
+ * bash: 300 bash xfree
+ * bash: 300 libc malloc
+ * bash: 300 libc free
+ * bash: 300 libc realloc
+ * bash: 300 [kernel] schedule
+ * bash: 300 [kernel] page_fault
+ * bash: 300 [kernel] sys_perf_event_open
+ */
+struct machine *setup_fake_machine(struct machines *machines);
+
+void print_hists_in(struct hists *hists);
+void print_hists_out(struct hists *hists);
+
+#endif /* __PERF_TESTS__HISTS_COMMON_H__ */
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
new file mode 100644
index 000000000..b42d37ff2
--- /dev/null
+++ b/tools/perf/tests/hists_cumulate.c
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/debug.h"
+#include "util/dso.h"
+#include "util/event.h"
+#include "util/map.h"
+#include "util/symbol.h"
+#include "util/sort.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
+#include "util/machine.h"
+#include "util/thread.h"
+#include "util/parse-events.h"
+#include "tests/tests.h"
+#include "tests/hists_common.h"
+#include <linux/kernel.h>
+
+struct sample {
+ u32 pid;
+ u64 ip;
+ struct thread *thread;
+ struct map *map;
+ struct symbol *sym;
+};
+
+/* For the numbers, see hists_common.c */
+static struct sample fake_samples[] = {
+ /* perf [kernel] schedule() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
+ /* perf [perf] main() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
+ /* perf [perf] cmd_record() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
+ /* perf [libc] malloc() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
+ /* perf [libc] free() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
+ /* perf [perf] main() */
+ { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
+ /* perf [kernel] page_fault() */
+ { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
+ /* bash [bash] main() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
+ /* bash [bash] xmalloc() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
+ /* bash [kernel] page_fault() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
+};
+
+/*
+ * Will be cast to struct ip_callchain, which has 64-bit entries
+ * for nr and ips[].
+ */
+static u64 fake_callchains[][10] = {
+ /* schedule => run_command => main */
+ { 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
+ /* main */
+ { 1, FAKE_IP_PERF_MAIN, },
+ /* cmd_record => run_command => main */
+ { 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
+ /* malloc => cmd_record => run_command => main */
+ { 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
+ FAKE_IP_PERF_MAIN, },
+ /* free => cmd_record => run_command => main */
+ { 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
+ FAKE_IP_PERF_MAIN, },
+ /* main */
+ { 1, FAKE_IP_PERF_MAIN, },
+ /* page_fault => sys_perf_event_open => run_command => main */
+ { 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
+ FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
+ /* main */
+ { 1, FAKE_IP_BASH_MAIN, },
+ /* xmalloc => malloc => xmalloc => malloc => xmalloc => main */
+ { 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
+ FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
+ /* page_fault => malloc => main */
+ { 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
+};
+
+static int add_hist_entries(struct hists *hists, struct machine *machine)
+{
+ struct addr_location al;
+ struct evsel *evsel = hists_to_evsel(hists);
+ struct perf_sample sample = { .period = 1000, };
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
+ struct hist_entry_iter iter = {
+ .evsel = evsel,
+ .sample = &sample,
+ .hide_unresolved = false,
+ };
+
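+ /*
+ * The cumulative iterator accounts the sample period to every caller
+ * found in the callchain as well (feeding the 'children' column),
+ * while the normal iterator credits only the sampled entry itself.
+ */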
+ if (symbol_conf.cumulate_callchain)
+ iter.ops = &hist_iter_cumulative;
+ else
+ iter.ops = &hist_iter_normal;
+
+ sample.cpumode = PERF_RECORD_MISC_USER;
+ sample.pid = fake_samples[i].pid;
+ sample.tid = fake_samples[i].pid;
+ sample.ip = fake_samples[i].ip;
+ sample.callchain = (struct ip_callchain *)fake_callchains[i];
+
+ if (machine__resolve(machine, &al, &sample) < 0)
+ goto out;
+
+ if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
+ NULL) < 0) {
+ addr_location__put(&al);
+ goto out;
+ }
+
+ fake_samples[i].thread = al.thread;
+ fake_samples[i].map = al.map;
+ fake_samples[i].sym = al.sym;
+ }
+
+ return TEST_OK;
+
+out:
+ pr_debug("Not enough memory for adding a hist entry\n");
+ return TEST_FAIL;
+}
+
+static void del_hist_entries(struct hists *hists)
+{
+ struct hist_entry *he;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
+ struct rb_node *node;
+
+ if (hists__has(hists, need_collapse))
+ root_in = &hists->entries_collapsed;
+ else
+ root_in = hists->entries_in;
+
+ root_out = &hists->entries;
+
+ while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
+ node = rb_first_cached(root_out);
+
+ he = rb_entry(node, struct hist_entry, rb_node);
+ rb_erase_cached(node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
+ hist_entry__delete(he);
+ }
+}
+
+typedef int (*test_fn_t)(struct evsel *, struct machine *);
+
+#define COMM(he) (thread__comm_str(he->thread))
+#define DSO(he) (he->ms.map->dso->short_name)
+#define SYM(he) (he->ms.sym->name)
+#define CPU(he) (he->cpu)
+#define PID(he) (he->thread->tid)
+#define DEPTH(he) (he->callchain->max_depth)
+#define CDSO(cl) (cl->ms.map->dso->short_name)
+#define CSYM(cl) (cl->ms.sym->name)
+
+struct result {
+ u64 children;
+ u64 self;
+ const char *comm;
+ const char *dso;
+ const char *sym;
+};
+
+struct callchain_result {
+ u64 nr;
+ struct {
+ const char *dso;
+ const char *sym;
+ } node[10];
+};
+
+static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
+ struct callchain_result *expected_callchain, size_t nr_callchain)
+{
+ char buf[32];
+ size_t i, c;
+ struct hist_entry *he;
+ struct rb_root *root;
+ struct rb_node *node;
+ struct callchain_node *cnode;
+ struct callchain_list *clist;
+
+ /*
+ * adding and deleting hist entries must be done outside of this
+ * function since TEST_ASSERT_VAL() returns in case of failure.
+ */
+ hists__collapse_resort(hists, NULL);
+ evsel__output_resort(hists_to_evsel(hists), NULL);
+
+ if (verbose > 2) {
+ pr_info("use callchain: %d, cumulate callchain: %d\n",
+ symbol_conf.use_callchain,
+ symbol_conf.cumulate_callchain);
+ print_hists_out(hists);
+ }
+
+ root = &hists->entries.rb_root;
+ for (node = rb_first(root), i = 0;
+ node && (he = rb_entry(node, struct hist_entry, rb_node));
+ node = rb_next(node), i++) {
+ scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);
+
+ TEST_ASSERT_VAL("Incorrect number of hist entry",
+ i < nr_expected);
+ TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
+ !strcmp(COMM(he), expected[i].comm) &&
+ !strcmp(DSO(he), expected[i].dso) &&
+ !strcmp(SYM(he), expected[i].sym));
+
+ if (symbol_conf.cumulate_callchain)
+ TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);
+
+ if (!symbol_conf.use_callchain)
+ continue;
+
+ /* check callchain entries */
+ root = &he->callchain->node.rb_root;
+
+ TEST_ASSERT_VAL("callchains expected", !RB_EMPTY_ROOT(root));
+ cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);
+
+ c = 0;
+ list_for_each_entry(clist, &cnode->val, list) {
+ scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);
+
+ TEST_ASSERT_VAL("Incorrect number of callchain entry",
+ c < expected_callchain[i].nr);
+ TEST_ASSERT_VAL(buf,
+ !strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
+ !strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
+ c++;
+ }
+ /* TODO: handle multiple child nodes properly */
+ TEST_ASSERT_VAL("Incorrect number of callchain entry",
+ c <= expected_callchain[i].nr);
+ }
+ TEST_ASSERT_VAL("Incorrect number of hist entry",
+ i == nr_expected);
+ TEST_ASSERT_VAL("Incorrect number of callchain entry",
+ !symbol_conf.use_callchain || nr_expected == nr_callchain);
+ return 0;
+}
+
+/* NO callchain + NO children */
+static int test1(struct evsel *evsel, struct machine *machine)
+{
+ int err;
+ struct hists *hists = evsel__hists(evsel);
+ /*
+ * expected output:
+ *
+ * Overhead Command Shared Object Symbol
+ * ======== ======= ============= ==============
+ * 20.00% perf perf [.] main
+ * 10.00% bash [kernel] [k] page_fault
+ * 10.00% bash bash [.] main
+ * 10.00% bash bash [.] xmalloc
+ * 10.00% perf [kernel] [k] page_fault
+ * 10.00% perf [kernel] [k] schedule
+ * 10.00% perf libc [.] free
+ * 10.00% perf libc [.] malloc
+ * 10.00% perf perf [.] cmd_record
+ */
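+ /* each fake sample has a period of 1000, so 10.00% above corresponds to 1000 and 20.00% to 2000 */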
+ struct result expected[] = {
+ { 0, 2000, "perf", "perf", "main" },
+ { 0, 1000, "bash", "[kernel]", "page_fault" },
+ { 0, 1000, "bash", "bash", "main" },
+ { 0, 1000, "bash", "bash", "xmalloc" },
+ { 0, 1000, "perf", "[kernel]", "page_fault" },
+ { 0, 1000, "perf", "[kernel]", "schedule" },
+ { 0, 1000, "perf", "libc", "free" },
+ { 0, 1000, "perf", "libc", "malloc" },
+ { 0, 1000, "perf", "perf", "cmd_record" },
+ };
+
+ symbol_conf.use_callchain = false;
+ symbol_conf.cumulate_callchain = false;
+ evsel__reset_sample_bit(evsel, CALLCHAIN);
+
+ setup_sorting(NULL);
+ callchain_register_param(&callchain_param);
+
+ err = add_hist_entries(hists, machine);
+ if (err < 0)
+ goto out;
+
+ err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
+
+out:
+ del_hist_entries(hists);
+ reset_output_field();
+ return err;
+}
+
+/* callchain + NO children */
+static int test2(struct evsel *evsel, struct machine *machine)
+{
+ int err;
+ struct hists *hists = evsel__hists(evsel);
+ /*
+ * expected output:
+ *
+ * Overhead Command Shared Object Symbol
+ * ======== ======= ============= ==============
+ * 20.00% perf perf [.] main
+ * |
+ * --- main
+ *
+ * 10.00% bash [kernel] [k] page_fault
+ * |
+ * --- page_fault
+ * malloc
+ * main
+ *
+ * 10.00% bash bash [.] main
+ * |
+ * --- main
+ *
+ * 10.00% bash bash [.] xmalloc
+ * |
+ * --- xmalloc
+ * malloc
+ * xmalloc <--- NOTE: there's a cycle
+ * malloc
+ * xmalloc
+ * main
+ *
+ * 10.00% perf [kernel] [k] page_fault
+ * |
+ * --- page_fault
+ * sys_perf_event_open
+ * run_command
+ * main
+ *
+ * 10.00% perf [kernel] [k] schedule
+ * |
+ * --- schedule
+ * run_command
+ * main
+ *
+ * 10.00% perf libc [.] free
+ * |
+ * --- free
+ * cmd_record
+ * run_command
+ * main
+ *
+ * 10.00% perf libc [.] malloc
+ * |
+ * --- malloc
+ * cmd_record
+ * run_command
+ * main
+ *
+ * 10.00% perf perf [.] cmd_record
+ * |
+ * --- cmd_record
+ * run_command
+ * main
+ *
+ */
+ struct result expected[] = {
+ { 0, 2000, "perf", "perf", "main" },
+ { 0, 1000, "bash", "[kernel]", "page_fault" },
+ { 0, 1000, "bash", "bash", "main" },
+ { 0, 1000, "bash", "bash", "xmalloc" },
+ { 0, 1000, "perf", "[kernel]", "page_fault" },
+ { 0, 1000, "perf", "[kernel]", "schedule" },
+ { 0, 1000, "perf", "libc", "free" },
+ { 0, 1000, "perf", "libc", "malloc" },
+ { 0, 1000, "perf", "perf", "cmd_record" },
+ };
+ struct callchain_result expected_callchain[] = {
+ {
+ 1, { { "perf", "main" }, },
+ },
+ {
+ 3, { { "[kernel]", "page_fault" },
+ { "libc", "malloc" },
+ { "bash", "main" }, },
+ },
+ {
+ 1, { { "bash", "main" }, },
+ },
+ {
+ 6, { { "bash", "xmalloc" },
+ { "libc", "malloc" },
+ { "bash", "xmalloc" },
+ { "libc", "malloc" },
+ { "bash", "xmalloc" },
+ { "bash", "main" }, },
+ },
+ {
+ 4, { { "[kernel]", "page_fault" },
+ { "[kernel]", "sys_perf_event_open" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
+ 3, { { "[kernel]", "schedule" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
+ 4, { { "libc", "free" },
+ { "perf", "cmd_record" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
+ 4, { { "libc", "malloc" },
+ { "perf", "cmd_record" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
+ 3, { { "perf", "cmd_record" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ };
+
+ symbol_conf.use_callchain = true;
+ symbol_conf.cumulate_callchain = false;
+ evsel__set_sample_bit(evsel, CALLCHAIN);
+
+ setup_sorting(NULL);
+ callchain_register_param(&callchain_param);
+
+ err = add_hist_entries(hists, machine);
+ if (err < 0)
+ goto out;
+
+ err = do_test(hists, expected, ARRAY_SIZE(expected),
+ expected_callchain, ARRAY_SIZE(expected_callchain));
+
+out:
+ del_hist_entries(hists);
+ reset_output_field();
+ return err;
+}
+
+/* NO callchain + children */
+static int test3(struct evsel *evsel, struct machine *machine)
+{
+ int err;
+ struct hists *hists = evsel__hists(evsel);
+ /*
+ * expected output:
+ *
+ * Children Self Command Shared Object Symbol
+ * ======== ======== ======= ============= =======================
+ * 70.00% 20.00% perf perf [.] main
+ * 50.00% 0.00% perf perf [.] run_command
+ * 30.00% 10.00% bash bash [.] main
+ * 30.00% 10.00% perf perf [.] cmd_record
+ * 20.00% 0.00% bash libc [.] malloc
+ * 10.00% 10.00% bash [kernel] [k] page_fault
+ * 10.00% 10.00% bash bash [.] xmalloc
+ * 10.00% 10.00% perf [kernel] [k] page_fault
+ * 10.00% 10.00% perf libc [.] malloc
+ * 10.00% 10.00% perf [kernel] [k] schedule
+ * 10.00% 10.00% perf libc [.] free
+ * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
+ */
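+ /*
+ * 'Children' is the period accumulated from every sample whose callchain
+ * passes through the entry, e.g. perf/main shows 70.00% because all seven
+ * perf samples have main in their fake callchain.
+ */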
+ struct result expected[] = {
+ { 7000, 2000, "perf", "perf", "main" },
+ { 5000, 0, "perf", "perf", "run_command" },
+ { 3000, 1000, "bash", "bash", "main" },
+ { 3000, 1000, "perf", "perf", "cmd_record" },
+ { 2000, 0, "bash", "libc", "malloc" },
+ { 1000, 1000, "bash", "[kernel]", "page_fault" },
+ { 1000, 1000, "bash", "bash", "xmalloc" },
+ { 1000, 1000, "perf", "[kernel]", "page_fault" },
+ { 1000, 1000, "perf", "[kernel]", "schedule" },
+ { 1000, 1000, "perf", "libc", "free" },
+ { 1000, 1000, "perf", "libc", "malloc" },
+ { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
+ };
+
+ symbol_conf.use_callchain = false;
+ symbol_conf.cumulate_callchain = true;
+ evsel__reset_sample_bit(evsel, CALLCHAIN);
+
+ setup_sorting(NULL);
+ callchain_register_param(&callchain_param);
+
+ err = add_hist_entries(hists, machine);
+ if (err < 0)
+ goto out;
+
+ err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
+
+out:
+ del_hist_entries(hists);
+ reset_output_field();
+ return err;
+}
+
+/* callchain + children */
+static int test4(struct evsel *evsel, struct machine *machine)
+{
+ int err;
+ struct hists *hists = evsel__hists(evsel);
+ /*
+ * expected output:
+ *
+ * Children Self Command Shared Object Symbol
+ * ======== ======== ======= ============= =======================
+ * 70.00% 20.00% perf perf [.] main
+ * |
+ * --- main
+ *
+ * 50.00% 0.00% perf perf [.] run_command
+ * |
+ * --- run_command
+ * main
+ *
+ * 30.00% 10.00% bash bash [.] main
+ * |
+ * --- main
+ *
+ * 30.00% 10.00% perf perf [.] cmd_record
+ * |
+ * --- cmd_record
+ * run_command
+ * main
+ *
+ * 20.00% 0.00% bash libc [.] malloc
+ * |
+ * --- malloc
+ * |
+ * |--50.00%-- xmalloc
+ * | main
+ * --50.00%-- main
+ *
+ * 10.00% 10.00% bash [kernel] [k] page_fault
+ * |
+ * --- page_fault
+ * malloc
+ * main
+ *
+ * 10.00% 10.00% bash bash [.] xmalloc
+ * |
+ * --- xmalloc
+ * malloc
+ * xmalloc <--- NOTE: there's a cycle
+ * malloc
+ * xmalloc
+ * main
+ *
+ * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
+ * |
+ * --- sys_perf_event_open
+ * run_command
+ * main
+ *
+ * 10.00% 10.00% perf [kernel] [k] page_fault
+ * |
+ * --- page_fault
+ * sys_perf_event_open
+ * run_command
+ * main
+ *
+ * 10.00% 10.00% perf [kernel] [k] schedule
+ * |
+ * --- schedule
+ * run_command
+ * main
+ *
+ * 10.00% 10.00% perf libc [.] free
+ * |
+ * --- free
+ * cmd_record
+ * run_command
+ * main
+ *
+ * 10.00% 10.00% perf libc [.] malloc
+ * |
+ * --- malloc
+ * cmd_record
+ * run_command
+ * main
+ *
+ */
+ struct result expected[] = {
+ { 7000, 2000, "perf", "perf", "main" },
+ { 5000, 0, "perf", "perf", "run_command" },
+ { 3000, 1000, "bash", "bash", "main" },
+ { 3000, 1000, "perf", "perf", "cmd_record" },
+ { 2000, 0, "bash", "libc", "malloc" },
+ { 1000, 1000, "bash", "[kernel]", "page_fault" },
+ { 1000, 1000, "bash", "bash", "xmalloc" },
+ { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
+ { 1000, 1000, "perf", "[kernel]", "page_fault" },
+ { 1000, 1000, "perf", "[kernel]", "schedule" },
+ { 1000, 1000, "perf", "libc", "free" },
+ { 1000, 1000, "perf", "libc", "malloc" },
+ };
+ struct callchain_result expected_callchain[] = {
+ {
+ 1, { { "perf", "main" }, },
+ },
+ {
+ 2, { { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
+ 1, { { "bash", "main" }, },
+ },
+ {
+ 3, { { "perf", "cmd_record" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
+ 4, { { "libc", "malloc" },
+ { "bash", "xmalloc" },
+ { "bash", "main" },
+ { "bash", "main" }, },
+ },
+ {
+ 3, { { "[kernel]", "page_fault" },
+ { "libc", "malloc" },
+ { "bash", "main" }, },
+ },
+ {
+ 6, { { "bash", "xmalloc" },
+ { "libc", "malloc" },
+ { "bash", "xmalloc" },
+ { "libc", "malloc" },
+ { "bash", "xmalloc" },
+ { "bash", "main" }, },
+ },
+ {
+ 3, { { "[kernel]", "sys_perf_event_open" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
+ 4, { { "[kernel]", "page_fault" },
+ { "[kernel]", "sys_perf_event_open" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
+ 3, { { "[kernel]", "schedule" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
+ 4, { { "libc", "free" },
+ { "perf", "cmd_record" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
+ 4, { { "libc", "malloc" },
+ { "perf", "cmd_record" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ };
+
+ symbol_conf.use_callchain = true;
+ symbol_conf.cumulate_callchain = true;
+ evsel__set_sample_bit(evsel, CALLCHAIN);
+
+ setup_sorting(NULL);
+
+ callchain_param = callchain_param_default;
+ callchain_register_param(&callchain_param);
+
+ err = add_hist_entries(hists, machine);
+ if (err < 0)
+ goto out;
+
+ err = do_test(hists, expected, ARRAY_SIZE(expected),
+ expected_callchain, ARRAY_SIZE(expected_callchain));
+
+out:
+ del_hist_entries(hists);
+ reset_output_field();
+ return err;
+}
+
+static int test__hists_cumulate(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int err = TEST_FAIL;
+ struct machines machines;
+ struct machine *machine;
+ struct evsel *evsel;
+ struct evlist *evlist = evlist__new();
+ size_t i;
+ test_fn_t testcases[] = {
+ test1,
+ test2,
+ test3,
+ test4,
+ };
+
+ TEST_ASSERT_VAL("No memory", evlist);
+
+ err = parse_event(evlist, "cpu-clock");
+ if (err)
+ goto out;
+ err = TEST_FAIL;
+
+ machines__init(&machines);
+
+ /* setup threads/dso/map/symbols also */
+ machine = setup_fake_machine(&machines);
+ if (!machine)
+ goto out;
+
+ if (verbose > 1)
+ machine__fprintf(machine, stderr);
+
+ evsel = evlist__first(evlist);
+
+ for (i = 0; i < ARRAY_SIZE(testcases); i++) {
+ err = testcases[i](evsel, machine);
+ if (err < 0)
+ break;
+ }
+
+out:
+ /* tear down everything */
+ evlist__delete(evlist);
+ machines__exit(&machines);
+
+ return err;
+}
+
+DEFINE_SUITE("Cumulate child hist entries", hists_cumulate);
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
new file mode 100644
index 000000000..8e1ceeb9b
--- /dev/null
+++ b/tools/perf/tests/hists_filter.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/debug.h"
+#include "util/map.h"
+#include "util/symbol.h"
+#include "util/sort.h"
+#include "util/evsel.h"
+#include "util/event.h"
+#include "util/evlist.h"
+#include "util/machine.h"
+#include "util/parse-events.h"
+#include "tests/tests.h"
+#include "tests/hists_common.h"
+#include <linux/kernel.h>
+
+struct sample {
+ u32 pid;
+ u64 ip;
+ struct thread *thread;
+ struct map *map;
+ struct symbol *sym;
+ int socket;
+};
+
+/* For the numbers, see hists_common.c */
+static struct sample fake_samples[] = {
+ /* perf [kernel] schedule() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, .socket = 0 },
+ /* perf [perf] main() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, .socket = 0 },
+ /* perf [libc] malloc() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, .socket = 0 },
+ /* perf [perf] main() */
+ { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, .socket = 0 }, /* will be merged */
+ /* perf [perf] cmd_record() */
+ { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, .socket = 1 },
+ /* perf [kernel] page_fault() */
+ { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, .socket = 1 },
+ /* bash [bash] main() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, .socket = 2 },
+ /* bash [bash] xmalloc() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, .socket = 2 },
+ /* bash [libc] malloc() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, .socket = 3 },
+ /* bash [kernel] page_fault() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, .socket = 3 },
+};
+
+static int add_hist_entries(struct evlist *evlist,
+ struct machine *machine)
+{
+ struct evsel *evsel;
+ struct addr_location al;
+ struct perf_sample sample = { .period = 100, };
+ size_t i;
+
+ /*
+ * Each evsel will have 10 samples, but the 4th sample
+ * (perf [perf] main) will be collapsed into an existing entry,
+ * so 9 entries in total will be in the tree.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
+ struct hist_entry_iter iter = {
+ .evsel = evsel,
+ .sample = &sample,
+ .ops = &hist_iter_normal,
+ .hide_unresolved = false,
+ };
+ struct hists *hists = evsel__hists(evsel);
+
+ /* make sure it has no filter at first */
+ hists->thread_filter = NULL;
+ hists->dso_filter = NULL;
+ hists->symbol_filter_str = NULL;
+
+ sample.cpumode = PERF_RECORD_MISC_USER;
+ sample.pid = fake_samples[i].pid;
+ sample.tid = fake_samples[i].pid;
+ sample.ip = fake_samples[i].ip;
+
+ if (machine__resolve(machine, &al, &sample) < 0)
+ goto out;
+
+ al.socket = fake_samples[i].socket;
+ if (hist_entry_iter__add(&iter, &al,
+ sysctl_perf_event_max_stack, NULL) < 0) {
+ addr_location__put(&al);
+ goto out;
+ }
+
+ fake_samples[i].thread = al.thread;
+ fake_samples[i].map = al.map;
+ fake_samples[i].sym = al.sym;
+ }
+ }
+
+ return 0;
+
+out:
+ pr_debug("Not enough memory for adding a hist entry\n");
+ return TEST_FAIL;
+}
+
+static int test__hists_filter(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int err = TEST_FAIL;
+ struct machines machines;
+ struct machine *machine;
+ struct evsel *evsel;
+ struct evlist *evlist = evlist__new();
+
+ TEST_ASSERT_VAL("No memory", evlist);
+
+ err = parse_event(evlist, "cpu-clock");
+ if (err)
+ goto out;
+ err = parse_event(evlist, "task-clock");
+ if (err)
+ goto out;
+ err = TEST_FAIL;
+
+ /* default sort order (comm,dso,sym) will be used */
+ if (setup_sorting(NULL) < 0)
+ goto out;
+
+ machines__init(&machines);
+
+ /* setup threads/dso/map/symbols also */
+ machine = setup_fake_machine(&machines);
+ if (!machine)
+ goto out;
+
+ if (verbose > 1)
+ machine__fprintf(machine, stderr);
+
+ /* process sample events */
+ err = add_hist_entries(evlist, machine);
+ if (err < 0)
+ goto out;
+
+ evlist__for_each_entry(evlist, evsel) {
+ struct hists *hists = evsel__hists(evsel);
+
+ hists__collapse_resort(hists, NULL);
+ evsel__output_resort(evsel, NULL);
+
+ if (verbose > 2) {
+ pr_info("Normal histogram\n");
+ print_hists_out(hists);
+ }
+
+ TEST_ASSERT_VAL("Invalid nr samples",
+ hists->stats.nr_samples == 10);
+ TEST_ASSERT_VAL("Invalid nr hist entries",
+ hists->nr_entries == 9);
+ TEST_ASSERT_VAL("Invalid total period",
+ hists->stats.total_period == 1000);
+ TEST_ASSERT_VAL("Unmatched nr samples",
+ hists->stats.nr_samples ==
+ hists->stats.nr_non_filtered_samples);
+ TEST_ASSERT_VAL("Unmatched nr hist entries",
+ hists->nr_entries == hists->nr_non_filtered_entries);
+ TEST_ASSERT_VAL("Unmatched total period",
+ hists->stats.total_period ==
+ hists->stats.total_non_filtered_period);
+
+ /* now applying thread filter for 'bash' */
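+ /* 'bash' accounts for 4 of the fake samples, hence 4 entries and a period of 400 below */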
+ hists->thread_filter = fake_samples[9].thread;
+ hists__filter_by_thread(hists);
+
+ if (verbose > 2) {
+ pr_info("Histogram for thread filter\n");
+ print_hists_out(hists);
+ }
+
+ /* normal stats should be invariant */
+ TEST_ASSERT_VAL("Invalid nr samples",
+ hists->stats.nr_samples == 10);
+ TEST_ASSERT_VAL("Invalid nr hist entries",
+ hists->nr_entries == 9);
+ TEST_ASSERT_VAL("Invalid total period",
+ hists->stats.total_period == 1000);
+
+ /* but filter stats are changed */
+ TEST_ASSERT_VAL("Unmatched nr samples for thread filter",
+ hists->stats.nr_non_filtered_samples == 4);
+ TEST_ASSERT_VAL("Unmatched nr hist entries for thread filter",
+ hists->nr_non_filtered_entries == 4);
+ TEST_ASSERT_VAL("Unmatched total period for thread filter",
+ hists->stats.total_non_filtered_period == 400);
+
+ /* remove thread filter first */
+ hists->thread_filter = NULL;
+ hists__filter_by_thread(hists);
+
+ /* now applying dso filter for 'kernel' */
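+ /* 3 fake samples hit the [kernel] DSO: schedule and the two page_faults */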
+ hists->dso_filter = fake_samples[0].map->dso;
+ hists__filter_by_dso(hists);
+
+ if (verbose > 2) {
+ pr_info("Histogram for dso filter\n");
+ print_hists_out(hists);
+ }
+
+ /* normal stats should be invariant */
+ TEST_ASSERT_VAL("Invalid nr samples",
+ hists->stats.nr_samples == 10);
+ TEST_ASSERT_VAL("Invalid nr hist entries",
+ hists->nr_entries == 9);
+ TEST_ASSERT_VAL("Invalid total period",
+ hists->stats.total_period == 1000);
+
+ /* but filter stats are changed */
+ TEST_ASSERT_VAL("Unmatched nr samples for dso filter",
+ hists->stats.nr_non_filtered_samples == 3);
+ TEST_ASSERT_VAL("Unmatched nr hist entries for dso filter",
+ hists->nr_non_filtered_entries == 3);
+ TEST_ASSERT_VAL("Unmatched total period for dso filter",
+ hists->stats.total_non_filtered_period == 300);
+
+ /* remove dso filter first */
+ hists->dso_filter = NULL;
+ hists__filter_by_dso(hists);
+
+ /*
+ * now applying symbol filter for 'main'. Note that there are
+ * 3 samples with the 'main' symbol, but the 4th entry of
+ * fake_samples was already collapsed, so it won't be counted
+ * as a separate entry; its sample count and total period are
+ * still included.
+ */
+ hists->symbol_filter_str = "main";
+ hists__filter_by_symbol(hists);
+
+ if (verbose > 2) {
+ pr_info("Histogram for symbol filter\n");
+ print_hists_out(hists);
+ }
+
+ /* normal stats should be invariant */
+ TEST_ASSERT_VAL("Invalid nr samples",
+ hists->stats.nr_samples == 10);
+ TEST_ASSERT_VAL("Invalid nr hist entries",
+ hists->nr_entries == 9);
+ TEST_ASSERT_VAL("Invalid total period",
+ hists->stats.total_period == 1000);
+
+ /* but filter stats are changed */
+ TEST_ASSERT_VAL("Unmatched nr samples for symbol filter",
+ hists->stats.nr_non_filtered_samples == 3);
+ TEST_ASSERT_VAL("Unmatched nr hist entries for symbol filter",
+ hists->nr_non_filtered_entries == 2);
+ TEST_ASSERT_VAL("Unmatched total period for symbol filter",
+ hists->stats.total_non_filtered_period == 300);
+
+ /* remove symbol filter first */
+ hists->symbol_filter_str = NULL;
+ hists__filter_by_symbol(hists);
+
+ /* now applying socket filters */
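+ /* socket 2 has two fake samples: bash main and bash xmalloc */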
+ hists->socket_filter = 2;
+ hists__filter_by_socket(hists);
+
+ if (verbose > 2) {
+ pr_info("Histogram for socket filters\n");
+ print_hists_out(hists);
+ }
+
+ /* normal stats should be invariant */
+ TEST_ASSERT_VAL("Invalid nr samples",
+ hists->stats.nr_samples == 10);
+ TEST_ASSERT_VAL("Invalid nr hist entries",
+ hists->nr_entries == 9);
+ TEST_ASSERT_VAL("Invalid total period",
+ hists->stats.total_period == 1000);
+
+ /* but filter stats are changed */
+ TEST_ASSERT_VAL("Unmatched nr samples for socket filter",
+ hists->stats.nr_non_filtered_samples == 2);
+ TEST_ASSERT_VAL("Unmatched nr hist entries for socket filter",
+ hists->nr_non_filtered_entries == 2);
+ TEST_ASSERT_VAL("Unmatched total period for socket filter",
+ hists->stats.total_non_filtered_period == 200);
+
+ /* remove socket filter first */
+ hists->socket_filter = -1;
+ hists__filter_by_socket(hists);
+
+ /* now applying all filters at once. */
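+ /*
+ * Only the collapsed "perf [perf] main" entry matches both the thread
+ * and dso filters; it carries 2 samples and a period of 200.
+ */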
+ hists->thread_filter = fake_samples[1].thread;
+ hists->dso_filter = fake_samples[1].map->dso;
+ hists__filter_by_thread(hists);
+ hists__filter_by_dso(hists);
+
+ if (verbose > 2) {
+ pr_info("Histogram for all filters\n");
+ print_hists_out(hists);
+ }
+
+ /* normal stats should be invariant */
+ TEST_ASSERT_VAL("Invalid nr samples",
+ hists->stats.nr_samples == 10);
+ TEST_ASSERT_VAL("Invalid nr hist entries",
+ hists->nr_entries == 9);
+ TEST_ASSERT_VAL("Invalid total period",
+ hists->stats.total_period == 1000);
+
+ /* but filter stats are changed */
+ TEST_ASSERT_VAL("Unmatched nr samples for all filter",
+ hists->stats.nr_non_filtered_samples == 2);
+ TEST_ASSERT_VAL("Unmatched nr hist entries for all filter",
+ hists->nr_non_filtered_entries == 1);
+ TEST_ASSERT_VAL("Unmatched total period for all filter",
+ hists->stats.total_non_filtered_period == 200);
+ }
+
+
+ err = TEST_OK;
+
+out:
+ /* tear down everything */
+ evlist__delete(evlist);
+ reset_output_field();
+ machines__exit(&machines);
+
+ return err;
+}
+
+DEFINE_SUITE("Filter hist entries", hists_filter);
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
new file mode 100644
index 000000000..14b2ff808
--- /dev/null
+++ b/tools/perf/tests/hists_link.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "tests.h"
+#include "debug.h"
+#include "symbol.h"
+#include "sort.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "machine.h"
+#include "parse-events.h"
+#include "hists_common.h"
+#include "util/mmap.h"
+#include <errno.h>
+#include <linux/kernel.h>
+
+struct sample {
+ u32 pid;
+ u64 ip;
+ struct thread *thread;
+ struct map *map;
+ struct symbol *sym;
+};
+
+/* For the numbers, see hists_common.c */
+static struct sample fake_common_samples[] = {
+ /* perf [kernel] schedule() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
+ /* perf [perf] main() */
+ { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
+ /* perf [perf] cmd_record() */
+ { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
+ /* bash [bash] xmalloc() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
+ /* bash [libc] malloc() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, },
+};
+
+static struct sample fake_samples[][5] = {
+ {
+ /* perf [perf] run_command() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
+ /* perf [libc] malloc() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
+ /* perf [kernel] page_fault() */
+ { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
+ /* perf [kernel] sys_perf_event_open() */
+ { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
+ /* bash [libc] free() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, },
+ },
+ {
+ /* perf [libc] free() */
+ { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
+ /* bash [libc] malloc() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
+ /* bash [bash] xfee() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, },
+ /* bash [libc] realloc() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, },
+ /* bash [kernel] page_fault() */
+ { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
+ },
+};
+
+static int add_hist_entries(struct evlist *evlist, struct machine *machine)
+{
+ struct evsel *evsel;
+ struct addr_location al;
+ struct hist_entry *he;
+ struct perf_sample sample = { .period = 1, .weight = 1, };
+ size_t i = 0, k;
+
+ /*
+ * Each evsel will have 10 samples - 5 common and 5 distinct.
+ * However, the second evsel also has a collapsed entry for
+ * "bash [libc] malloc", so 9 entries in total will be in its tree.
+ */
+ evlist__for_each_entry(evlist, evsel) {
+ struct hists *hists = evsel__hists(evsel);
+
+ for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
+ sample.cpumode = PERF_RECORD_MISC_USER;
+ sample.pid = fake_common_samples[k].pid;
+ sample.tid = fake_common_samples[k].pid;
+ sample.ip = fake_common_samples[k].ip;
+
+ if (machine__resolve(machine, &al, &sample) < 0)
+ goto out;
+
+ he = hists__add_entry(hists, &al, NULL,
+ NULL, NULL, &sample, true);
+ if (he == NULL) {
+ addr_location__put(&al);
+ goto out;
+ }
+
+ fake_common_samples[k].thread = al.thread;
+ fake_common_samples[k].map = al.map;
+ fake_common_samples[k].sym = al.sym;
+ }
+
+ for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
+ sample.pid = fake_samples[i][k].pid;
+ sample.tid = fake_samples[i][k].pid;
+ sample.ip = fake_samples[i][k].ip;
+ if (machine__resolve(machine, &al, &sample) < 0)
+ goto out;
+
+ he = hists__add_entry(hists, &al, NULL,
+ NULL, NULL, &sample, true);
+ if (he == NULL) {
+ addr_location__put(&al);
+ goto out;
+ }
+
+ fake_samples[i][k].thread = al.thread;
+ fake_samples[i][k].map = al.map;
+ fake_samples[i][k].sym = al.sym;
+ }
+ i++;
+ }
+
+ return 0;
+
+out:
+ pr_debug("Not enough memory for adding a hist entry\n");
+ return -1;
+}
+
+static int find_sample(struct sample *samples, size_t nr_samples,
+ struct thread *t, struct map *m, struct symbol *s)
+{
+ while (nr_samples--) {
+ if (samples->thread == t && samples->map == m &&
+ samples->sym == s)
+ return 1;
+ samples++;
+ }
+ return 0;
+}
+
+static int __validate_match(struct hists *hists)
+{
+ size_t count = 0;
+ struct rb_root_cached *root;
+ struct rb_node *node;
+
+ /*
+ * Only entries from fake_common_samples should have a pair.
+ */
+ if (hists__has(hists, need_collapse))
+ root = &hists->entries_collapsed;
+ else
+ root = hists->entries_in;
+
+ node = rb_first_cached(root);
+ while (node) {
+ struct hist_entry *he;
+
+ he = rb_entry(node, struct hist_entry, rb_node_in);
+
+ if (hist_entry__has_pairs(he)) {
+ if (find_sample(fake_common_samples,
+ ARRAY_SIZE(fake_common_samples),
+ he->thread, he->ms.map, he->ms.sym)) {
+ count++;
+ } else {
+ pr_debug("Can't find the matched entry\n");
+ return -1;
+ }
+ }
+
+ node = rb_next(node);
+ }
+
+ if (count != ARRAY_SIZE(fake_common_samples)) {
+ pr_debug("Invalid count for matched entries: %zd of %zd\n",
+ count, ARRAY_SIZE(fake_common_samples));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int validate_match(struct hists *leader, struct hists *other)
+{
+ return __validate_match(leader) || __validate_match(other);
+}
+
+static int __validate_link(struct hists *hists, int idx)
+{
+ size_t count = 0;
+ size_t count_pair = 0;
+ size_t count_dummy = 0;
+ struct rb_root_cached *root;
+ struct rb_node *node;
+
+ /*
+ * Leader hists (idx = 0) will have dummy entries from the other
+ * hists, and some entries will have no pair. However, every entry
+ * in the other hists should have a (dummy) pair.
+ */
+ if (hists__has(hists, need_collapse))
+ root = &hists->entries_collapsed;
+ else
+ root = hists->entries_in;
+
+ node = rb_first_cached(root);
+ while (node) {
+ struct hist_entry *he;
+
+ he = rb_entry(node, struct hist_entry, rb_node_in);
+
+ if (hist_entry__has_pairs(he)) {
+ if (!find_sample(fake_common_samples,
+ ARRAY_SIZE(fake_common_samples),
+ he->thread, he->ms.map, he->ms.sym) &&
+ !find_sample(fake_samples[idx],
+ ARRAY_SIZE(fake_samples[idx]),
+ he->thread, he->ms.map, he->ms.sym)) {
+ count_dummy++;
+ }
+ count_pair++;
+ } else if (idx) {
+ pr_debug("A entry from the other hists should have pair\n");
+ return -1;
+ }
+
+ count++;
+ node = rb_next(node);
+ }
+
+ /*
+ * Note that we have an entry collapsed in the other (idx = 1) hists.
+ */
+ if (idx == 0) {
+ if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
+ pr_debug("Invalid count of dummy entries: %zd of %zd\n",
+ count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
+ return -1;
+ }
+ if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
+ pr_debug("Invalid count of total leader entries: %zd of %zd\n",
+ count, count_pair + ARRAY_SIZE(fake_samples[0]));
+ return -1;
+ }
+ } else {
+ if (count != count_pair) {
+ pr_debug("Invalid count of total other entries: %zd of %zd\n",
+ count, count_pair);
+ return -1;
+ }
+ if (count_dummy > 0) {
+ pr_debug("Other hists should not have dummy entries: %zd\n",
+ count_dummy);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int validate_link(struct hists *leader, struct hists *other)
+{
+ return __validate_link(leader, 0) || __validate_link(other, 1);
+}
+
+static int test__hists_link(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int err = -1;
+ struct hists *hists, *first_hists;
+ struct machines machines;
+ struct machine *machine = NULL;
+ struct evsel *evsel, *first;
+ struct evlist *evlist = evlist__new();
+
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ err = parse_event(evlist, "cpu-clock");
+ if (err)
+ goto out;
+ err = parse_event(evlist, "task-clock");
+ if (err)
+ goto out;
+
+ err = TEST_FAIL;
+ /* default sort order (comm,dso,sym) will be used */
+ if (setup_sorting(NULL) < 0)
+ goto out;
+
+ machines__init(&machines);
+
+ /* setup threads/dso/map/symbols also */
+ machine = setup_fake_machine(&machines);
+ if (!machine)
+ goto out;
+
+ if (verbose > 1)
+ machine__fprintf(machine, stderr);
+
+ /* process sample events */
+ err = add_hist_entries(evlist, machine);
+ if (err < 0)
+ goto out;
+
+ evlist__for_each_entry(evlist, evsel) {
+ hists = evsel__hists(evsel);
+ hists__collapse_resort(hists, NULL);
+
+ if (verbose > 2)
+ print_hists_in(hists);
+ }
+
+ first = evlist__first(evlist);
+ evsel = evlist__last(evlist);
+
+ first_hists = evsel__hists(first);
+ hists = evsel__hists(evsel);
+
+ /* match common entries */
+ hists__match(first_hists, hists);
+ err = validate_match(first_hists, hists);
+ if (err)
+ goto out;
+
+ /* link common and/or dummy entries */
+ hists__link(first_hists, hists);
+ err = validate_link(first_hists, hists);
+ if (err)
+ goto out;
+
+ err = 0;
+
+out:
+ /* tear down everything */
+ evlist__delete(evlist);
+ reset_output_field();
+ machines__exit(&machines);
+
+ return err;
+}
+
+DEFINE_SUITE("Match and link multiple hists", hists_link);
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
new file mode 100644
index 000000000..62b009325
--- /dev/null
+++ b/tools/perf/tests/hists_output.c
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/debug.h"
+#include "util/dso.h"
+#include "util/event.h"
+#include "util/map.h"
+#include "util/symbol.h"
+#include "util/sort.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
+#include "util/machine.h"
+#include "util/thread.h"
+#include "util/parse-events.h"
+#include "tests/tests.h"
+#include "tests/hists_common.h"
+#include <linux/kernel.h>
+
+struct sample {
+ u32 cpu;
+ u32 pid;
+ u64 ip;
+ struct thread *thread;
+ struct map *map;
+ struct symbol *sym;
+};
+
+/* For the numbers, see hists_common.c */
+static struct sample fake_samples[] = {
+ /* perf [kernel] schedule() */
+ { .cpu = 0, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
+ /* perf [perf] main() */
+ { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
+ /* perf [perf] cmd_record() */
+ { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
+ /* perf [libc] malloc() */
+ { .cpu = 1, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
+ /* perf [libc] free() */
+ { .cpu = 2, .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
+ /* perf [perf] main() */
+ { .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
+ /* perf [kernel] page_fault() */
+ { .cpu = 2, .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
+ /* bash [bash] main() */
+ { .cpu = 3, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
+ /* bash [bash] xmalloc() */
+ { .cpu = 0, .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
+ /* bash [kernel] page_fault() */
+ { .cpu = 1, .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
+};
+
+static int add_hist_entries(struct hists *hists, struct machine *machine)
+{
+ struct addr_location al;
+ struct evsel *evsel = hists_to_evsel(hists);
+ struct perf_sample sample = { .period = 100, };
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
+ struct hist_entry_iter iter = {
+ .evsel = evsel,
+ .sample = &sample,
+ .ops = &hist_iter_normal,
+ .hide_unresolved = false,
+ };
+
+ sample.cpumode = PERF_RECORD_MISC_USER;
+ sample.cpu = fake_samples[i].cpu;
+ sample.pid = fake_samples[i].pid;
+ sample.tid = fake_samples[i].pid;
+ sample.ip = fake_samples[i].ip;
+
+ if (machine__resolve(machine, &al, &sample) < 0)
+ goto out;
+
+ if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
+ NULL) < 0) {
+ addr_location__put(&al);
+ goto out;
+ }
+
+ fake_samples[i].thread = al.thread;
+ fake_samples[i].map = al.map;
+ fake_samples[i].sym = al.sym;
+ }
+
+ return TEST_OK;
+
+out:
+ pr_debug("Not enough memory for adding a hist entry\n");
+ return TEST_FAIL;
+}
+
+static void del_hist_entries(struct hists *hists)
+{
+ struct hist_entry *he;
+ struct rb_root_cached *root_in;
+ struct rb_root_cached *root_out;
+ struct rb_node *node;
+
+ if (hists__has(hists, need_collapse))
+ root_in = &hists->entries_collapsed;
+ else
+ root_in = hists->entries_in;
+
+ root_out = &hists->entries;
+
+ while (!RB_EMPTY_ROOT(&root_out->rb_root)) {
+ node = rb_first_cached(root_out);
+
+ he = rb_entry(node, struct hist_entry, rb_node);
+ rb_erase_cached(node, root_out);
+ rb_erase_cached(&he->rb_node_in, root_in);
+ hist_entry__delete(he);
+ }
+}
+
+typedef int (*test_fn_t)(struct evsel *, struct machine *);
+
+#define COMM(he) (thread__comm_str(he->thread))
+#define DSO(he) (he->ms.map->dso->short_name)
+#define SYM(he) (he->ms.sym->name)
+#define CPU(he) (he->cpu)
+#define PID(he) (he->thread->tid)
+
+/* default sort keys (no field) */
+static int test1(struct evsel *evsel, struct machine *machine)
+{
+ int err;
+ struct hists *hists = evsel__hists(evsel);
+ struct hist_entry *he;
+ struct rb_root_cached *root;
+ struct rb_node *node;
+
+ field_order = NULL;
+ sort_order = NULL; /* equivalent to sort_order = "comm,dso,sym" */
+
+ setup_sorting(NULL);
+
+ /*
+ * expected output:
+ *
+ * Overhead Command Shared Object Symbol
+ * ======== ======= ============= ==============
+ * 20.00% perf perf [.] main
+ * 10.00% bash [kernel] [k] page_fault
+ * 10.00% bash bash [.] main
+ * 10.00% bash bash [.] xmalloc
+ * 10.00% perf [kernel] [k] page_fault
+ * 10.00% perf [kernel] [k] schedule
+ * 10.00% perf libc [.] free
+ * 10.00% perf libc [.] malloc
+ * 10.00% perf perf [.] cmd_record
+ */
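+ /* each fake sample has a period of 100, so 20.00% above corresponds to a period of 200 */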
+ err = add_hist_entries(hists, machine);
+ if (err < 0)
+ goto out;
+
+ hists__collapse_resort(hists, NULL);
+ evsel__output_resort(evsel, NULL);
+
+ if (verbose > 2) {
+ pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
+ print_hists_out(hists);
+ }
+
+ root = &hists->entries;
+ node = rb_first_cached(root);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
+ !strcmp(SYM(he), "main") && he->stat.period == 200);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
+ !strcmp(SYM(he), "page_fault") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
+ !strcmp(SYM(he), "main") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
+ !strcmp(SYM(he), "xmalloc") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
+ !strcmp(SYM(he), "page_fault") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
+ !strcmp(SYM(he), "schedule") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
+ !strcmp(SYM(he), "free") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
+ !strcmp(SYM(he), "malloc") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
+ !strcmp(SYM(he), "cmd_record") && he->stat.period == 100);
+
+out:
+ del_hist_entries(hists);
+ reset_output_field();
+ return err;
+}
+
+/* mixed fields and sort keys */
+static int test2(struct evsel *evsel, struct machine *machine)
+{
+ int err;
+ struct hists *hists = evsel__hists(evsel);
+ struct hist_entry *he;
+ struct rb_root_cached *root;
+ struct rb_node *node;
+
+ field_order = "overhead,cpu";
+ sort_order = "pid";
+
+ setup_sorting(NULL);
+
+ /*
+ * expected output:
+ *
+ * Overhead CPU Command: Pid
+ * ======== === =============
+ * 30.00% 1 perf : 100
+ * 10.00% 0 perf : 100
+ * 10.00% 2 perf : 100
+ * 20.00% 2 perf : 200
+ * 10.00% 0 bash : 300
+ * 10.00% 1 bash : 300
+ * 10.00% 3 bash : 300
+ */
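+ /* only the first two entries of the expected output are verified below */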
+ err = add_hist_entries(hists, machine);
+ if (err < 0)
+ goto out;
+
+ hists__collapse_resort(hists, NULL);
+ evsel__output_resort(evsel, NULL);
+
+ if (verbose > 2) {
+ pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
+ print_hists_out(hists);
+ }
+
+ root = &hists->entries;
+ node = rb_first_cached(root);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 1 && PID(he) == 100 && he->stat.period == 300);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 0 && PID(he) == 100 && he->stat.period == 100);
+
+out:
+ del_hist_entries(hists);
+ reset_output_field();
+ return err;
+}
+
+/* fields only (no sort key) */
+static int test3(struct evsel *evsel, struct machine *machine)
+{
+ int err;
+ struct hists *hists = evsel__hists(evsel);
+ struct hist_entry *he;
+ struct rb_root_cached *root;
+ struct rb_node *node;
+
+ field_order = "comm,overhead,dso";
+ sort_order = NULL;
+
+ setup_sorting(NULL);
+
+ /*
+ * expected output:
+ *
+ * Command Overhead Shared Object
+ * ======= ======== =============
+ * bash 20.00% bash
+ * bash 10.00% [kernel]
+ * perf 30.00% perf
+ * perf 20.00% [kernel]
+ * perf 20.00% libc
+ */
+ err = add_hist_entries(hists, machine);
+ if (err < 0)
+ goto out;
+
+ hists__collapse_resort(hists, NULL);
+ evsel__output_resort(evsel, NULL);
+
+ if (verbose > 2) {
+ pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
+ print_hists_out(hists);
+ }
+
+ root = &hists->entries;
+ node = rb_first_cached(root);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
+ he->stat.period == 200);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
+ he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
+ he->stat.period == 300);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
+ he->stat.period == 200);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
+ he->stat.period == 200);
+
+out:
+ del_hist_entries(hists);
+ reset_output_field();
+ return err;
+}
+
+/* handle duplicate 'dso' field */
+static int test4(struct evsel *evsel, struct machine *machine)
+{
+ int err;
+ struct hists *hists = evsel__hists(evsel);
+ struct hist_entry *he;
+ struct rb_root_cached *root;
+ struct rb_node *node;
+
+ field_order = "dso,sym,comm,overhead,dso";
+ sort_order = "sym";
+
+ setup_sorting(NULL);
+
+ /*
+ * expected output:
+ *
+ * Shared Object Symbol Command Overhead
+ * ============= ============== ======= ========
+ * perf [.] cmd_record perf 10.00%
+ * libc [.] free perf 10.00%
+ * bash [.] main bash 10.00%
+ * perf [.] main perf 20.00%
+ * libc [.] malloc perf 10.00%
+ * [kernel] [k] page_fault bash 10.00%
+ * [kernel] [k] page_fault perf 10.00%
+ * [kernel] [k] schedule perf 10.00%
+ * bash [.] xmalloc bash 10.00%
+ */
+ err = add_hist_entries(hists, machine);
+ if (err < 0)
+ goto out;
+
+ hists__collapse_resort(hists, NULL);
+ evsel__output_resort(evsel, NULL);
+
+ if (verbose > 2) {
+ pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
+ print_hists_out(hists);
+ }
+
+ root = &hists->entries;
+ node = rb_first_cached(root);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(DSO(he), "perf") && !strcmp(SYM(he), "cmd_record") &&
+ !strcmp(COMM(he), "perf") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(DSO(he), "libc") && !strcmp(SYM(he), "free") &&
+ !strcmp(COMM(he), "perf") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(DSO(he), "bash") && !strcmp(SYM(he), "main") &&
+ !strcmp(COMM(he), "bash") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(DSO(he), "perf") && !strcmp(SYM(he), "main") &&
+ !strcmp(COMM(he), "perf") && he->stat.period == 200);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(DSO(he), "libc") && !strcmp(SYM(he), "malloc") &&
+ !strcmp(COMM(he), "perf") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
+ !strcmp(COMM(he), "bash") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "page_fault") &&
+ !strcmp(COMM(he), "perf") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(DSO(he), "[kernel]") && !strcmp(SYM(he), "schedule") &&
+ !strcmp(COMM(he), "perf") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ !strcmp(DSO(he), "bash") && !strcmp(SYM(he), "xmalloc") &&
+ !strcmp(COMM(he), "bash") && he->stat.period == 100);
+
+out:
+ del_hist_entries(hists);
+ reset_output_field();
+ return err;
+}
+
+/* full sort keys w/o overhead field */
+static int test5(struct evsel *evsel, struct machine *machine)
+{
+ int err;
+ struct hists *hists = evsel__hists(evsel);
+ struct hist_entry *he;
+ struct rb_root_cached *root;
+ struct rb_node *node;
+
+ field_order = "cpu,pid,comm,dso,sym";
+ sort_order = "dso,pid";
+
+ setup_sorting(NULL);
+
+ /*
+ * expected output:
+ *
+ * CPU Command: Pid Command Shared Object Symbol
+ * === ============= ======= ============= ==============
+ * 0 perf: 100 perf [kernel] [k] schedule
+ * 2 perf: 200 perf [kernel] [k] page_fault
+ * 1 bash: 300 bash [kernel] [k] page_fault
+ * 0 bash: 300 bash bash [.] xmalloc
+ * 3 bash: 300 bash bash [.] main
+ * 1 perf: 100 perf libc [.] malloc
+ * 2 perf: 100 perf libc [.] free
+ * 1 perf: 100 perf perf [.] cmd_record
+ * 1 perf: 100 perf perf [.] main
+ * 2 perf: 200 perf perf [.] main
+ */
+ err = add_hist_entries(hists, machine);
+ if (err < 0)
+ goto out;
+
+ hists__collapse_resort(hists, NULL);
+ evsel__output_resort(evsel, NULL);
+
+ if (verbose > 2) {
+ pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
+ print_hists_out(hists);
+ }
+
+ root = &hists->entries;
+ node = rb_first_cached(root);
+ he = rb_entry(node, struct hist_entry, rb_node);
+
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 0 && PID(he) == 100 &&
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
+ !strcmp(SYM(he), "schedule") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 2 && PID(he) == 200 &&
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "[kernel]") &&
+ !strcmp(SYM(he), "page_fault") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 1 && PID(he) == 300 &&
+ !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "[kernel]") &&
+ !strcmp(SYM(he), "page_fault") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 0 && PID(he) == 300 &&
+ !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
+ !strcmp(SYM(he), "xmalloc") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 3 && PID(he) == 300 &&
+ !strcmp(COMM(he), "bash") && !strcmp(DSO(he), "bash") &&
+ !strcmp(SYM(he), "main") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 1 && PID(he) == 100 &&
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
+ !strcmp(SYM(he), "malloc") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 2 && PID(he) == 100 &&
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "libc") &&
+ !strcmp(SYM(he), "free") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 1 && PID(he) == 100 &&
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
+ !strcmp(SYM(he), "cmd_record") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 1 && PID(he) == 100 &&
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
+ !strcmp(SYM(he), "main") && he->stat.period == 100);
+
+ node = rb_next(node);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ TEST_ASSERT_VAL("Invalid hist entry",
+ CPU(he) == 2 && PID(he) == 200 &&
+ !strcmp(COMM(he), "perf") && !strcmp(DSO(he), "perf") &&
+ !strcmp(SYM(he), "main") && he->stat.period == 100);
+
+out:
+ del_hist_entries(hists);
+ reset_output_field();
+ return err;
+}
+
+static int test__hists_output(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int err = TEST_FAIL;
+ struct machines machines;
+ struct machine *machine;
+ struct evsel *evsel;
+ struct evlist *evlist = evlist__new();
+ size_t i;
+ test_fn_t testcases[] = {
+ test1,
+ test2,
+ test3,
+ test4,
+ test5,
+ };
+
+ TEST_ASSERT_VAL("No memory", evlist);
+
+ err = parse_event(evlist, "cpu-clock");
+ if (err)
+ goto out;
+ err = TEST_FAIL;
+
+ machines__init(&machines);
+
+ /* setup threads/dso/map/symbols also */
+ machine = setup_fake_machine(&machines);
+ if (!machine)
+ goto out;
+
+ if (verbose > 1)
+ machine__fprintf(machine, stderr);
+
+ evsel = evlist__first(evlist);
+
+ for (i = 0; i < ARRAY_SIZE(testcases); i++) {
+ err = testcases[i](evsel, machine);
+ if (err < 0)
+ break;
+ }
+
+out:
+ /* tear down everything */
+ evlist__delete(evlist);
+ machines__exit(&machines);
+
+ return err;
+}
+
+DEFINE_SUITE("Sort output of hist entries", hists_output);
diff --git a/tools/perf/tests/is_printable_array.c b/tools/perf/tests/is_printable_array.c
new file mode 100644
index 000000000..f72de2457
--- /dev/null
+++ b/tools/perf/tests/is_printable_array.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include "tests.h"
+#include "debug.h"
+#include "print_binary.h"
+
+static int test__is_printable_array(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ char buf1[] = { 'k', 'r', 4, 'v', 'a', 0 };
+ char buf2[] = { 'k', 'r', 'a', 'v', 4, 0 };
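+ /*
+ * is_printable_array() is expected to return true only for a non-empty,
+ * NUL-terminated buffer whose other bytes are all printable, as the
+ * cases below exercise.
+ */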
+ struct {
+ char *buf;
+ unsigned int len;
+ int ret;
+ } t[] = {
+ { (char *) "krava", sizeof("krava"), 1 },
+ { (char *) "krava", sizeof("krava") - 1, 0 },
+ { (char *) "", sizeof(""), 1 },
+ { (char *) "", 0, 0 },
+ { NULL, 0, 0 },
+ { buf1, sizeof(buf1), 0 },
+ { buf2, sizeof(buf2), 0 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(t); i++) {
+ int ret;
+
+ ret = is_printable_array((char *) t[i].buf, t[i].len);
+ if (ret != t[i].ret) {
+ pr_err("failed: test %u\n", i);
+ return TEST_FAIL;
+ }
+ }
+
+ return TEST_OK;
+}
+
+DEFINE_SUITE("is_printable_array", is_printable_array);
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
new file mode 100644
index 000000000..8f4f9b632
--- /dev/null
+++ b/tools/perf/tests/keep-tracking.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <limits.h>
+#include <unistd.h>
+#include <sys/prctl.h>
+#include <perf/cpumap.h>
+#include <perf/evlist.h>
+#include <perf/mmap.h>
+
+#include "debug.h"
+#include "parse-events.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "record.h"
+#include "thread_map.h"
+#include "tests.h"
+#include "util/mmap.h"
+
+#define CHECK__(x) { \
+ while ((x) < 0) { \
+ pr_debug(#x " failed!\n"); \
+ goto out_err; \
+ } \
+}
+
+#define CHECK_NOT_NULL__(x) { \
+ while ((x) == NULL) { \
+ pr_debug(#x " failed!\n"); \
+ goto out_err; \
+ } \
+}
+
+static int find_comm(struct evlist *evlist, const char *comm)
+{
+ union perf_event *event;
+ struct mmap *md;
+ int i, found;
+
+ found = 0;
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ md = &evlist->mmap[i];
+ if (perf_mmap__read_init(&md->core) < 0)
+ continue;
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
+ if (event->header.type == PERF_RECORD_COMM &&
+ (pid_t)event->comm.pid == getpid() &&
+ (pid_t)event->comm.tid == getpid() &&
+ strcmp(event->comm.comm, comm) == 0)
+ found += 1;
+ perf_mmap__consume(&md->core);
+ }
+ perf_mmap__read_done(&md->core);
+ }
+ return found;
+}
+
+/**
+ * test__keep_tracking - test using a dummy software event to keep tracking.
+ *
+ * This function implements a test that checks that tracking events continue
+ * when an event is disabled but a dummy software event is not disabled. If the
+ * test passes %0 is returned, otherwise %-1 is returned.
+ */
+static int test__keep_tracking(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct record_opts opts = {
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .target = {
+ .uses_mmap = true,
+ },
+ };
+ struct perf_thread_map *threads = NULL;
+ struct perf_cpu_map *cpus = NULL;
+ struct evlist *evlist = NULL;
+ struct evsel *evsel = NULL;
+ int found, err = -1;
+ const char *comm;
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ CHECK_NOT_NULL__(threads);
+
+ cpus = perf_cpu_map__new(NULL);
+ CHECK_NOT_NULL__(cpus);
+
+ evlist = evlist__new();
+ CHECK_NOT_NULL__(evlist);
+
+ perf_evlist__set_maps(&evlist->core, cpus, threads);
+
+ CHECK__(parse_event(evlist, "dummy:u"));
+ CHECK__(parse_event(evlist, "cycles:u"));
+
+ evlist__config(evlist, &opts, NULL);
+
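+	/*
+	 * The first event is the dummy software event: have it collect comm
+	 * records and keep it under explicit enable/disable control.
+	 */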
+ evsel = evlist__first(evlist);
+
+ evsel->core.attr.comm = 1;
+ evsel->core.attr.disabled = 1;
+ evsel->core.attr.enable_on_exec = 0;
+
+ if (evlist__open(evlist) < 0) {
+ pr_debug("Unable to open dummy and cycles event\n");
+ err = TEST_SKIP;
+ goto out_err;
+ }
+
+ CHECK__(evlist__mmap(evlist, UINT_MAX));
+
+ /*
+ * First, test that a 'comm' event can be found when the event is
+ * enabled.
+ */
+
+ evlist__enable(evlist);
+
+ comm = "Test COMM 1";
+ CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));
+
+ evlist__disable(evlist);
+
+ found = find_comm(evlist, comm);
+ if (found != 1) {
+ pr_debug("First time, failed to find tracking event.\n");
+ goto out_err;
+ }
+
+ /*
+ * Secondly, test that a 'comm' event can be found when the event is
+ * disabled with the dummy event still enabled.
+ */
+
+ evlist__enable(evlist);
+
+ evsel = evlist__last(evlist);
+
+ CHECK__(evsel__disable(evsel));
+
+ comm = "Test COMM 2";
+ CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));
+
+ evlist__disable(evlist);
+
+ found = find_comm(evlist, comm);
+ if (found != 1) {
+ pr_debug("Second time, failed to find tracking event.\n");
+ goto out_err;
+ }
+
+ err = 0;
+
+out_err:
+ if (evlist) {
+ evlist__disable(evlist);
+ evlist__delete(evlist);
+ }
+ perf_cpu_map__put(cpus);
+ perf_thread_map__put(threads);
+
+ return err;
+}
+
+DEFINE_SUITE("Use a dummy software event to keep tracking", keep_tracking);
diff --git a/tools/perf/tests/kmod-path.c b/tools/perf/tests/kmod-path.c
new file mode 100644
index 000000000..dfe1bd5da
--- /dev/null
+++ b/tools/perf/tests/kmod-path.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include "tests.h"
+#include "dso.h"
+#include "debug.h"
+#include "event.h"
+
+static int test(const char *path, bool alloc_name, bool kmod,
+ int comp, const char *name)
+{
+ struct kmod_path m;
+
+ memset(&m, 0x0, sizeof(m));
+
+ TEST_ASSERT_VAL("kmod_path__parse",
+ !__kmod_path__parse(&m, path, alloc_name));
+
+ pr_debug("%s - alloc name %d, kmod %d, comp %d, name '%s'\n",
+ path, alloc_name, m.kmod, m.comp, m.name);
+
+ TEST_ASSERT_VAL("wrong kmod", m.kmod == kmod);
+ TEST_ASSERT_VAL("wrong comp", m.comp == comp);
+
+ if (name)
+ TEST_ASSERT_VAL("wrong name", m.name && !strcmp(name, m.name));
+ else
+ TEST_ASSERT_VAL("wrong name", !m.name);
+
+ free(m.name);
+ return 0;
+}
+
+static int test_is_kernel_module(const char *path, int cpumode, bool expect)
+{
+ TEST_ASSERT_VAL("is_kernel_module",
+ (!!is_kernel_module(path, cpumode)) == (!!expect));
+ pr_debug("%s (cpumode: %d) - is_kernel_module: %s\n",
+ path, cpumode, expect ? "true" : "false");
+ return 0;
+}
+
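+/*
+ * T() parses a path and checks the resulting kmod/comp/name fields,
+ * M() checks is_kernel_module() for a path and cpumode pair.
+ */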
+#define T(path, an, k, c, n) \
+ TEST_ASSERT_VAL("failed", !test(path, an, k, c, n))
+
+#define M(path, c, e) \
+ TEST_ASSERT_VAL("failed", !test_is_kernel_module(path, c, e))
+
+static int test__kmod_path__parse(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
+{
+ /* path alloc_name kmod comp name */
+ T("/xxxx/xxxx/x-x.ko", true , true, 0 , "[x_x]");
+ T("/xxxx/xxxx/x-x.ko", false , true, 0 , NULL );
+ T("/xxxx/xxxx/x-x.ko", true , true, 0 , "[x_x]");
+ T("/xxxx/xxxx/x-x.ko", false , true, 0 , NULL );
+ M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
+ M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_KERNEL, true);
+ M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_USER, false);
+
+#ifdef HAVE_ZLIB_SUPPORT
+ /* path alloc_name kmod comp name */
+ T("/xxxx/xxxx/x.ko.gz", true , true, 1 , "[x]");
+ T("/xxxx/xxxx/x.ko.gz", false , true, 1 , NULL );
+ T("/xxxx/xxxx/x.ko.gz", true , true, 1 , "[x]");
+ T("/xxxx/xxxx/x.ko.gz", false , true, 1 , NULL );
+ M("/xxxx/xxxx/x.ko.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
+ M("/xxxx/xxxx/x.ko.gz", PERF_RECORD_MISC_KERNEL, true);
+ M("/xxxx/xxxx/x.ko.gz", PERF_RECORD_MISC_USER, false);
+
+ /* path alloc_name kmod comp name */
+ T("/xxxx/xxxx/x.gz", true , false, 1 , "x.gz");
+ T("/xxxx/xxxx/x.gz", false , false, 1 , NULL );
+ T("/xxxx/xxxx/x.gz", true , false, 1 , "x.gz");
+ T("/xxxx/xxxx/x.gz", false , false, 1 , NULL );
+ M("/xxxx/xxxx/x.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+ M("/xxxx/xxxx/x.gz", PERF_RECORD_MISC_KERNEL, false);
+ M("/xxxx/xxxx/x.gz", PERF_RECORD_MISC_USER, false);
+
+ /* path alloc_name kmod comp name */
+ T("x.gz", true , false, 1 , "x.gz");
+ T("x.gz", false , false, 1 , NULL );
+ T("x.gz", true , false, 1 , "x.gz");
+ T("x.gz", false , false, 1 , NULL );
+ M("x.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+ M("x.gz", PERF_RECORD_MISC_KERNEL, false);
+ M("x.gz", PERF_RECORD_MISC_USER, false);
+
+ /* path alloc_name kmod comp name */
+ T("x.ko.gz", true , true, 1 , "[x]");
+ T("x.ko.gz", false , true, 1 , NULL );
+ T("x.ko.gz", true , true, 1 , "[x]");
+ T("x.ko.gz", false , true, 1 , NULL );
+ M("x.ko.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
+ M("x.ko.gz", PERF_RECORD_MISC_KERNEL, true);
+ M("x.ko.gz", PERF_RECORD_MISC_USER, false);
+#endif
+
+ /* path alloc_name kmod comp name */
+ T("[test_module]", true , true, false, "[test_module]");
+ T("[test_module]", false , true, false, NULL );
+ T("[test_module]", true , true, false, "[test_module]");
+ T("[test_module]", false , true, false, NULL );
+ M("[test_module]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
+ M("[test_module]", PERF_RECORD_MISC_KERNEL, true);
+ M("[test_module]", PERF_RECORD_MISC_USER, false);
+
+ /* path alloc_name kmod comp name */
+ T("[test.module]", true , true, false, "[test.module]");
+ T("[test.module]", false , true, false, NULL );
+ T("[test.module]", true , true, false, "[test.module]");
+ T("[test.module]", false , true, false, NULL );
+ M("[test.module]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
+ M("[test.module]", PERF_RECORD_MISC_KERNEL, true);
+ M("[test.module]", PERF_RECORD_MISC_USER, false);
+
+ /* path alloc_name kmod comp name */
+ T("[vdso]", true , false, false, "[vdso]");
+ T("[vdso]", false , false, false, NULL );
+ T("[vdso]", true , false, false, "[vdso]");
+ T("[vdso]", false , false, false, NULL );
+ M("[vdso]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+ M("[vdso]", PERF_RECORD_MISC_KERNEL, false);
+ M("[vdso]", PERF_RECORD_MISC_USER, false);
+
+ T("[vdso32]", true , false, false, "[vdso32]");
+ T("[vdso32]", false , false, false, NULL );
+ T("[vdso32]", true , false, false, "[vdso32]");
+ T("[vdso32]", false , false, false, NULL );
+ M("[vdso32]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+ M("[vdso32]", PERF_RECORD_MISC_KERNEL, false);
+ M("[vdso32]", PERF_RECORD_MISC_USER, false);
+
+ T("[vdsox32]", true , false, false, "[vdsox32]");
+ T("[vdsox32]", false , false, false, NULL );
+ T("[vdsox32]", true , false, false, "[vdsox32]");
+ T("[vdsox32]", false , false, false, NULL );
+ M("[vdsox32]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+ M("[vdsox32]", PERF_RECORD_MISC_KERNEL, false);
+ M("[vdsox32]", PERF_RECORD_MISC_USER, false);
+
+ /* path alloc_name kmod comp name */
+ T("[vsyscall]", true , false, false, "[vsyscall]");
+ T("[vsyscall]", false , false, false, NULL );
+ T("[vsyscall]", true , false, false, "[vsyscall]");
+ T("[vsyscall]", false , false, false, NULL );
+ M("[vsyscall]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+ M("[vsyscall]", PERF_RECORD_MISC_KERNEL, false);
+ M("[vsyscall]", PERF_RECORD_MISC_USER, false);
+
+ /* path alloc_name kmod comp name */
+ T("[kernel.kallsyms]", true , false, false, "[kernel.kallsyms]");
+ T("[kernel.kallsyms]", false , false, false, NULL );
+ T("[kernel.kallsyms]", true , false, false, "[kernel.kallsyms]");
+ T("[kernel.kallsyms]", false , false, false, NULL );
+ M("[kernel.kallsyms]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+ M("[kernel.kallsyms]", PERF_RECORD_MISC_KERNEL, false);
+ M("[kernel.kallsyms]", PERF_RECORD_MISC_USER, false);
+
+ return 0;
+}
+
+DEFINE_SUITE("kmod_path__parse", kmod_path__parse);
diff --git a/tools/perf/tests/llvm.c b/tools/perf/tests/llvm.c
new file mode 100644
index 000000000..0bc25a56c
--- /dev/null
+++ b/tools/perf/tests/llvm.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "tests.h"
+#include "debug.h"
+
+#ifdef HAVE_LIBBPF_SUPPORT
+#include <bpf/libbpf.h>
+#include <util/llvm-utils.h>
+#include "llvm.h"
+static int test__bpf_parsing(void *obj_buf, size_t obj_buf_sz)
+{
+ struct bpf_object *obj;
+
+ obj = bpf_object__open_mem(obj_buf, obj_buf_sz, NULL);
+ if (libbpf_get_error(obj))
+ return TEST_FAIL;
+ bpf_object__close(obj);
+ return TEST_OK;
+}
+
+static struct {
+ const char *source;
+ const char *desc;
+ bool should_load_fail;
+} bpf_source_table[__LLVM_TESTCASE_MAX] = {
+ [LLVM_TESTCASE_BASE] = {
+ .source = test_llvm__bpf_base_prog,
+ .desc = "Basic BPF llvm compile",
+ },
+ [LLVM_TESTCASE_KBUILD] = {
+ .source = test_llvm__bpf_test_kbuild_prog,
+ .desc = "kbuild searching",
+ },
+ [LLVM_TESTCASE_BPF_PROLOGUE] = {
+ .source = test_llvm__bpf_test_prologue_prog,
+ .desc = "Compile source for BPF prologue generation",
+ },
+ [LLVM_TESTCASE_BPF_RELOCATION] = {
+ .source = test_llvm__bpf_test_relocation,
+ .desc = "Compile source for BPF relocation",
+ .should_load_fail = true,
+ },
+};
+
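+/*
+ * Compile one of the embedded test programs with the configured clang
+ * command. The command template is temporarily rewritten so the source is
+ * piped in via stdin ("echo '%s' | ...") and "-xc" is prepended to the
+ * clang options; both are restored once llvm__compile_bpf() returns.
+ */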
+int
+test_llvm__fetch_bpf_obj(void **p_obj_buf,
+ size_t *p_obj_buf_sz,
+ enum test_llvm__testcase idx,
+ bool force,
+ bool *should_load_fail)
+{
+ const char *source;
+ const char *desc;
+ const char *tmpl_old, *clang_opt_old;
+ char *tmpl_new = NULL, *clang_opt_new = NULL;
+ int err, old_verbose, ret = TEST_FAIL;
+
+ if (idx >= __LLVM_TESTCASE_MAX)
+ return TEST_FAIL;
+
+ source = bpf_source_table[idx].source;
+ desc = bpf_source_table[idx].desc;
+ if (should_load_fail)
+ *should_load_fail = bpf_source_table[idx].should_load_fail;
+
+ /*
+	 * Skip this test if the user's .perfconfig doesn't set the [llvm]
+	 * section and clang is not found in $PATH.
+ */
+ if (!force && (!llvm_param.user_set_param &&
+ llvm__search_clang())) {
+ pr_debug("No clang, skip this test\n");
+ return TEST_SKIP;
+ }
+
+ /*
+	 * llvm is verbose on error. Suppress all error output unless
+	 * running 'perf test -v'.
+ */
+ old_verbose = verbose;
+ if (verbose == 0)
+ verbose = -1;
+
+ *p_obj_buf = NULL;
+ *p_obj_buf_sz = 0;
+
+ if (!llvm_param.clang_bpf_cmd_template)
+ goto out;
+
+ if (!llvm_param.clang_opt)
+ llvm_param.clang_opt = strdup("");
+
+ err = asprintf(&tmpl_new, "echo '%s' | %s%s", source,
+ llvm_param.clang_bpf_cmd_template,
+ old_verbose ? "" : " 2>/dev/null");
+ if (err < 0)
+ goto out;
+ err = asprintf(&clang_opt_new, "-xc %s", llvm_param.clang_opt);
+ if (err < 0)
+ goto out;
+
+ tmpl_old = llvm_param.clang_bpf_cmd_template;
+ llvm_param.clang_bpf_cmd_template = tmpl_new;
+ clang_opt_old = llvm_param.clang_opt;
+ llvm_param.clang_opt = clang_opt_new;
+
+ err = llvm__compile_bpf("-", p_obj_buf, p_obj_buf_sz);
+
+ llvm_param.clang_bpf_cmd_template = tmpl_old;
+ llvm_param.clang_opt = clang_opt_old;
+
+ verbose = old_verbose;
+ if (err)
+ goto out;
+
+ ret = TEST_OK;
+out:
+ free(tmpl_new);
+ free(clang_opt_new);
+ if (ret != TEST_OK)
+ pr_debug("Failed to compile test case: '%s'\n", desc);
+ return ret;
+}
+
+static int test__llvm(int subtest)
+{
+ int ret;
+ void *obj_buf = NULL;
+ size_t obj_buf_sz = 0;
+ bool should_load_fail = false;
+
+ if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
+ return TEST_FAIL;
+
+ ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
+ subtest, false, &should_load_fail);
+
+ if (ret == TEST_OK && !should_load_fail) {
+ ret = test__bpf_parsing(obj_buf, obj_buf_sz);
+ if (ret != TEST_OK) {
+ pr_debug("Failed to parse test case '%s'\n",
+ bpf_source_table[subtest].desc);
+ }
+ }
+ free(obj_buf);
+
+ return ret;
+}
+#endif //HAVE_LIBBPF_SUPPORT
+
+static int test__llvm__bpf_base_prog(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__llvm(LLVM_TESTCASE_BASE);
+#else
+ pr_debug("Skip LLVM test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
+}
+
+static int test__llvm__bpf_test_kbuild_prog(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__llvm(LLVM_TESTCASE_KBUILD);
+#else
+ pr_debug("Skip LLVM test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
+}
+
+static int test__llvm__bpf_test_prologue_prog(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__llvm(LLVM_TESTCASE_BPF_PROLOGUE);
+#else
+ pr_debug("Skip LLVM test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
+}
+
+static int test__llvm__bpf_test_relocation(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+#ifdef HAVE_LIBBPF_SUPPORT
+ return test__llvm(LLVM_TESTCASE_BPF_RELOCATION);
+#else
+ pr_debug("Skip LLVM test because BPF support is not compiled\n");
+ return TEST_SKIP;
+#endif
+}
+
+
+static struct test_case llvm_tests[] = {
+#ifdef HAVE_LIBBPF_SUPPORT
+ TEST_CASE("Basic BPF llvm compile", llvm__bpf_base_prog),
+ TEST_CASE("kbuild searching", llvm__bpf_test_kbuild_prog),
+ TEST_CASE("Compile source for BPF prologue generation",
+ llvm__bpf_test_prologue_prog),
+ TEST_CASE("Compile source for BPF relocation", llvm__bpf_test_relocation),
+#else
+ TEST_CASE_REASON("Basic BPF llvm compile", llvm__bpf_base_prog, "not compiled in"),
+ TEST_CASE_REASON("kbuild searching", llvm__bpf_test_kbuild_prog, "not compiled in"),
+ TEST_CASE_REASON("Compile source for BPF prologue generation",
+ llvm__bpf_test_prologue_prog, "not compiled in"),
+ TEST_CASE_REASON("Compile source for BPF relocation",
+ llvm__bpf_test_relocation, "not compiled in"),
+#endif
+ { .name = NULL, }
+};
+
+struct test_suite suite__llvm = {
+ .desc = "LLVM search and compile",
+ .test_cases = llvm_tests,
+};
diff --git a/tools/perf/tests/llvm.h b/tools/perf/tests/llvm.h
new file mode 100644
index 000000000..f68b0d9b8
--- /dev/null
+++ b/tools/perf/tests/llvm.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef PERF_TEST_LLVM_H
+#define PERF_TEST_LLVM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h> /* for size_t */
+#include <stdbool.h> /* for bool */
+
+extern const char test_llvm__bpf_base_prog[];
+extern const char test_llvm__bpf_test_kbuild_prog[];
+extern const char test_llvm__bpf_test_prologue_prog[];
+extern const char test_llvm__bpf_test_relocation[];
+
+enum test_llvm__testcase {
+ LLVM_TESTCASE_BASE,
+ LLVM_TESTCASE_KBUILD,
+ LLVM_TESTCASE_BPF_PROLOGUE,
+ LLVM_TESTCASE_BPF_RELOCATION,
+ __LLVM_TESTCASE_MAX,
+};
+
+int test_llvm__fetch_bpf_obj(void **p_obj_buf, size_t *p_obj_buf_sz,
+ enum test_llvm__testcase index, bool force,
+ bool *should_load_fail);
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
new file mode 100644
index 000000000..da013e90a
--- /dev/null
+++ b/tools/perf/tests/make
@@ -0,0 +1,412 @@
+include ../scripts/Makefile.include
+
+ifndef MK
+ifeq ($(MAKECMDGOALS),)
+# no target specified, trigger the whole suite
+all:
+ @echo "Testing Makefile"; $(MAKE) -sf tests/make MK=Makefile
+ @echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf SET_PARALLEL=1 SET_O=1
+else
+# run only specific test over 'Makefile'
+%:
+ @echo "Testing Makefile"; $(MAKE) -sf tests/make MK=Makefile $@
+endif
+else
+PERF := .
+PERF_O := $(PERF)
+O_OPT :=
+FULL_O := $(shell readlink -f $(PERF_O) || echo $(PERF_O))
+
+ifneq ($(O),)
+ FULL_O := $(shell readlink -f $(O) || echo $(O))
+ PERF_O := $(FULL_O)
+ ifeq ($(SET_O),1)
+ O_OPT := 'O=$(FULL_O)'
+ endif
+ K_O_OPT := 'O=$(FULL_O)'
+endif
+
+PARALLEL_OPT=
+ifeq ($(SET_PARALLEL),1)
+ ifeq ($(JOBS),)
+ cores := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
+ ifeq ($(cores),0)
+ cores := 1
+ endif
+ else
+ cores=$(JOBS)
+ endif
+ PARALLEL_OPT="-j$(cores)"
+endif
+
+# As per kernel Makefile, avoid funny character set dependencies
+unexport LC_ALL
+LC_COLLATE=C
+LC_NUMERIC=C
+export LC_COLLATE LC_NUMERIC
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
+
+include $(srctree)/tools/scripts/Makefile.arch
+
+# FIXME looks like x86 is the only arch running tests ;-)
+# we need some IS_(32/64) flag to make this generic
+ifeq ($(ARCH)$(IS_64_BIT), x861)
+lib = lib64
+else
+lib = lib
+endif
+
+has = $(shell which $1 2>/dev/null)
+
+# standard single make variable specified
+make_clean_all := clean all
+make_python_perf_so := python/perf.so
+make_debug := DEBUG=1
+make_no_libperl := NO_LIBPERL=1
+make_no_libpython := NO_LIBPYTHON=1
+make_no_scripts := NO_LIBPYTHON=1 NO_LIBPERL=1
+make_no_newt := NO_NEWT=1
+make_no_slang := NO_SLANG=1
+make_no_gtk2 := NO_GTK2=1
+make_no_ui := NO_NEWT=1 NO_SLANG=1 NO_GTK2=1
+make_no_demangle := NO_DEMANGLE=1
+make_no_libelf := NO_LIBELF=1
+make_no_libunwind := NO_LIBUNWIND=1
+make_no_libdw_dwarf_unwind := NO_LIBDW_DWARF_UNWIND=1
+make_no_backtrace := NO_BACKTRACE=1
+make_no_libnuma := NO_LIBNUMA=1
+make_no_libaudit := NO_LIBAUDIT=1
+make_no_libbionic := NO_LIBBIONIC=1
+make_no_auxtrace := NO_AUXTRACE=1
+make_no_libbpf := NO_LIBBPF=1
+make_libbpf_dynamic := LIBBPF_DYNAMIC=1
+make_no_libbpf_DEBUG := NO_LIBBPF=1 DEBUG=1
+make_no_libcrypto := NO_LIBCRYPTO=1
+make_with_babeltrace:= LIBBABELTRACE=1
+make_with_coresight := CORESIGHT=1
+make_no_sdt := NO_SDT=1
+make_no_syscall_tbl := NO_SYSCALL_TABLE=1
+make_with_clangllvm := LIBCLANGLLVM=1
+make_with_libpfm4 := LIBPFM4=1
+make_with_gtk2 := GTK2=1
+make_tags := tags
+make_cscope := cscope
+make_help := help
+make_doc := doc
+make_perf_o := perf.o
+make_util_map_o := util/map.o
+make_util_pmu_bison_o := util/pmu-bison.o
+make_install := install
+make_install_bin := install-bin
+make_install_doc := install-doc
+make_install_man := install-man
+make_install_html := install-html
+make_install_info := install-info
+make_install_pdf := install-pdf
+make_install_prefix := install prefix=/tmp/krava
+make_install_prefix_slash := install prefix=/tmp/krava/
+make_static := LDFLAGS=-static NO_PERF_READ_VDSO32=1 NO_PERF_READ_VDSOX32=1 NO_JVMTI=1
+
+# all the NO_* variable combined
+make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
+make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
+make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
+make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
+make_minimal += NO_LIBCRYPTO=1 NO_SDT=1 NO_JVMTI=1 NO_LIBZSTD=1
+make_minimal += NO_LIBCAP=1 NO_SYSCALL_TABLE=1
+
+# $(run) contains all available tests
+run := make_pure
+# The 'clean all' targets can be run together only through the top level
+# Makefile, because we detect the clean target in Makefile.perf and
+# disable feature detection
+ifeq ($(MK),Makefile)
+run += make_clean_all
+MAKE_F := $(MAKE)
+else
+MAKE_F := $(MAKE) -f $(MK)
+endif
+run += make_python_perf_so
+run += make_debug
+run += make_no_libperl
+run += make_no_libpython
+run += make_no_scripts
+run += make_no_newt
+run += make_no_slang
+run += make_no_gtk2
+run += make_no_ui
+run += make_no_demangle
+run += make_no_libelf
+run += make_no_libunwind
+run += make_no_libdw_dwarf_unwind
+run += make_no_backtrace
+run += make_no_libnuma
+run += make_no_libaudit
+run += make_no_libbionic
+run += make_no_auxtrace
+run += make_no_libbpf
+run += make_libbpf_dynamic
+run += make_no_libbpf_DEBUG
+run += make_no_libcrypto
+run += make_no_sdt
+run += make_no_syscall_tbl
+run += make_with_babeltrace
+run += make_with_coresight
+run += make_with_clangllvm
+run += make_with_libpfm4
+run += make_help
+run += make_doc
+run += make_perf_o
+run += make_util_map_o
+run += make_util_pmu_bison_o
+run += make_install
+run += make_install_bin
+run += make_install_prefix
+run += make_install_prefix_slash
+# FIXME 'install-*' commented out till they're fixed
+# run += make_install_doc
+# run += make_install_man
+# run += make_install_html
+# run += make_install_info
+# run += make_install_pdf
+run += make_minimal
+
+ifneq ($(call has,ctags),)
+run += make_tags
+endif
+ifneq ($(call has,cscope),)
+run += make_cscope
+endif
+
+# $(run_O) contains the same tests as $(run), with '_O' appended
+# to distinguish the O=... variants
+run_O := $(addsuffix _O,$(run))
+
+# disable some tests for O=...
+run_O := $(filter-out make_python_perf_so_O,$(run_O))
+
+# define test for each compile as 'test_NAME' variable
+# with the test itself as a value
+test_make_tags = test -f tags
+test_make_cscope = test -f cscope.out
+
+test_make_tags_O := $(test_make_tags)
+test_make_cscope_O := $(test_make_cscope)
+
+test_ok := true
+test_make_help := $(test_ok)
+test_make_doc := $(test_ok)
+test_make_help_O := $(test_ok)
+test_make_doc_O := $(test_ok)
+
+test_make_python_perf_so := test -f $(PERF_O)/python/perf.so
+
+test_make_perf_o := test -f $(PERF_O)/perf.o
+test_make_util_map_o := test -f $(PERF_O)/util/map.o
+test_make_util_pmu_bison_o := test -f $(PERF_O)/util/pmu-bison.o
+
+define test_dest_files
+ for file in $(1); do \
+ if [ ! -x $$TMP_DEST/$$file ]; then \
+ echo " failed to find: $$file"; \
+ fi \
+ done
+endef
+
+installed_files_bin := bin/perf
+installed_files_bin += etc/bash_completion.d/perf
+installed_files_bin += libexec/perf-core/perf-archive
+
+installed_files_plugins := $(lib)/traceevent/plugins/plugin_cfg80211.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_scsi.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_xen.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_function.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_sched_switch.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_mac80211.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_kvm.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_kmem.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_hrtimer.so
+installed_files_plugins += $(lib)/traceevent/plugins/plugin_jbd2.so
+
+installed_files_all := $(installed_files_bin)
+installed_files_all += $(installed_files_plugins)
+
+test_make_install := $(call test_dest_files,$(installed_files_all))
+test_make_install_O := $(call test_dest_files,$(installed_files_all))
+test_make_install_bin := $(call test_dest_files,$(installed_files_bin))
+test_make_install_bin_O := $(call test_dest_files,$(installed_files_bin))
+
+# We prefix all installed files for make_install_prefix(_slash)
+# with '/tmp/krava' to match installed/prefix-ed files.
+installed_files_all_prefix := $(addprefix /tmp/krava/,$(installed_files_all))
+test_make_install_prefix := $(call test_dest_files,$(installed_files_all_prefix))
+test_make_install_prefix_O := $(call test_dest_files,$(installed_files_all_prefix))
+
+test_make_install_prefix_slash := $(test_make_install_prefix)
+test_make_install_prefix_slash_O := $(test_make_install_prefix_O)
+
+# FIXME nothing gets installed
+test_make_install_man := test -f $$TMP_DEST/share/man/man1/perf.1
+test_make_install_man_O := $(test_make_install_man)
+
+# FIXME nothing gets installed
+test_make_install_doc := $(test_ok)
+test_make_install_doc_O := $(test_ok)
+
+# FIXME nothing gets installed
+test_make_install_html := $(test_ok)
+test_make_install_html_O := $(test_ok)
+
+# FIXME nothing gets installed
+test_make_install_info := $(test_ok)
+test_make_install_info_O := $(test_ok)
+
+# FIXME nothing gets installed
+test_make_install_pdf := $(test_ok)
+test_make_install_pdf_O := $(test_ok)
+
+test_make_libbpf_dynamic := ldd $(PERF_O)/perf | grep -q libbpf
+test_make_libbpf_dynamic_O := ldd $$TMP_O/perf | grep -q libbpf
+
+test_make_python_perf_so_O := test -f $$TMP_O/python/perf.so
+test_make_perf_o_O := test -f $$TMP_O/perf.o
+test_make_util_map_o_O := test -f $$TMP_O/util/map.o
+test_make_util_pmu_bison_o_O := test -f $$TMP_O/util/pmu-bison.o
+
+test_default = test -x $(PERF_O)/perf
+test = $(if $(test_$1),$(test_$1),$(test_default))
+
+test_default_O = test -x $$TMP_O/perf
+test_O = $(if $(test_$1),$(test_$1),$(test_default_O))
+
+all:
+
+ifdef SHUF
+run := $(shell shuf -e $(run))
+run_O := $(shell shuf -e $(run_O))
+endif
+
+max_width := $(shell echo $(run_O) | sed 's/ /\n/g' | wc -L)
+
+ifdef DEBUG
+d := $(info run $(run))
+d := $(info run_O $(run_O))
+endif
+
+MAKEFLAGS := --no-print-directory
+
+clean := @(cd $(PERF); $(MAKE_F) -s $(O_OPT) clean >/dev/null && $(MAKE) -s $(O_OPT) -C ../build clean >/dev/null)
+
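+# Each test target builds perf with its option set into a temporary DESTDIR,
+# logging the command and its output to a file named after the target; the
+# log and DESTDIR are removed only if the build and its check succeed,
+# otherwise the log is dumped and the target fails.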
+$(run):
+ $(call clean)
+ @TMP_DEST=$$(mktemp -d); \
+ cmd="cd $(PERF) && $(MAKE_F) $($@) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST"; \
+ printf "%*.*s: %s\n" $(max_width) $(max_width) "$@" "$$cmd" && echo $$cmd > $@ && \
+ ( eval $$cmd ) >> $@ 2>&1; \
+ echo " test: $(call test,$@)" >> $@ 2>&1; \
+ $(call test,$@) && \
+ rm -rf $@ $$TMP_DEST || (cat $@ ; false)
+
+make_with_gtk2:
+ $(call clean)
+ @TMP_DEST=$$(mktemp -d); \
+ cmd="cd $(PERF) && $(MAKE_F) $($@) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST"; \
+ printf "%*.*s: %s\n" $(max_width) $(max_width) "$@" "$$cmd" && echo $$cmd > $@ && \
+ ( eval $$cmd ) >> $@ 2>&1; \
+ echo " test: $(call test,$@)" >> $@ 2>&1; \
+ $(call test,$@) && \
+ rm -rf $@ $$TMP_DEST || (cat $@ ; false)
+
+make_static:
+ $(call clean)
+ @TMP_DEST=$$(mktemp -d); \
+ cmd="cd $(PERF) && $(MAKE_F) $($@) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST"; \
+ printf "%*.*s: %s\n" $(max_width) $(max_width) "$@" "$$cmd" && echo $$cmd > $@ && \
+ ( eval $$cmd ) >> $@ 2>&1; \
+ echo " test: $(call test,$@)" >> $@ 2>&1; \
+ $(call test,$@) && \
+ rm -rf $@ $$TMP_DEST || (cat $@ ; false)
+
+$(run_O):
+ $(call clean)
+ @TMP_O=$$(mktemp -d); \
+ TMP_DEST=$$(mktemp -d); \
+ cmd="cd $(PERF) && $(MAKE_F) $($(patsubst %_O,%,$@)) $(PARALLEL_OPT) O=$$TMP_O DESTDIR=$$TMP_DEST"; \
+ printf "%*.*s: %s\n" $(max_width) $(max_width) "$@" "$$cmd" && echo $$cmd > $@ && \
+ ( eval $$cmd ) >> $@ 2>&1 && \
+ echo " test: $(call test_O,$@)" >> $@ 2>&1; \
+ $(call test_O,$@) && \
+ rm -rf $@ $$TMP_O $$TMP_DEST || (cat $@ ; false)
+
+tarpkg:
+ @cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \
+ echo "- $@: $$cmd" && echo $$cmd > $@ && \
+ ( eval $$cmd ) >> $@ 2>&1 && \
+ rm -f $@
+
+KERNEL_O := ../..
+ifneq ($(O),)
+ KERNEL_O := $(O)
+endif
+
+make_kernelsrc:
+ @echo "- make -C <kernelsrc> $(PARALLEL_OPT) $(K_O_OPT) tools/perf"
+ $(call clean); \
+ (make -C ../.. $(PARALLEL_OPT) $(K_O_OPT) tools/perf) > $@ 2>&1 && \
+ test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
+
+make_kernelsrc_tools:
+ @echo "- make -C <kernelsrc>/tools $(PARALLEL_OPT) $(K_O_OPT) perf"
+ $(call clean); \
+ (make -C ../../tools $(PARALLEL_OPT) $(K_O_OPT) perf) > $@ 2>&1 && \
+ test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
+
+make_libperf:
+ @echo "- make -C lib";
+ make -C lib clean >$@ 2>&1; make -C lib >>$@ 2>&1 && rm $@
+
+FEATURES_DUMP_FILE := $(FULL_O)/BUILD_TEST_FEATURE_DUMP
+FEATURES_DUMP_FILE_STATIC := $(FULL_O)/BUILD_TEST_FEATURE_DUMP_STATIC
+
+all: $(run) $(run_O) tarpkg make_kernelsrc make_kernelsrc_tools
+ @echo OK
+ @rm -f $(FEATURES_DUMP_FILE) $(FEATURES_DUMP_FILE_STATIC)
+
+out: $(run_O)
+ @echo OK
+ @rm -f $(FEATURES_DUMP_FILE) $(FEATURES_DUMP_FILE_STATIC)
+
+ifeq ($(REUSE_FEATURES_DUMP),1)
+$(FEATURES_DUMP_FILE):
+ $(call clean)
+ @cmd="cd $(PERF) && make FEATURE_DUMP_COPY=$@ $(O_OPT) feature-dump"; \
+ echo "- $@: $$cmd" && echo $$cmd && \
+ ( eval $$cmd ) > /dev/null 2>&1
+
+$(FEATURES_DUMP_FILE_STATIC):
+ $(call clean)
+ @cmd="cd $(PERF) && make FEATURE_DUMP_COPY=$@ $(O_OPT) LDFLAGS='-static' feature-dump"; \
+ echo "- $@: $$cmd" && echo $$cmd && \
+ ( eval $$cmd ) > /dev/null 2>&1
+
+# Add feature dump dependency for run/run_O targets
+$(foreach t,$(run) $(run_O),$(eval \
+ $(t): $(if $(findstring make_static,$(t)),\
+ $(FEATURES_DUMP_FILE_STATIC),\
+ $(FEATURES_DUMP_FILE))))
+
+# Append 'FEATURES_DUMP=' option to all test cases. For example:
+# make_no_libbpf: NO_LIBBPF=1 --> NO_LIBBPF=1 FEATURES_DUMP=/a/b/BUILD_TEST_FEATURE_DUMP
+# make_static: LDFLAGS=-static --> LDFLAGS=-static FEATURES_DUMP=/a/b/BUILD_TEST_FEATURE_DUMP_STATIC
+$(foreach t,$(run),$(if $(findstring make_static,$(t)),\
+ $(eval $(t) := $($(t)) FEATURES_DUMP=$(FEATURES_DUMP_FILE_STATIC)),\
+ $(eval $(t) := $($(t)) FEATURES_DUMP=$(FEATURES_DUMP_FILE))))
+endif
+
+.PHONY: all $(run) $(run_O) tarpkg clean make_kernelsrc make_kernelsrc_tools make_libperf
+endif # ifndef MK
diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c
new file mode 100644
index 000000000..a69988a89
--- /dev/null
+++ b/tools/perf/tests/maps.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include "tests.h"
+#include "map.h"
+#include "maps.h"
+#include "dso.h"
+#include "debug.h"
+
+struct map_def {
+ const char *name;
+ u64 start;
+ u64 end;
+};
+
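+/*
+ * Walk the maps in address order and compare each one against the expected
+ * 'merged' array: name, start, end, and a refcount of exactly one.
+ */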
+static int check_maps(struct map_def *merged, unsigned int size, struct maps *maps)
+{
+ struct map *map;
+ unsigned int i = 0;
+
+ maps__for_each_entry(maps, map) {
+ if (i > 0)
+ TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size));
+
+ TEST_ASSERT_VAL("wrong map start", map->start == merged[i].start);
+ TEST_ASSERT_VAL("wrong map end", map->end == merged[i].end);
+ TEST_ASSERT_VAL("wrong map name", !strcmp(map->dso->name, merged[i].name));
+ TEST_ASSERT_VAL("wrong map refcnt", refcount_read(&map->refcnt) == 1);
+
+ i++;
+ }
+
+ return TEST_OK;
+}
+
+static int test__maps__merge_in(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
+{
+ unsigned int i;
+ struct map_def bpf_progs[] = {
+ { "bpf_prog_1", 200, 300 },
+ { "bpf_prog_2", 500, 600 },
+ { "bpf_prog_3", 800, 900 },
+ };
+ struct map_def merged12[] = {
+ { "kcore1", 100, 200 },
+ { "bpf_prog_1", 200, 300 },
+ { "kcore1", 300, 500 },
+ { "bpf_prog_2", 500, 600 },
+ { "kcore1", 600, 800 },
+ { "bpf_prog_3", 800, 900 },
+ { "kcore1", 900, 1000 },
+ };
+ struct map_def merged3[] = {
+ { "kcore1", 100, 200 },
+ { "bpf_prog_1", 200, 300 },
+ { "kcore1", 300, 500 },
+ { "bpf_prog_2", 500, 600 },
+ { "kcore1", 600, 800 },
+ { "bpf_prog_3", 800, 900 },
+ { "kcore1", 900, 1000 },
+ { "kcore3", 1000, 1100 },
+ };
+ struct map *map_kcore1, *map_kcore2, *map_kcore3;
+ int ret;
+ struct maps *maps = maps__new(NULL);
+
+ TEST_ASSERT_VAL("failed to create maps", maps);
+
+ for (i = 0; i < ARRAY_SIZE(bpf_progs); i++) {
+ struct map *map;
+
+ map = dso__new_map(bpf_progs[i].name);
+ TEST_ASSERT_VAL("failed to create map", map);
+
+ map->start = bpf_progs[i].start;
+ map->end = bpf_progs[i].end;
+ maps__insert(maps, map);
+ map__put(map);
+ }
+
+ map_kcore1 = dso__new_map("kcore1");
+ TEST_ASSERT_VAL("failed to create map", map_kcore1);
+
+ map_kcore2 = dso__new_map("kcore2");
+ TEST_ASSERT_VAL("failed to create map", map_kcore2);
+
+ map_kcore3 = dso__new_map("kcore3");
+ TEST_ASSERT_VAL("failed to create map", map_kcore3);
+
+ /* kcore1 map overlaps over all bpf maps */
+ map_kcore1->start = 100;
+ map_kcore1->end = 1000;
+
+ /* kcore2 map hides behind bpf_prog_2 */
+ map_kcore2->start = 550;
+ map_kcore2->end = 570;
+
+	/* kcore3 map overlaps bpf_prog_3 and kcore1, and adds a new map region */
+ map_kcore3->start = 880;
+ map_kcore3->end = 1100;
+
+ ret = maps__merge_in(maps, map_kcore1);
+ TEST_ASSERT_VAL("failed to merge map", !ret);
+
+ ret = check_maps(merged12, ARRAY_SIZE(merged12), maps);
+ TEST_ASSERT_VAL("merge check failed", !ret);
+
+ ret = maps__merge_in(maps, map_kcore2);
+ TEST_ASSERT_VAL("failed to merge map", !ret);
+
+ ret = check_maps(merged12, ARRAY_SIZE(merged12), maps);
+ TEST_ASSERT_VAL("merge check failed", !ret);
+
+ ret = maps__merge_in(maps, map_kcore3);
+ TEST_ASSERT_VAL("failed to merge map", !ret);
+
+ ret = check_maps(merged3, ARRAY_SIZE(merged3), maps);
+ TEST_ASSERT_VAL("merge check failed", !ret);
+
+ maps__delete(maps);
+ return TEST_OK;
+}
+
+DEFINE_SUITE("maps__merge_in", maps__merge_in);
diff --git a/tools/perf/tests/mem.c b/tools/perf/tests/mem.c
new file mode 100644
index 000000000..56014ec7d
--- /dev/null
+++ b/tools/perf/tests/mem.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/map_symbol.h"
+#include "util/mem-events.h"
+#include "util/symbol.h"
+#include "linux/perf_event.h"
+#include "util/debug.h"
+#include "tests.h"
+#include <string.h>
+
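+/*
+ * Print the snoop and memory level strings for a data source and compare
+ * the concatenation against the expected output.
+ */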
+static int check(union perf_mem_data_src data_src,
+ const char *string)
+{
+ char out[100];
+ char failure[100];
+ struct mem_info mi = { .data_src = data_src };
+
+ int n;
+
+ n = perf_mem__snp_scnprintf(out, sizeof out, &mi);
+ n += perf_mem__lvl_scnprintf(out + n, sizeof out - n, &mi);
+ scnprintf(failure, sizeof failure, "unexpected %s", out);
+ TEST_ASSERT_VAL(failure, !strcmp(string, out));
+ return 0;
+}
+
+static int test__mem(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int ret = 0;
+ union perf_mem_data_src src;
+
+ memset(&src, 0, sizeof(src));
+
+ src.mem_lvl = PERF_MEM_LVL_HIT;
+ src.mem_lvl_num = 4;
+
+ ret |= check(src, "N/AL4 hit");
+
+ src.mem_remote = 1;
+
+ ret |= check(src, "N/ARemote L4 hit");
+
+ src.mem_lvl = PERF_MEM_LVL_MISS;
+ src.mem_lvl_num = PERF_MEM_LVLNUM_PMEM;
+ src.mem_remote = 0;
+
+ ret |= check(src, "N/APMEM miss");
+
+ src.mem_remote = 1;
+
+ ret |= check(src, "N/ARemote PMEM miss");
+
+ src.mem_snoopx = PERF_MEM_SNOOPX_FWD;
+ src.mem_lvl_num = PERF_MEM_LVLNUM_RAM;
+
+ ret |= check(src , "FwdRemote RAM miss");
+
+ return ret;
+}
+
+DEFINE_SUITE("Test data source output", mem);
diff --git a/tools/perf/tests/mem2node.c b/tools/perf/tests/mem2node.c
new file mode 100644
index 000000000..4c9682951
--- /dev/null
+++ b/tools/perf/tests/mem2node.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/zalloc.h>
+#include <perf/cpumap.h>
+#include <internal/cpumap.h>
+#include "debug.h"
+#include "env.h"
+#include "mem2node.h"
+#include "tests.h"
+
+static struct node {
+ int node;
+ const char *map;
+} test_nodes[] = {
+ { .node = 0, .map = "0" },
+ { .node = 1, .map = "1-2" },
+ { .node = 3, .map = "5-7,9" },
+};
+
+#define T TEST_ASSERT_VAL
+
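+/* Parse a CPU list string (e.g. "1-2" or "5-7,9") into a bitmap of nbits bits. */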
+static unsigned long *get_bitmap(const char *str, int nbits)
+{
+ struct perf_cpu_map *map = perf_cpu_map__new(str);
+ unsigned long *bm = NULL;
+
+ bm = bitmap_zalloc(nbits);
+
+ if (map && bm) {
+ struct perf_cpu cpu;
+ int i;
+
+ perf_cpu_map__for_each_cpu(cpu, i, map)
+ set_bit(cpu.cpu, bm);
+ }
+
+ if (map)
+ perf_cpu_map__put(map);
+ else
+ free(bm);
+
+ return bm && map ? bm : NULL;
+}
+
+static int test__mem2node(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
+{
+ struct mem2node map;
+ struct memory_node nodes[3];
+ struct perf_env env = {
+ .memory_nodes = (struct memory_node *) &nodes[0],
+ .nr_memory_nodes = ARRAY_SIZE(nodes),
+ .memory_bsize = 0x100,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(nodes); i++) {
+ nodes[i].node = test_nodes[i].node;
+ nodes[i].size = 10;
+
+ T("failed: alloc bitmap",
+ (nodes[i].set = get_bitmap(test_nodes[i].map, 10)));
+ }
+
+ T("failed: mem2node__init", !mem2node__init(&map, &env));
+ T("failed: mem2node__node", 0 == mem2node__node(&map, 0x50));
+ T("failed: mem2node__node", 1 == mem2node__node(&map, 0x100));
+ T("failed: mem2node__node", 1 == mem2node__node(&map, 0x250));
+ T("failed: mem2node__node", 3 == mem2node__node(&map, 0x500));
+ T("failed: mem2node__node", 3 == mem2node__node(&map, 0x650));
+ T("failed: mem2node__node", -1 == mem2node__node(&map, 0x450));
+ T("failed: mem2node__node", -1 == mem2node__node(&map, 0x1050));
+
+ for (i = 0; i < ARRAY_SIZE(nodes); i++)
+ zfree(&nodes[i].set);
+
+ mem2node__exit(&map);
+ return 0;
+}
+
+DEFINE_SUITE("mem2node", mem2node);
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
new file mode 100644
index 000000000..8322fc229
--- /dev/null
+++ b/tools/perf/tests/mmap-basic.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <perf/cpumap.h>
+
+#include "debug.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "thread_map.h"
+#include "tests.h"
+#include "util/mmap.h"
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <perf/evlist.h>
+#include <perf/mmap.h>
+
+/*
+ * This test generates a random number of calls to a few getpid-like
+ * syscalls (getsid, getppid, getpgid), then establishes an mmap for a
+ * group of events created to monitor those syscalls.
+ *
+ * It then reads the events from the mmap and uses the PERF_SAMPLE_ID
+ * generated sample.id field to map each one back to its evsel instance.
+ *
+ * Then it checks if the number of syscalls reported as perf events by
+ * the kernel corresponds to the number of syscalls made.
+ */
+static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int err = TEST_FAIL;
+ union perf_event *event;
+ struct perf_thread_map *threads;
+ struct perf_cpu_map *cpus;
+ struct evlist *evlist;
+ cpu_set_t cpu_set;
+ const char *syscall_names[] = { "getsid", "getppid", "getpgid", };
+ pid_t (*syscalls[])(void) = { (void *)getsid, getppid, (void*)getpgid };
+#define nsyscalls ARRAY_SIZE(syscall_names)
+ unsigned int nr_events[nsyscalls],
+ expected_nr_events[nsyscalls], i, j;
+ struct evsel *evsels[nsyscalls], *evsel;
+ char sbuf[STRERR_BUFSIZE];
+ struct mmap *md;
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ if (threads == NULL) {
+ pr_debug("thread_map__new\n");
+ return -1;
+ }
+
+ cpus = perf_cpu_map__new(NULL);
+ if (cpus == NULL) {
+ pr_debug("perf_cpu_map__new\n");
+ goto out_free_threads;
+ }
+
+ CPU_ZERO(&cpu_set);
+ CPU_SET(perf_cpu_map__cpu(cpus, 0).cpu, &cpu_set);
+ sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
+ if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+ pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+ perf_cpu_map__cpu(cpus, 0).cpu,
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_free_cpus;
+ }
+
+ evlist = evlist__new();
+ if (evlist == NULL) {
+ pr_debug("evlist__new\n");
+ goto out_free_cpus;
+ }
+
+ perf_evlist__set_maps(&evlist->core, cpus, threads);
+
+ for (i = 0; i < nsyscalls; ++i) {
+ char name[64];
+
+ snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
+ evsels[i] = evsel__newtp("syscalls", name);
+ if (IS_ERR(evsels[i])) {
+ pr_debug("evsel__new(%s)\n", name);
+ if (PTR_ERR(evsels[i]) == -EACCES) {
+ /* Permissions failure, flag the failure as a skip. */
+ err = TEST_SKIP;
+ }
+ goto out_delete_evlist;
+ }
+
+ evsels[i]->core.attr.wakeup_events = 1;
+ evsel__set_sample_id(evsels[i], false);
+
+ evlist__add(evlist, evsels[i]);
+
+ if (evsel__open(evsels[i], cpus, threads) < 0) {
+ pr_debug("failed to open counter: %s, "
+ "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ nr_events[i] = 0;
+ expected_nr_events[i] = 1 + rand() % 127;
+ }
+
+ if (evlist__mmap(evlist, 128) < 0) {
+ pr_debug("failed to mmap events: %d (%s)\n", errno,
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ for (i = 0; i < nsyscalls; ++i)
+ for (j = 0; j < expected_nr_events[i]; ++j) {
+ syscalls[i]();
+ }
+
+ md = &evlist->mmap[0];
+ if (perf_mmap__read_init(&md->core) < 0)
+ goto out_init;
+
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
+ struct perf_sample sample;
+
+ if (event->header.type != PERF_RECORD_SAMPLE) {
+ pr_debug("unexpected %s event\n",
+ perf_event__name(event->header.type));
+ goto out_delete_evlist;
+ }
+
+ err = evlist__parse_sample(evlist, event, &sample);
+ if (err) {
+ pr_err("Can't parse sample, err = %d\n", err);
+ goto out_delete_evlist;
+ }
+
+ err = -1;
+ evsel = evlist__id2evsel(evlist, sample.id);
+ if (evsel == NULL) {
+ pr_debug("event with id %" PRIu64
+ " doesn't map to an evsel\n", sample.id);
+ goto out_delete_evlist;
+ }
+ nr_events[evsel->core.idx]++;
+ perf_mmap__consume(&md->core);
+ }
+ perf_mmap__read_done(&md->core);
+
+out_init:
+ err = 0;
+ evlist__for_each_entry(evlist, evsel) {
+ if (nr_events[evsel->core.idx] != expected_nr_events[evsel->core.idx]) {
+ pr_debug("expected %d %s events, got %d\n",
+ expected_nr_events[evsel->core.idx],
+ evsel__name(evsel), nr_events[evsel->core.idx]);
+ err = -1;
+ goto out_delete_evlist;
+ }
+ }
+
+out_delete_evlist:
+ evlist__delete(evlist);
+out_free_cpus:
+ perf_cpu_map__put(cpus);
+out_free_threads:
+ perf_thread_map__put(threads);
+ return err;
+}
+
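+/*
+ * Open a single hardware counter for the current thread, mmap its user page
+ * and check that user-space reads of the counter work and that the deltas
+ * measured across a few busy loops of increasing length never shrink.
+ */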
+static int test_stat_user_read(int event)
+{
+ struct perf_counts_values counts = { .val = 0 };
+ struct perf_thread_map *threads;
+ struct perf_evsel *evsel;
+ struct perf_event_mmap_page *pc;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = event,
+#ifdef __aarch64__
+ .config1 = 0x2, /* Request user access */
+#endif
+ };
+ int err, i, ret = TEST_FAIL;
+ bool opened = false, mapped = false;
+
+ threads = perf_thread_map__new_dummy();
+ TEST_ASSERT_VAL("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evsel = perf_evsel__new(&attr);
+ TEST_ASSERT_VAL("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, NULL, threads);
+ if (err) {
+ pr_err("failed to open evsel: %s\n", strerror(-err));
+ ret = TEST_SKIP;
+ goto out;
+ }
+ opened = true;
+
+ err = perf_evsel__mmap(evsel, 0);
+ if (err) {
+ pr_err("failed to mmap evsel: %s\n", strerror(-err));
+ goto out;
+ }
+ mapped = true;
+
+ pc = perf_evsel__mmap_base(evsel, 0, 0);
+ if (!pc) {
+ pr_err("failed to get mmapped address\n");
+ goto out;
+ }
+
+ if (!pc->cap_user_rdpmc || !pc->index) {
+ pr_err("userspace counter access not %s\n",
+ !pc->cap_user_rdpmc ? "supported" : "enabled");
+ ret = TEST_SKIP;
+ goto out;
+ }
+ if (pc->pmc_width < 32) {
+ pr_err("userspace counter width not set (%d)\n", pc->pmc_width);
+ goto out;
+ }
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ if (counts.val == 0) {
+ pr_err("failed to read value for evsel\n");
+ goto out;
+ }
+
+ for (i = 0; i < 5; i++) {
+ volatile int count = 0x10000 << i;
+ __u64 start, end, last = 0;
+
+ pr_debug("\tloop = %u, ", count);
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ start = counts.val;
+
+ while (count--) ;
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ end = counts.val;
+
+ if ((end - start) < last) {
+ pr_err("invalid counter data: end=%llu start=%llu last= %llu\n",
+ end, start, last);
+ goto out;
+ }
+ last = end - start;
+ pr_debug("count = %llu\n", end - start);
+ }
+ ret = TEST_OK;
+
+out:
+ if (mapped)
+ perf_evsel__munmap(evsel);
+ if (opened)
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_thread_map__put(threads);
+ return ret;
+}
+
+static int test__mmap_user_read_instr(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ return test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
+}
+
+static int test__mmap_user_read_cycles(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ return test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
+}
+
+static struct test_case tests__basic_mmap[] = {
+ TEST_CASE_REASON("Read samples using the mmap interface",
+ basic_mmap,
+ "permissions"),
+ TEST_CASE_REASON("User space counter reading of instructions",
+ mmap_user_read_instr,
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+ "permissions"
+#else
+ "unsupported"
+#endif
+ ),
+ TEST_CASE_REASON("User space counter reading of cycles",
+ mmap_user_read_cycles,
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+ "permissions"
+#else
+ "unsupported"
+#endif
+ ),
+ { .name = NULL, }
+};
+
+struct test_suite suite__basic_mmap = {
+ .desc = "mmap interface tests",
+ .test_cases = tests__basic_mmap,
+};
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
new file mode 100644
index 000000000..a4301fc7b
--- /dev/null
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include "debug.h"
+#include "event.h"
+#include "tests.h"
+#include "machine.h"
+#include "thread_map.h"
+#include "map.h"
+#include "symbol.h"
+#include "util/synthetic-events.h"
+#include "thread.h"
+#include <internal/lib.h> // page_size
+
+#define THREADS 4
+
+static int go_away;
+
+struct thread_data {
+ pthread_t pt;
+ pid_t tid;
+ void *map;
+ int ready[2];
+};
+
+static struct thread_data threads[THREADS];
+
+static int thread_init(struct thread_data *td)
+{
+ void *map;
+
+ map = mmap(NULL, page_size,
+ PROT_READ|PROT_WRITE|PROT_EXEC,
+ MAP_SHARED|MAP_ANONYMOUS, -1, 0);
+
+ if (map == MAP_FAILED) {
+ perror("mmap failed");
+ return -1;
+ }
+
+ td->map = map;
+ td->tid = syscall(SYS_gettid);
+
+ pr_debug("tid = %d, map = %p\n", td->tid, map);
+ return 0;
+}
+
+static void *thread_fn(void *arg)
+{
+ struct thread_data *td = arg;
+ ssize_t ret;
+ int go = 0;
+
+ if (thread_init(td))
+ return NULL;
+
+ /* Signal thread_create thread is initialized. */
+ ret = write(td->ready[1], &go, sizeof(int));
+ if (ret != sizeof(int)) {
+ pr_err("failed to notify\n");
+ return NULL;
+ }
+
+ while (!go_away) {
+ /* Waiting for main thread to kill us. */
+ usleep(100);
+ }
+
+ munmap(td->map, page_size);
+ return NULL;
+}
+
+static int thread_create(int i)
+{
+ struct thread_data *td = &threads[i];
+ int err, go;
+
+ if (pipe(td->ready))
+ return -1;
+
+ err = pthread_create(&td->pt, NULL, thread_fn, td);
+ if (!err) {
+ /* Wait for thread initialization. */
+ ssize_t ret = read(td->ready[0], &go, sizeof(int));
+ err = ret != sizeof(int);
+ }
+
+ close(td->ready[0]);
+ close(td->ready[1]);
+ return err;
+}
+
+static int threads_create(void)
+{
+ struct thread_data *td0 = &threads[0];
+ int i, err = 0;
+
+ go_away = 0;
+
+ /* 0 is main thread */
+ if (thread_init(td0))
+ return -1;
+
+ for (i = 1; !err && i < THREADS; i++)
+ err = thread_create(i);
+
+ return err;
+}
+
+static int threads_destroy(void)
+{
+ struct thread_data *td0 = &threads[0];
+ int i, err = 0;
+
+ /* cleanup the main thread */
+ munmap(td0->map, page_size);
+
+ go_away = 1;
+
+ for (i = 1; !err && i < THREADS; i++)
+ err = pthread_join(threads[i].pt, NULL);
+
+ return err;
+}
+
+typedef int (*synth_cb)(struct machine *machine);
+
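+/*
+ * Both synthesis callbacks emit the existing threads' events into the
+ * machine: synth_all() walks every thread on the system, synth_process()
+ * only the threads of this process.
+ */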
+static int synth_all(struct machine *machine)
+{
+ return perf_event__synthesize_threads(NULL,
+ perf_event__process,
+ machine, 1, 0, 1);
+}
+
+static int synth_process(struct machine *machine)
+{
+ struct perf_thread_map *map;
+ int err;
+
+ map = thread_map__new_by_pid(getpid());
+
+ err = perf_event__synthesize_thread_map(NULL, map,
+ perf_event__process,
+ machine, 1, 0);
+
+ perf_thread_map__put(map);
+ return err;
+}
+
+static int mmap_events(synth_cb synth)
+{
+ struct machine *machine;
+ int err, i;
+
+ /*
+	 * threads_create() will not return before all threads are spawned
+	 * and each one has created its memory map.
+ *
+ * They will loop until threads_destroy is called, so we
+ * can safely run synthesizing function.
+ */
+ TEST_ASSERT_VAL("failed to create threads", !threads_create());
+
+ machine = machine__new_host();
+
+ dump_trace = verbose > 1 ? 1 : 0;
+
+ err = synth(machine);
+
+ dump_trace = 0;
+
+ TEST_ASSERT_VAL("failed to destroy threads", !threads_destroy());
+ TEST_ASSERT_VAL("failed to synthesize maps", !err);
+
+ /*
+ * All data is synthesized, try to find map for each
+ * thread object.
+ */
+ for (i = 0; i < THREADS; i++) {
+ struct thread_data *td = &threads[i];
+ struct addr_location al;
+ struct thread *thread;
+
+ thread = machine__findnew_thread(machine, getpid(), td->tid);
+
+ pr_debug("looking for map %p\n", td->map);
+
+ thread__find_map(thread, PERF_RECORD_MISC_USER,
+ (unsigned long) (td->map + 1), &al);
+
+ thread__put(thread);
+
+ if (!al.map) {
+ pr_debug("failed, couldn't find map\n");
+ err = -1;
+ break;
+ }
+
+ pr_debug("map %p, addr %" PRIx64 "\n", al.map, al.map->start);
+ }
+
+ machine__delete_threads(machine);
+ machine__delete(machine);
+ return err;
+}
+
+/*
+ * This test creates THREADS threads (including the main thread),
+ * and each thread creates a memory map.
+ *
+ * When threads are created, we synthesize them with both
+ * (separate tests):
+ * perf_event__synthesize_thread_map (process based)
+ * perf_event__synthesize_threads (global)
+ *
+ * We test we can find all memory maps via:
+ * thread__find_map
+ *
+ * by using all thread objects.
+ */
+static int test__mmap_thread_lookup(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ /* perf_event__synthesize_threads synthesize */
+ TEST_ASSERT_VAL("failed with sythesizing all",
+ !mmap_events(synth_all));
+
+ /* perf_event__synthesize_thread_map synthesize */
+ TEST_ASSERT_VAL("failed with sythesizing process",
+ !mmap_events(synth_process));
+
+ return 0;
+}
+
+DEFINE_SUITE("Lookup mmap thread", mmap_thread_lookup);
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
new file mode 100644
index 000000000..f3275be83
--- /dev/null
+++ b/tools/perf/tests/openat-syscall-all-cpus.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <inttypes.h>
+/* For the CPU_* macros */
+#include <sched.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <api/fs/fs.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <api/fs/tracing_path.h>
+#include "evsel.h"
+#include "tests.h"
+#include "thread_map.h"
+#include <perf/cpumap.h>
+#include "debug.h"
+#include "stat.h"
+#include "util/counts.h"
+
+static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ int err = TEST_FAIL, fd, idx;
+ struct perf_cpu cpu;
+ struct perf_cpu_map *cpus;
+ struct evsel *evsel;
+ unsigned int nr_openat_calls = 111, i;
+ cpu_set_t cpu_set;
+ struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
+ char sbuf[STRERR_BUFSIZE];
+ char errbuf[BUFSIZ];
+
+ if (threads == NULL) {
+ pr_debug("thread_map__new\n");
+ return -1;
+ }
+
+ cpus = perf_cpu_map__new(NULL);
+ if (cpus == NULL) {
+ pr_debug("perf_cpu_map__new\n");
+ goto out_thread_map_delete;
+ }
+
+ CPU_ZERO(&cpu_set);
+
+ evsel = evsel__newtp("syscalls", "sys_enter_openat");
+ if (IS_ERR(evsel)) {
+ tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
+ pr_debug("%s\n", errbuf);
+ err = TEST_SKIP;
+ goto out_cpu_map_delete;
+ }
+
+ if (evsel__open(evsel, cpus, threads) < 0) {
+ pr_debug("failed to open counter: %s, "
+ "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ err = TEST_SKIP;
+ goto out_evsel_delete;
+ }
+
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+ unsigned int ncalls = nr_openat_calls + idx;
+ /*
+ * XXX eventually lift this restriction in a way that
+ * keeps perf building on older glibc installations
+ * without CPU_ALLOC. 1024 cpus in 2010 still seems
+ * a reasonable upper limit tho :-)
+ */
+ if (cpu.cpu >= CPU_SETSIZE) {
+ pr_debug("Ignoring CPU %d\n", cpu.cpu);
+ continue;
+ }
+
+ CPU_SET(cpu.cpu, &cpu_set);
+ if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+ pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+ cpu.cpu,
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_close_fd;
+ }
+ for (i = 0; i < ncalls; ++i) {
+ fd = openat(0, "/etc/passwd", O_RDONLY);
+ close(fd);
+ }
+ CPU_CLR(cpu.cpu, &cpu_set);
+ }
+
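+	/*
+	 * Point the evsel at the full cpu map before the per-cpu reads below,
+	 * presumably so evsel__read_on_cpu() can index its counts per cpu.
+	 */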
+ evsel->core.cpus = perf_cpu_map__get(cpus);
+
+ err = TEST_OK;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+ unsigned int expected;
+
+ if (cpu.cpu >= CPU_SETSIZE)
+ continue;
+
+ if (evsel__read_on_cpu(evsel, idx, 0) < 0) {
+ pr_debug("evsel__read_on_cpu\n");
+ err = TEST_FAIL;
+ break;
+ }
+
+ expected = nr_openat_calls + idx;
+ if (perf_counts(evsel->counts, idx, 0)->val != expected) {
+ pr_debug("evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
+ expected, cpu.cpu, perf_counts(evsel->counts, idx, 0)->val);
+ err = TEST_FAIL;
+ }
+ }
+
+ evsel__free_counts(evsel);
+out_close_fd:
+ perf_evsel__close_fd(&evsel->core);
+out_evsel_delete:
+ evsel__delete(evsel);
+out_cpu_map_delete:
+ perf_cpu_map__put(cpus);
+out_thread_map_delete:
+ perf_thread_map__put(threads);
+ return err;
+}
+
+
+static struct test_case tests__openat_syscall_event_on_all_cpus[] = {
+ TEST_CASE_REASON("Detect openat syscall event on all cpus",
+ openat_syscall_event_on_all_cpus,
+ "permissions"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__openat_syscall_event_on_all_cpus = {
+ .desc = "Detect openat syscall event on all cpus",
+ .test_cases = tests__openat_syscall_event_on_all_cpus,
+};
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
new file mode 100644
index 000000000..a7b280065
--- /dev/null
+++ b/tools/perf/tests/openat-syscall-tp-fields.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdbool.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "evlist.h"
+#include "evsel.h"
+#include "thread_map.h"
+#include "record.h"
+#include "tests.h"
+#include "debug.h"
+#include "util/mmap.h"
+#include <errno.h>
+#include <perf/mmap.h>
+
+#ifndef O_DIRECTORY
+#define O_DIRECTORY 00200000
+#endif
+#ifndef AT_FDCWD
+#define AT_FDCWD -100
+#endif
+
+static int test__syscall_openat_tp_fields(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct record_opts opts = {
+ .target = {
+ .uid = UINT_MAX,
+ .uses_mmap = true,
+ },
+ .no_buffering = true,
+ .freq = 1,
+ .mmap_pages = 256,
+ .raw_samples = true,
+ };
+ const char *filename = "/etc/passwd";
+ int flags = O_RDONLY | O_DIRECTORY;
+ struct evlist *evlist = evlist__new();
+ struct evsel *evsel;
+ int err = -1, i, nr_events = 0, nr_polls = 0;
+ char sbuf[STRERR_BUFSIZE];
+
+ if (evlist == NULL) {
+ pr_debug("%s: evlist__new\n", __func__);
+ goto out;
+ }
+
+ evsel = evsel__newtp("syscalls", "sys_enter_openat");
+ if (IS_ERR(evsel)) {
+ pr_debug("%s: evsel__newtp\n", __func__);
+ goto out_delete_evlist;
+ }
+
+ evlist__add(evlist, evsel);
+
+ err = evlist__create_maps(evlist, &opts.target);
+ if (err < 0) {
+ pr_debug("%s: evlist__create_maps\n", __func__);
+ goto out_delete_evlist;
+ }
+
+ evsel__config(evsel, &opts, NULL);
+
+ perf_thread_map__set_pid(evlist->core.threads, 0, getpid());
+
+ err = evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ err = evlist__mmap(evlist, UINT_MAX);
+ if (err < 0) {
+ pr_debug("evlist__mmap: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ evlist__enable(evlist);
+
+ /*
+ * Generate the event:
+ */
+ openat(AT_FDCWD, filename, flags);
+
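+	/*
+	 * Poll the ring buffers until a sample arrives, then check that the
+	 * tracepoint's "flags" field matches the flags passed to openat();
+	 * give up after a handful of empty polls.
+	 */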
+ while (1) {
+ int before = nr_events;
+
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ union perf_event *event;
+ struct mmap *md;
+
+ md = &evlist->mmap[i];
+ if (perf_mmap__read_init(&md->core) < 0)
+ continue;
+
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
+ const u32 type = event->header.type;
+ int tp_flags;
+ struct perf_sample sample;
+
+ ++nr_events;
+
+ if (type != PERF_RECORD_SAMPLE) {
+ perf_mmap__consume(&md->core);
+ continue;
+ }
+
+ err = evsel__parse_sample(evsel, event, &sample);
+ if (err) {
+ pr_debug("Can't parse sample, err = %d\n", err);
+ goto out_delete_evlist;
+ }
+
+ tp_flags = evsel__intval(evsel, &sample, "flags");
+
+ if (flags != tp_flags) {
+ pr_debug("%s: Expected flags=%#x, got %#x\n",
+ __func__, flags, tp_flags);
+ goto out_delete_evlist;
+ }
+
+ goto out_ok;
+ }
+ perf_mmap__read_done(&md->core);
+ }
+
+ if (nr_events == before)
+ evlist__poll(evlist, 10);
+
+ if (++nr_polls > 5) {
+ pr_debug("%s: no events!\n", __func__);
+ goto out_delete_evlist;
+ }
+ }
+out_ok:
+ err = 0;
+out_delete_evlist:
+ evlist__delete(evlist);
+out:
+ return err;
+}
+
+DEFINE_SUITE("syscalls:sys_enter_openat event fields", syscall_openat_tp_fields);
diff --git a/tools/perf/tests/openat-syscall.c b/tools/perf/tests/openat-syscall.c
new file mode 100644
index 000000000..7e05b8b5c
--- /dev/null
+++ b/tools/perf/tests/openat-syscall.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <inttypes.h>
+#include <api/fs/tracing_path.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "thread_map.h"
+#include "evsel.h"
+#include "debug.h"
+#include "tests.h"
+#include "util/counts.h"
+
+static int test__openat_syscall_event(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ int err = TEST_FAIL, fd;
+ struct evsel *evsel;
+ unsigned int nr_openat_calls = 111, i;
+ struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
+ char sbuf[STRERR_BUFSIZE];
+ char errbuf[BUFSIZ];
+
+ if (threads == NULL) {
+ pr_debug("thread_map__new\n");
+ return TEST_FAIL;
+ }
+
+ evsel = evsel__newtp("syscalls", "sys_enter_openat");
+ if (IS_ERR(evsel)) {
+ tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
+ pr_debug("%s\n", errbuf);
+ err = TEST_SKIP;
+ goto out_thread_map_delete;
+ }
+
+ if (evsel__open_per_thread(evsel, threads) < 0) {
+ pr_debug("failed to open counter: %s, "
+ "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ err = TEST_SKIP;
+ goto out_evsel_delete;
+ }
+
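+ /*
+ * Issue a known number of openat() calls, then read the counter back;
+ * it must match nr_openat_calls exactly.
+ */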
+ for (i = 0; i < nr_openat_calls; ++i) {
+ fd = openat(0, "/etc/passwd", O_RDONLY);
+ close(fd);
+ }
+
+ if (evsel__read_on_cpu(evsel, 0, 0) < 0) {
+ pr_debug("evsel__read_on_cpu\n");
+ goto out_close_fd;
+ }
+
+ if (perf_counts(evsel->counts, 0, 0)->val != nr_openat_calls) {
+ pr_debug("evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
+ nr_openat_calls, perf_counts(evsel->counts, 0, 0)->val);
+ goto out_close_fd;
+ }
+
+ err = TEST_OK;
+out_close_fd:
+ perf_evsel__close_fd(&evsel->core);
+out_evsel_delete:
+ evsel__delete(evsel);
+out_thread_map_delete:
+ perf_thread_map__put(threads);
+ return err;
+}
+
+static struct test_case tests__openat_syscall_event[] = {
+ TEST_CASE_REASON("Detect openat syscall event",
+ openat_syscall_event,
+ "permissions"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__openat_syscall_event = {
+ .desc = "Detect openat syscall event",
+ .test_cases = tests__openat_syscall_event,
+};
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
new file mode 100644
index 000000000..459afdb25
--- /dev/null
+++ b/tools/perf/tests/parse-events.c
@@ -0,0 +1,2410 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "parse-events.h"
+#include "evsel.h"
+#include "evlist.h"
+#include <api/fs/fs.h>
+#include "tests.h"
+#include "debug.h"
+#include "pmu.h"
+#include "pmu-hybrid.h"
+#include <dirent.h>
+#include <errno.h>
+#include "fncache.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <linux/kernel.h>
+#include <linux/hw_breakpoint.h>
+#include <api/fs/tracing_path.h>
+
+#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
+
+#if defined(__s390x__)
+/* Return true if the kvm module is available and loaded. Test this by
+ * checking that the kvm-s390 trace point directory (which contains
+ * kvm_s390_create_vm) exists; otherwise this test always fails.
+ */
+static bool kvm_s390_create_vm_valid(void)
+{
+ char *eventfile;
+ bool rc = false;
+
+ eventfile = get_events_file("kvm-s390");
+
+ if (eventfile) {
+ DIR *mydir = opendir(eventfile);
+
+ if (mydir) {
+ rc = true;
+ closedir(mydir);
+ }
+ put_events_file(eventfile);
+ }
+
+ return rc;
+}
+#endif
+
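+/*
+ * The test__checkevent_*() callbacks below are run on the evlist produced
+ * by parsing a fixed event string and verify the resulting perf_event_attr
+ * fields (type, config, sample_type, modifiers, ...).
+ */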
+static int test__checkevent_tracepoint(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->core.nr_groups);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong sample_type",
+ PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
+ TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period);
+ return TEST_OK;
+}
+
+static int test__checkevent_tracepoint_multi(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries > 1);
+ TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->core.nr_groups);
+
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_VAL("wrong type",
+ PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong sample_type",
+ PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
+ TEST_ASSERT_VAL("wrong sample_period",
+ 1 == evsel->core.attr.sample_period);
+ }
+ return TEST_OK;
+}
+
+static int test__checkevent_raw(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+ return TEST_OK;
+}
+
+static int test__checkevent_numeric(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 1 == evsel->core.attr.config);
+ return TEST_OK;
+}
+
+static int test__checkevent_symbolic_name(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
+ return TEST_OK;
+}
+
+static int test__checkevent_symbolic_name_config(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ /*
+ * The period value gets configured within evlist__config,
+ * while this test executes only the parse-events method.
+ */
+ TEST_ASSERT_VAL("wrong period",
+ 0 == evsel->core.attr.sample_period);
+ TEST_ASSERT_VAL("wrong config1",
+ 0 == evsel->core.attr.config1);
+ TEST_ASSERT_VAL("wrong config2",
+ 1 == evsel->core.attr.config2);
+ return TEST_OK;
+}
+
+static int test__checkevent_symbolic_alias(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_SW_PAGE_FAULTS == evsel->core.attr.config);
+ return TEST_OK;
+}
+
+static int test__checkevent_genhw(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", (1 << 16) == evsel->core.attr.config);
+ return TEST_OK;
+}
+
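+/* mem:0 style breakpoint events: check type, config, bp_type and bp_len. */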
+static int test__checkevent_breakpoint(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
+ evsel->core.attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 ==
+ evsel->core.attr.bp_len);
+ return TEST_OK;
+}
+
+static int test__checkevent_breakpoint_x(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong bp_type",
+ HW_BREAKPOINT_X == evsel->core.attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->core.attr.bp_len);
+ return TEST_OK;
+}
+
+static int test__checkevent_breakpoint_r(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type",
+ PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong bp_type",
+ HW_BREAKPOINT_R == evsel->core.attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len",
+ HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
+ return TEST_OK;
+}
+
+static int test__checkevent_breakpoint_w(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type",
+ PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong bp_type",
+ HW_BREAKPOINT_W == evsel->core.attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len",
+ HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
+ return TEST_OK;
+}
+
+static int test__checkevent_breakpoint_rw(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type",
+ PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong bp_type",
+ (HW_BREAKPOINT_R|HW_BREAKPOINT_W) == evsel->core.attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len",
+ HW_BREAKPOINT_LEN_4 == evsel->core.attr.bp_len);
+ return TEST_OK;
+}
+
+static int test__checkevent_tracepoint_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+
+ return test__checkevent_tracepoint(evlist);
+}
+
+static int
+test__checkevent_tracepoint_multi_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries > 1);
+
+ evlist__for_each_entry(evlist, evsel) {
+ TEST_ASSERT_VAL("wrong exclude_user",
+ !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel",
+ evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ }
+
+ return test__checkevent_tracepoint_multi(evlist);
+}
+
+static int test__checkevent_raw_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
+
+ return test__checkevent_raw(evlist);
+}
+
+static int test__checkevent_numeric_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
+
+ return test__checkevent_numeric(evlist);
+}
+
+static int test__checkevent_symbolic_name_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__checkevent_exclude_host_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__checkevent_exclude_guest_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__checkevent_symbolic_alias_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+
+ return test__checkevent_symbolic_alias(evlist);
+}
+
+static int test__checkevent_genhw_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
+
+ return test__checkevent_genhw(evlist);
+}
+
+static int test__checkevent_exclude_idle_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__checkevent_exclude_idle_modifier_1(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude idle", evsel->core.attr.exclude_idle);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__checkevent_breakpoint_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(evsel__name(evsel), "mem:0:u"));
+
+ return test__checkevent_breakpoint(evlist);
+}
+
+static int test__checkevent_breakpoint_x_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(evsel__name(evsel), "mem:0:x:k"));
+
+ return test__checkevent_breakpoint_x(evlist);
+}
+
+static int test__checkevent_breakpoint_r_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(evsel__name(evsel), "mem:0:r:hp"));
+
+ return test__checkevent_breakpoint_r(evlist);
+}
+
+static int test__checkevent_breakpoint_w_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(evsel__name(evsel), "mem:0:w:up"));
+
+ return test__checkevent_breakpoint_w(evlist);
+}
+
+static int test__checkevent_breakpoint_rw_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(evsel__name(evsel), "mem:0:rw:kp"));
+
+ return test__checkevent_breakpoint_rw(evlist);
+}
+
+static int test__checkevent_pmu(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 10 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong config1", 1 == evsel->core.attr.config1);
+ TEST_ASSERT_VAL("wrong config2", 3 == evsel->core.attr.config2);
+ /*
+ * The period value gets configured within evlist__config,
+ * while this test executes only the parse-events method.
+ */
+ TEST_ASSERT_VAL("wrong period", 0 == evsel->core.attr.sample_period);
+
+ return TEST_OK;
+}
+
+static int test__checkevent_list(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
+
+ /* r1 */
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 1 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong config1", 0 == evsel->core.attr.config1);
+ TEST_ASSERT_VAL("wrong config2", 0 == evsel->core.attr.config2);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+
+ /* syscalls:sys_enter_openat:k */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong sample_type",
+ PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
+ TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+
+ /* 1:1:hp */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 1 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
+
+ return TEST_OK;
+}
+
+static int test__checkevent_pmu_name(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ /* cpu/config=1,name=krava/u */
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 1 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong name", !strcmp(evsel__name(evsel), "krava"));
+
+ /* cpu/config=2/u */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 2 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(evsel__name(evsel), "cpu/config=2/u"));
+
+ return TEST_OK;
+}
+
+static int test__checkevent_pmu_partial_time_callgraph(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ /* cpu/config=1,call-graph=fp,time,period=100000/ */
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 1 == evsel->core.attr.config);
+ /*
+ * The period, time and callgraph values get configured within
+ * evlist__config, while this test executes only the parse-events method.
+ */
+ TEST_ASSERT_VAL("wrong period", 0 == evsel->core.attr.sample_period);
+ TEST_ASSERT_VAL("wrong callgraph", !evsel__has_callchain(evsel));
+ TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->core.attr.sample_type));
+
+ /* cpu/config=2,call-graph=no,time=0,period=2000/ */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 2 == evsel->core.attr.config);
+ /*
+ * The period, time and callgraph values get configured within
+ * evlist__config, while this test executes only the parse-events method.
+ */
+ TEST_ASSERT_VAL("wrong period", 0 == evsel->core.attr.sample_period);
+ TEST_ASSERT_VAL("wrong callgraph", !evsel__has_callchain(evsel));
+ TEST_ASSERT_VAL("wrong time", !(PERF_SAMPLE_TIME & evsel->core.attr.sample_type));
+
+ return TEST_OK;
+}
+
+static int test__checkevent_pmu_events(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong exclude_user",
+ !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel",
+ evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
+ TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
+
+ return TEST_OK;
+}
+
+static int test__checkevent_pmu_events_mix(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ /* pmu-event:u */
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong exclude_user",
+ !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel",
+ evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
+ TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
+
+ /* cpu/pmu-event/u */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong exclude_user",
+ !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel",
+ evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
+ TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.pinned);
+
+ return TEST_OK;
+}
+
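+/*
+ * Walks the parsed term list and checks each term's type and value; the
+ * expected terms are spelled out in the per-term comments below.
+ */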
+static int test__checkterms_simple(struct list_head *terms)
+{
+ struct parse_events_term *term;
+
+ /* config=10 */
+ term = list_entry(terms->next, struct parse_events_term, list);
+ TEST_ASSERT_VAL("wrong type term",
+ term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG);
+ TEST_ASSERT_VAL("wrong type val",
+ term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ TEST_ASSERT_VAL("wrong val", term->val.num == 10);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config"));
+
+ /* config1 */
+ term = list_entry(term->list.next, struct parse_events_term, list);
+ TEST_ASSERT_VAL("wrong type term",
+ term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1);
+ TEST_ASSERT_VAL("wrong type val",
+ term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ TEST_ASSERT_VAL("wrong val", term->val.num == 1);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config1"));
+
+ /* config2=3 */
+ term = list_entry(term->list.next, struct parse_events_term, list);
+ TEST_ASSERT_VAL("wrong type term",
+ term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2);
+ TEST_ASSERT_VAL("wrong type val",
+ term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ TEST_ASSERT_VAL("wrong val", term->val.num == 3);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config2"));
+
+ /* umask=1 */
+ term = list_entry(term->list.next, struct parse_events_term, list);
+ TEST_ASSERT_VAL("wrong type term",
+ term->type_term == PARSE_EVENTS__TERM_TYPE_USER);
+ TEST_ASSERT_VAL("wrong type val",
+ term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ TEST_ASSERT_VAL("wrong val", term->val.num == 1);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "umask"));
+
+ /*
+ * read
+ *
+ * perf_pmu__test_parse_init() injects a 'read' term into
+ * perf_pmu_events_list, so 'read' is evaluated as a term
+ * and not as a raw event with the 'ead' hex value.
+ */
+ term = list_entry(term->list.next, struct parse_events_term, list);
+ TEST_ASSERT_VAL("wrong type term",
+ term->type_term == PARSE_EVENTS__TERM_TYPE_USER);
+ TEST_ASSERT_VAL("wrong type val",
+ term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ TEST_ASSERT_VAL("wrong val", term->val.num == 1);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "read"));
+
+ /*
+ * r0xead
+ *
+ * To still be able to pass the 'ead' value with the 'r' syntax,
+ * support was added for parsing 'r0xHEX' events.
+ */
+ term = list_entry(term->list.next, struct parse_events_term, list);
+ TEST_ASSERT_VAL("wrong type term",
+ term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG);
+ TEST_ASSERT_VAL("wrong type val",
+ term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ TEST_ASSERT_VAL("wrong val", term->val.num == 0xead);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "config"));
+ return TEST_OK;
+}
+
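+/*
+ * The test__group*() callbacks verify event grouping: leader/member links,
+ * nr_members, group index and how modifiers apply to group members.
+ */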
+static int test__group1(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
+
+ /* instructions:k */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ /* cycles:upp */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ /* use of precise requires exclude_guest */
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ return TEST_OK;
+}
+
+static int test__group2(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
+
+ /* faults + :ku modifier */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_SW_PAGE_FAULTS == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ /* cache-references + :u modifier */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CACHE_REFERENCES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ /* cycles:k */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ return TEST_OK;
+}
+
+static int test__group3(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->core.nr_groups);
+
+ /* group1 syscalls:sys_enter_openat:H */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong sample_type",
+ PERF_TP_SAMPLE_TYPE == evsel->core.attr.sample_type);
+ TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->core.attr.sample_period);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong group name",
+ !strcmp(leader->group_name, "group1"));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ /* group1 cycles:kppp */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ /* use of precise requires exclude_guest */
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 3);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ /* group2 cycles + G modifier */
+ evsel = leader = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong group name",
+ !strcmp(leader->group_name, "group2"));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ /* group2 1:3 + G modifier */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", 1 == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 3 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ /* instructions:u */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ return TEST_OK;
+}
+
+static int test__group4(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
+
+ /* cycles:u + p */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ /* use of precise requires exclude_guest */
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 1);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ /* instructions:kp + p */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ /* use of precise requires exclude_guest */
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip == 2);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ return TEST_OK;
+}
+
+static int test__group5(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->core.nr_groups);
+
+ /* cycles + G */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ /* instructions + G */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ /* cycles:G */
+ evsel = leader = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
+
+ /* instructions:G */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+
+ /* cycles */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+
+ return TEST_OK;
+}
+
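+/*
+ * The test__group_gh*() callbacks verify how per-event guest/host (:G/:H)
+ * modifiers combine with a modifier applied to the whole group.
+ */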
+static int test__group_gh1(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
+
+ /* cycles + :H group modifier */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+
+ /* cache-misses:G + :H group modifier */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+
+ return TEST_OK;
+}
+
+static int test__group_gh2(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
+
+ /* cycles + :G group modifier */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+
+ /* cache-misses:H + :G group modifier */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+
+ return TEST_OK;
+}
+
+static int test__group_gh3(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
+
+ /* cycles:G + :u group modifier */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+
+ /* cache-misses:H + :u group modifier */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+
+ return TEST_OK;
+}
+
+static int test__group_gh4(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong number of groups", 1 == evlist->core.nr_groups);
+
+ /* cycles:G + :uG group modifier */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong core.nr_members", evsel->core.nr_members == 2);
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 0);
+
+ /* cache-misses:H + :uG group modifier */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong group_idx", evsel__group_idx(evsel) == 1);
+
+ return TEST_OK;
+}
+
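+/*
+ * Leader-sampling tests: the group leader does the sampling and the other
+ * events are read through it, so sample_read must be set on every member.
+ */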
+static int test__leader_sample1(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
+
+ /* cycles - sampling group leader */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+
+ /* cache-misses - not sampling */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+
+ /* branch-misses - not sampling */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_BRANCH_MISSES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+
+ return TEST_OK;
+}
+
+static int test__leader_sample2(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+
+ /* instructions - sampling group leader */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+
+ /* branch-misses - not sampling */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_BRANCH_MISSES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+
+ return TEST_OK;
+}
+
+static int test__checkevent_pinned_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__pinned_group(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
+
+ /* cycles - group leader */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong pinned", evsel->core.attr.pinned);
+
+ /* cache-misses - can not be pinned, but will go on with the leader */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
+
+ /* branch-misses - ditto */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_BRANCH_MISSES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned);
+
+ return TEST_OK;
+}
+
+static int test__checkevent_exclusive_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->core.attr.precise_ip);
+ TEST_ASSERT_VAL("wrong exclusive", evsel->core.attr.exclusive);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__exclusive_group(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->core.nr_entries);
+
+ /* cycles - group leader */
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong exclusive", evsel->core.attr.exclusive);
+
+ /* cache-misses - can not be exclusive, but will go on with the leader */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CACHE_MISSES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
+
+ /* branch-misses - ditto */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_BRANCH_MISSES == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive);
+
+ return TEST_OK;
+}
+
+static int test__checkevent_breakpoint_len(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
+ evsel->core.attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_1 ==
+ evsel->core.attr.bp_len);
+
+ return TEST_OK;
+}
+
+static int test__checkevent_breakpoint_len_w(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong bp_type", HW_BREAKPOINT_W ==
+ evsel->core.attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_2 ==
+ evsel->core.attr.bp_len);
+
+ return TEST_OK;
+}
+
+static int
+test__checkevent_breakpoint_len_rw_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
+
+ return test__checkevent_breakpoint_rw(evlist);
+}
+
+static int test__checkevent_precise_max_modifier(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_SW_TASK_CLOCK == evsel->core.attr.config);
+ return TEST_OK;
+}
+
+static int test__checkevent_config_symbol(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "insn") == 0);
+ return TEST_OK;
+}
+
+static int test__checkevent_config_raw(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "rawpmu") == 0);
+ return TEST_OK;
+}
+
+static int test__checkevent_config_num(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "numpmu") == 0);
+ return TEST_OK;
+}
+
+static int test__checkevent_config_cache(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "cachepmu") == 0);
+ return TEST_OK;
+}
+
+static bool test__intel_pt_valid(void)
+{
+ return !!perf_pmu__find("intel_pt");
+}
+
+static int test__intel_pt(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "intel_pt//u") == 0);
+ return TEST_OK;
+}
+
+static int test__checkevent_complex_name(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong complex name parsing", strcmp(evsel->name, "COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks") == 0);
+ return TEST_OK;
+}
+
+static int test__checkevent_raw_pmu(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+ return TEST_OK;
+}
+
+static int test__sym_event_slash(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
+ TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ return TEST_OK;
+}
+
+static int test__sym_event_dc(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE);
+ TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES);
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ return TEST_OK;
+}
+
+static int count_tracepoints(void)
+{
+ struct dirent *events_ent;
+ DIR *events_dir;
+ int cnt = 0;
+
+ events_dir = tracing_events__opendir();
+
+ TEST_ASSERT_VAL("Can't open events dir", events_dir);
+
+ while ((events_ent = readdir(events_dir))) {
+ char *sys_path;
+ struct dirent *sys_ent;
+ DIR *sys_dir;
+
+ if (!strcmp(events_ent->d_name, ".")
+ || !strcmp(events_ent->d_name, "..")
+ || !strcmp(events_ent->d_name, "enable")
+ || !strcmp(events_ent->d_name, "header_event")
+ || !strcmp(events_ent->d_name, "header_page"))
+ continue;
+
+ sys_path = get_events_file(events_ent->d_name);
+ TEST_ASSERT_VAL("Can't get sys path", sys_path);
+
+ sys_dir = opendir(sys_path);
+ TEST_ASSERT_VAL("Can't open sys dir", sys_dir);
+
+ while ((sys_ent = readdir(sys_dir))) {
+ if (!strcmp(sys_ent->d_name, ".")
+ || !strcmp(sys_ent->d_name, "..")
+ || !strcmp(sys_ent->d_name, "enable")
+ || !strcmp(sys_ent->d_name, "filter"))
+ continue;
+
+ cnt++;
+ }
+
+ closedir(sys_dir);
+ put_events_file(sys_path);
+ }
+
+ closedir(events_dir);
+ return cnt;
+}
+
+static int test__all_tracepoints(struct evlist *evlist)
+{
+ TEST_ASSERT_VAL("wrong events count",
+ count_tracepoints() == evlist->core.nr_entries);
+
+ return test__checkevent_tracepoint_multi(evlist);
+}
+
+static int test__hybrid_hw_event_with_pmu(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
+ return TEST_OK;
+}
+
+static int test__hybrid_hw_group_event(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0xc0 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ return TEST_OK;
+}
+
+static int test__hybrid_sw_hw_group_event(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ return TEST_OK;
+}
+
+static int test__hybrid_hw_sw_group_event(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ return TEST_OK;
+}
+
+static int test__hybrid_group_modifier1(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x3c == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
+
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0xc0 == evsel->core.attr.config);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
+ return TEST_OK;
+}
+
+static int test__hybrid_raw1(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ if (!perf_pmu__hybrid_mounted("cpu_atom")) {
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+ return TEST_OK;
+ }
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+
+ /* The type of the second event is a random value */
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+ return TEST_OK;
+}
+
+static int test__hybrid_raw2(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x1a == evsel->core.attr.config);
+ return TEST_OK;
+}
+
+static int test__hybrid_cache_event(struct evlist *evlist)
+{
+ struct evsel *evsel = evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", 0x2 == (evsel->core.attr.config & 0xffffffff));
+ return TEST_OK;
+}
+
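+/*
+ * One parse-events test case: an event string to parse, an optional
+ * validity gate (the case is skipped when it returns false) and a
+ * checker run on the resulting evlist.
+ */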
+struct evlist_test {
+ const char *name;
+ bool (*valid)(void);
+ int (*check)(struct evlist *evlist);
+};
+
+static const struct evlist_test test__events[] = {
+ {
+ .name = "syscalls:sys_enter_openat",
+ .check = test__checkevent_tracepoint,
+ /* 0 */
+ },
+ {
+ .name = "syscalls:*",
+ .check = test__checkevent_tracepoint_multi,
+ /* 1 */
+ },
+ {
+ .name = "r1a",
+ .check = test__checkevent_raw,
+ /* 2 */
+ },
+ {
+ .name = "1:1",
+ .check = test__checkevent_numeric,
+ /* 3 */
+ },
+ {
+ .name = "instructions",
+ .check = test__checkevent_symbolic_name,
+ /* 4 */
+ },
+ {
+ .name = "cycles/period=100000,config2/",
+ .check = test__checkevent_symbolic_name_config,
+ /* 5 */
+ },
+ {
+ .name = "faults",
+ .check = test__checkevent_symbolic_alias,
+ /* 6 */
+ },
+ {
+ .name = "L1-dcache-load-miss",
+ .check = test__checkevent_genhw,
+ /* 7 */
+ },
+ {
+ .name = "mem:0",
+ .check = test__checkevent_breakpoint,
+ /* 8 */
+ },
+ {
+ .name = "mem:0:x",
+ .check = test__checkevent_breakpoint_x,
+ /* 9 */
+ },
+ {
+ .name = "mem:0:r",
+ .check = test__checkevent_breakpoint_r,
+ /* 0 */
+ },
+ {
+ .name = "mem:0:w",
+ .check = test__checkevent_breakpoint_w,
+ /* 1 */
+ },
+ {
+ .name = "syscalls:sys_enter_openat:k",
+ .check = test__checkevent_tracepoint_modifier,
+ /* 2 */
+ },
+ {
+ .name = "syscalls:*:u",
+ .check = test__checkevent_tracepoint_multi_modifier,
+ /* 3 */
+ },
+ {
+ .name = "r1a:kp",
+ .check = test__checkevent_raw_modifier,
+ /* 4 */
+ },
+ {
+ .name = "1:1:hp",
+ .check = test__checkevent_numeric_modifier,
+ /* 5 */
+ },
+ {
+ .name = "instructions:h",
+ .check = test__checkevent_symbolic_name_modifier,
+ /* 6 */
+ },
+ {
+ .name = "faults:u",
+ .check = test__checkevent_symbolic_alias_modifier,
+ /* 7 */
+ },
+ {
+ .name = "L1-dcache-load-miss:kp",
+ .check = test__checkevent_genhw_modifier,
+ /* 8 */
+ },
+ {
+ .name = "mem:0:u",
+ .check = test__checkevent_breakpoint_modifier,
+ /* 9 */
+ },
+ {
+ .name = "mem:0:x:k",
+ .check = test__checkevent_breakpoint_x_modifier,
+ /* 0 */
+ },
+ {
+ .name = "mem:0:r:hp",
+ .check = test__checkevent_breakpoint_r_modifier,
+ /* 1 */
+ },
+ {
+ .name = "mem:0:w:up",
+ .check = test__checkevent_breakpoint_w_modifier,
+ /* 2 */
+ },
+ {
+ .name = "r1,syscalls:sys_enter_openat:k,1:1:hp",
+ .check = test__checkevent_list,
+ /* 3 */
+ },
+ {
+ .name = "instructions:G",
+ .check = test__checkevent_exclude_host_modifier,
+ /* 4 */
+ },
+ {
+ .name = "instructions:H",
+ .check = test__checkevent_exclude_guest_modifier,
+ /* 5 */
+ },
+ {
+ .name = "mem:0:rw",
+ .check = test__checkevent_breakpoint_rw,
+ /* 6 */
+ },
+ {
+ .name = "mem:0:rw:kp",
+ .check = test__checkevent_breakpoint_rw_modifier,
+ /* 7 */
+ },
+ {
+ .name = "{instructions:k,cycles:upp}",
+ .check = test__group1,
+ /* 8 */
+ },
+ {
+ .name = "{faults:k,cache-references}:u,cycles:k",
+ .check = test__group2,
+ /* 9 */
+ },
+ {
+ .name = "group1{syscalls:sys_enter_openat:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u",
+ .check = test__group3,
+ /* 0 */
+ },
+ {
+ .name = "{cycles:u,instructions:kp}:p",
+ .check = test__group4,
+ /* 1 */
+ },
+ {
+ .name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles",
+ .check = test__group5,
+ /* 2 */
+ },
+ {
+ .name = "*:*",
+ .check = test__all_tracepoints,
+ /* 3 */
+ },
+ {
+ .name = "{cycles,cache-misses:G}:H",
+ .check = test__group_gh1,
+ /* 4 */
+ },
+ {
+ .name = "{cycles,cache-misses:H}:G",
+ .check = test__group_gh2,
+ /* 5 */
+ },
+ {
+ .name = "{cycles:G,cache-misses:H}:u",
+ .check = test__group_gh3,
+ /* 6 */
+ },
+ {
+ .name = "{cycles:G,cache-misses:H}:uG",
+ .check = test__group_gh4,
+ /* 7 */
+ },
+ {
+ .name = "{cycles,cache-misses,branch-misses}:S",
+ .check = test__leader_sample1,
+ /* 8 */
+ },
+ {
+ .name = "{instructions,branch-misses}:Su",
+ .check = test__leader_sample2,
+ /* 9 */
+ },
+ {
+ .name = "instructions:uDp",
+ .check = test__checkevent_pinned_modifier,
+ /* 0 */
+ },
+ {
+ .name = "{cycles,cache-misses,branch-misses}:D",
+ .check = test__pinned_group,
+ /* 1 */
+ },
+ {
+ .name = "mem:0/1",
+ .check = test__checkevent_breakpoint_len,
+ /* 2 */
+ },
+ {
+ .name = "mem:0/2:w",
+ .check = test__checkevent_breakpoint_len_w,
+ /* 3 */
+ },
+ {
+ .name = "mem:0/4:rw:u",
+ .check = test__checkevent_breakpoint_len_rw_modifier,
+ /* 4 */
+ },
+#if defined(__s390x__)
+ {
+ .name = "kvm-s390:kvm_s390_create_vm",
+ .check = test__checkevent_tracepoint,
+ .valid = kvm_s390_create_vm_valid,
+ /* 0 */
+ },
+#endif
+ {
+ .name = "instructions:I",
+ .check = test__checkevent_exclude_idle_modifier,
+ /* 5 */
+ },
+ {
+ .name = "instructions:kIG",
+ .check = test__checkevent_exclude_idle_modifier_1,
+ /* 6 */
+ },
+ {
+ .name = "task-clock:P,cycles",
+ .check = test__checkevent_precise_max_modifier,
+ /* 7 */
+ },
+ {
+ .name = "instructions/name=insn/",
+ .check = test__checkevent_config_symbol,
+ /* 8 */
+ },
+ {
+ .name = "r1234/name=rawpmu/",
+ .check = test__checkevent_config_raw,
+ /* 9 */
+ },
+ {
+ .name = "4:0x6530160/name=numpmu/",
+ .check = test__checkevent_config_num,
+ /* 0 */
+ },
+ {
+ .name = "L1-dcache-misses/name=cachepmu/",
+ .check = test__checkevent_config_cache,
+ /* 1 */
+ },
+ {
+ .name = "intel_pt//u",
+ .valid = test__intel_pt_valid,
+ .check = test__intel_pt,
+ /* 2 */
+ },
+ {
+ .name = "cycles/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks'/Duk",
+ .check = test__checkevent_complex_name,
+ /* 3 */
+ },
+ {
+ .name = "cycles//u",
+ .check = test__sym_event_slash,
+ /* 4 */
+ },
+ {
+ .name = "cycles:k",
+ .check = test__sym_event_dc,
+ /* 5 */
+ },
+ {
+ .name = "instructions:uep",
+ .check = test__checkevent_exclusive_modifier,
+ /* 6 */
+ },
+ {
+ .name = "{cycles,cache-misses,branch-misses}:e",
+ .check = test__exclusive_group,
+ /* 7 */
+ },
+};
+
+static const struct evlist_test test__events_pmu[] = {
+ {
+ .name = "cpu/config=10,config1,config2=3,period=1000/u",
+ .check = test__checkevent_pmu,
+ /* 0 */
+ },
+ {
+ .name = "cpu/config=1,name=krava/u,cpu/config=2/u",
+ .check = test__checkevent_pmu_name,
+ /* 1 */
+ },
+ {
+ .name = "cpu/config=1,call-graph=fp,time,period=100000/,cpu/config=2,call-graph=no,time=0,period=2000/",
+ .check = test__checkevent_pmu_partial_time_callgraph,
+ /* 2 */
+ },
+ {
+ .name = "cpu/name='COMPLEX_CYCLES_NAME:orig=cycles,desc=chip-clock-ticks',period=0x1,event=0x2/ukp",
+ .check = test__checkevent_complex_name,
+ /* 3 */
+ },
+ {
+ .name = "software/r1a/",
+ .check = test__checkevent_raw_pmu,
+ /* 4 */
+ },
+ {
+ .name = "software/r0x1a/",
+ .check = test__checkevent_raw_pmu,
+ /* 5 */
+ },
+};
+
+struct terms_test {
+ const char *str;
+ int (*check)(struct list_head *terms);
+};
+
+static const struct terms_test test__terms[] = {
+ [0] = {
+ .str = "config=10,config1,config2=3,umask=1,read,r0xead",
+ .check = test__checkterms_simple,
+ },
+};
+
+static const struct evlist_test test__hybrid_events[] = {
+ {
+ .name = "cpu_core/cpu-cycles/",
+ .check = test__hybrid_hw_event_with_pmu,
+ /* 0 */
+ },
+ {
+ .name = "{cpu_core/cpu-cycles/,cpu_core/instructions/}",
+ .check = test__hybrid_hw_group_event,
+ /* 1 */
+ },
+ {
+ .name = "{cpu-clock,cpu_core/cpu-cycles/}",
+ .check = test__hybrid_sw_hw_group_event,
+ /* 2 */
+ },
+ {
+ .name = "{cpu_core/cpu-cycles/,cpu-clock}",
+ .check = test__hybrid_hw_sw_group_event,
+ /* 3 */
+ },
+ {
+ .name = "{cpu_core/cpu-cycles/k,cpu_core/instructions/u}",
+ .check = test__hybrid_group_modifier1,
+ /* 4 */
+ },
+ {
+ .name = "r1a",
+ .check = test__hybrid_raw1,
+ /* 5 */
+ },
+ {
+ .name = "cpu_core/r1a/",
+ .check = test__hybrid_raw2,
+ /* 6 */
+ },
+ {
+ .name = "cpu_core/config=10,config1,config2=3,period=1000/u",
+ .check = test__checkevent_pmu,
+ /* 7 */
+ },
+ {
+ .name = "cpu_core/LLC-loads/",
+ .check = test__hybrid_cache_event,
+ /* 8 */
+ },
+};
+
+static int test_event(const struct evlist_test *e)
+{
+ struct parse_events_error err;
+ struct evlist *evlist;
+ int ret;
+
+ if (e->valid && !e->valid()) {
+ pr_debug("... SKIP\n");
+ return TEST_OK;
+ }
+
+ evlist = evlist__new();
+ if (evlist == NULL) {
+ pr_err("Failed allocation\n");
+ return TEST_FAIL;
+ }
+ parse_events_error__init(&err);
+ ret = parse_events(evlist, e->name, &err);
+ if (ret) {
+ pr_debug("failed to parse event '%s', err %d, str '%s'\n",
+ e->name, ret, err.str);
+ parse_events_error__print(&err, e->name);
+ ret = TEST_FAIL;
+ if (strstr(err.str, "can't access trace events"))
+ ret = TEST_SKIP;
+ } else {
+ ret = e->check(evlist);
+ }
+ parse_events_error__exit(&err);
+ evlist__delete(evlist);
+
+ return ret;
+}
+
+static int test_event_fake_pmu(const char *str)
+{
+ struct parse_events_error err;
+ struct evlist *evlist;
+ int ret;
+
+ evlist = evlist__new();
+ if (!evlist)
+ return -ENOMEM;
+
+ parse_events_error__init(&err);
+ perf_pmu__test_parse_init();
+ ret = __parse_events(evlist, str, &err, &perf_pmu__fake);
+ if (ret) {
+ pr_debug("failed to parse event '%s', err %d, str '%s'\n",
+ str, ret, err.str);
+ parse_events_error__print(&err, str);
+ }
+
+ parse_events_error__exit(&err);
+ evlist__delete(evlist);
+
+ return ret;
+}
+
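+/*
+ * Aggregate per-test results: a TEST_FAIL anywhere makes the whole run
+ * fail, a TEST_SKIP is preserved unless a later test fails, otherwise the
+ * latest result wins (e.g. OK + SKIP -> SKIP, SKIP + FAIL -> FAIL).
+ */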
+static int combine_test_results(int existing, int latest)
+{
+ if (existing == TEST_FAIL)
+ return TEST_FAIL;
+ if (existing == TEST_SKIP)
+ return latest == TEST_OK ? TEST_SKIP : latest;
+ return latest;
+}
+
+static int test_events(const struct evlist_test *events, int cnt)
+{
+ int ret = TEST_OK;
+
+ for (int i = 0; i < cnt; i++) {
+ const struct evlist_test *e = &events[i];
+ int test_ret;
+
+ pr_debug("running test %d '%s'\n", i, e->name);
+ test_ret = test_event(e);
+ if (test_ret != TEST_OK) {
+ pr_debug("Event test failure: test %d '%s'\n", i, e->name);
+ ret = combine_test_results(ret, test_ret);
+ }
+ }
+
+ return ret;
+}
+
+static int test__events2(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ return test_events(test__events, ARRAY_SIZE(test__events));
+}
+
+static int test_term(const struct terms_test *t)
+{
+ struct list_head terms;
+ int ret;
+
+ INIT_LIST_HEAD(&terms);
+
+ /*
+ * The perf_pmu__test_parse_init prepares perf_pmu_events_list
+ * which gets freed in parse_events_terms.
+ */
+ if (perf_pmu__test_parse_init())
+ return -1;
+
+ ret = parse_events_terms(&terms, t->str);
+ if (ret) {
+ pr_debug("failed to parse terms '%s', err %d\n",
+ t->str, ret);
+ return ret;
+ }
+
+ ret = t->check(&terms);
+ parse_events_terms__purge(&terms);
+
+ return ret;
+}
+
+static int test_terms(const struct terms_test *terms, int cnt)
+{
+ int ret = 0;
+
+ for (int i = 0; i < cnt; i++) {
+ const struct terms_test *t = &terms[i];
+
+ pr_debug("running test %d '%s'\n", i, t->str);
+ ret = test_term(t);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int test__terms2(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ return test_terms(test__terms, ARRAY_SIZE(test__terms));
+}
+
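+/*
+ * Returns non-zero when the "cpu" PMU exposes a format directory in sysfs,
+ * i.e. when the cpu PMU parsing tests below can be run on this system.
+ */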
+static int test_pmu(void)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ int ret;
+
+ snprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu/format/",
+ sysfs__mountpoint());
+
+ ret = stat(path, &st);
+ if (ret)
+ pr_debug("omitting PMU cpu tests\n");
+ return !ret;
+}
+
+static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ struct dirent *ent;
+ DIR *dir;
+ int ret;
+
+ if (!test_pmu())
+ return TEST_SKIP;
+
+ snprintf(path, PATH_MAX, "%s/bus/event_source/devices/cpu/events/",
+ sysfs__mountpoint());
+
+ ret = stat(path, &st);
+ if (ret) {
+ pr_debug("omitting PMU cpu events tests: %s\n", path);
+ return TEST_OK;
+ }
+
+ dir = opendir(path);
+ if (!dir) {
+ pr_debug("can't open pmu event dir: %s\n", path);
+ return TEST_FAIL;
+ }
+
+ ret = TEST_OK;
+ while ((ent = readdir(dir))) {
+ struct evlist_test e = { .name = NULL, };
+ char name[2 * NAME_MAX + 1 + 12 + 3];
+ int test_ret;
+
+ /* Names containing . are special and cannot be used directly */
+ if (strchr(ent->d_name, '.'))
+ continue;
+
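+ /*
+ * E.g. if sysfs exposes an event named "branch-misses", the strings
+ * parsed below are "cpu/event=branch-misses/u" and then
+ * "branch-misses:u,cpu/event=branch-misses/u".
+ */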
+ snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name);
+
+ e.name = name;
+ e.check = test__checkevent_pmu_events;
+
+ test_ret = test_event(&e);
+ if (test_ret != TEST_OK) {
+ pr_debug("Test PMU event failed for '%s'\n", name);
+ ret = combine_test_results(ret, test_ret);
+ }
+ snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
+ e.name = name;
+ e.check = test__checkevent_pmu_events_mix;
+ test_ret = test_event(&e);
+ if (test_ret != TEST_OK) {
+ pr_debug("Test PMU event failed for '%s'\n", name);
+ ret = combine_test_results(ret, test_ret);
+ }
+ }
+
+ closedir(dir);
+ return ret;
+}
+
+static int test__pmu_events2(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ if (!test_pmu())
+ return TEST_SKIP;
+
+ return test_events(test__events_pmu, ARRAY_SIZE(test__events_pmu));
+}
+
+static bool test_alias(char **event, char **alias)
+{
+ char path[PATH_MAX];
+ DIR *dir;
+ struct dirent *dent;
+ const char *sysfs = sysfs__mountpoint();
+ char buf[128];
+ FILE *file;
+
+ if (!sysfs)
+ return false;
+
+ snprintf(path, PATH_MAX, "%s/bus/event_source/devices/", sysfs);
+ dir = opendir(path);
+ if (!dir)
+ return false;
+
+ while ((dent = readdir(dir))) {
+ if (!strcmp(dent->d_name, ".") ||
+ !strcmp(dent->d_name, ".."))
+ continue;
+
+ snprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s/alias",
+ sysfs, dent->d_name);
+
+ if (!file_available(path))
+ continue;
+
+ file = fopen(path, "r");
+ if (!file)
+ continue;
+
+ if (!fgets(buf, sizeof(buf), file)) {
+ fclose(file);
+ continue;
+ }
+
+ /* Remove the last '\n' */
+ buf[strlen(buf) - 1] = 0;
+
+ fclose(file);
+ *event = strdup(dent->d_name);
+ *alias = strdup(buf);
+ closedir(dir);
+
+ if (*event == NULL || *alias == NULL) {
+ free(*event);
+ free(*alias);
+ return false;
+ }
+
+ return true;
+ }
+
+ closedir(dir);
+ return false;
+}
+
+static int test__hybrid(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ if (!perf_pmu__has_hybrid())
+ return TEST_SKIP;
+
+ return test_events(test__hybrid_events, ARRAY_SIZE(test__hybrid_events));
+}
+
+static int test__checkevent_pmu_events_alias(struct evlist *evlist)
+{
+ struct evsel *evsel1 = evlist__first(evlist);
+ struct evsel *evsel2 = evlist__last(evlist);
+
+ TEST_ASSERT_VAL("wrong type", evsel1->core.attr.type == evsel2->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", evsel1->core.attr.config == evsel2->core.attr.config);
+ return TEST_OK;
+}
+
+static int test__pmu_events_alias(char *event, char *alias)
+{
+ struct evlist_test e = { .name = NULL, };
+ char name[2 * NAME_MAX + 20];
+
+ snprintf(name, sizeof(name), "%s/event=1/,%s/event=1/",
+ event, alias);
+
+ e.name = name;
+ e.check = test__checkevent_pmu_events_alias;
+ return test_event(&e);
+}
+
+static int test__alias(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ char *event, *alias;
+ int ret;
+
+ if (!test_alias(&event, &alias))
+ return TEST_SKIP;
+
+ ret = test__pmu_events_alias(event, alias);
+
+ free(event);
+ free(alias);
+ return ret;
+}
+
+static int test__pmu_events_alias2(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ static const char events[][30] = {
+ "event-hyphen",
+ "event-two-hyph",
+ };
+ int ret = TEST_OK;
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(events); i++) {
+ int test_ret = test_event_fake_pmu(&events[i][0]);
+
+ if (test_ret != TEST_OK) {
+ pr_debug("check_parse_fake %s failed\n", &events[i][0]);
+ ret = combine_test_results(ret, test_ret);
+ }
+ }
+
+ return ret;
+}
+
+static struct test_case tests__parse_events[] = {
+ TEST_CASE_REASON("Test event parsing",
+ events2,
+ "permissions"),
+ TEST_CASE_REASON("Test parsing of \"hybrid\" CPU events",
+ hybrid,
+ "not hybrid"),
+ TEST_CASE_REASON("Parsing of all PMU events from sysfs",
+ pmu_events,
+ "permissions"),
+ TEST_CASE_REASON("Parsing of given PMU events from sysfs",
+ pmu_events2,
+ "permissions"),
+ TEST_CASE_REASON("Parsing of aliased events from sysfs", alias,
+ "no aliases in sysfs"),
+ TEST_CASE("Parsing of aliased events", pmu_events_alias2),
+ TEST_CASE("Parsing of terms (event modifiers)", terms2),
+ { .name = NULL, }
+};
+
+struct test_suite suite__parse_events = {
+ .desc = "Parse event definition strings",
+ .test_cases = tests__parse_events,
+};
diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c
new file mode 100644
index 000000000..68f5a2a03
--- /dev/null
+++ b/tools/perf/tests/parse-metric.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <string.h>
+#include <perf/cpumap.h>
+#include <perf/evlist.h>
+#include "metricgroup.h"
+#include "tests.h"
+#include "pmu-events/pmu-events.h"
+#include "evlist.h"
+#include "rblist.h"
+#include "debug.h"
+#include "expr.h"
+#include "stat.h"
+#include "pmu.h"
+
+struct value {
+ const char *event;
+ u64 val;
+};
+
+static u64 find_value(const char *name, struct value *values)
+{
+ struct value *v = values;
+
+ while (v->event) {
+ if (!strcmp(name, v->event))
+ return v->val;
+ v++;
+ }
+ return 0;
+}
+
+static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist,
+ struct value *vals)
+{
+ struct evsel *evsel;
+ u64 count;
+
+ perf_stat__reset_shadow_stats();
+ evlist__for_each_entry(evlist, evsel) {
+ count = find_value(evsel->name, vals);
+ perf_stat__update_shadow_stats(evsel, count, 0, st);
+ if (!strcmp(evsel->name, "duration_time"))
+ update_stats(&walltime_nsecs_stats, count);
+ }
+}
+
+static double compute_single(struct rblist *metric_events, struct evlist *evlist,
+ struct runtime_stat *st, const char *name)
+{
+ struct metric_expr *mexp;
+ struct metric_event *me;
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ me = metricgroup__lookup(metric_events, evsel, false);
+ if (me != NULL) {
+ list_for_each_entry (mexp, &me->head, nd) {
+ if (strcmp(mexp->metric_name, name))
+ continue;
+ return test_generic_metric(mexp, 0, st);
+ }
+ }
+ }
+ return 0.;
+}
+
+static int __compute_metric(const char *name, struct value *vals,
+ const char *name1, double *ratio1,
+ const char *name2, double *ratio2)
+{
+ struct rblist metric_events = {
+ .nr_entries = 0,
+ };
+ const struct pmu_events_table *pme_test;
+ struct perf_cpu_map *cpus;
+ struct runtime_stat st;
+ struct evlist *evlist;
+ int err;
+
+ /*
+ * We need to prepare evlist for stat mode running on CPU 0
+ * because that's where all the stats are going to be created.
+ */
+ evlist = evlist__new();
+ if (!evlist)
+ return -ENOMEM;
+
+ cpus = perf_cpu_map__new("0");
+ if (!cpus) {
+ evlist__delete(evlist);
+ return -ENOMEM;
+ }
+
+ perf_evlist__set_maps(&evlist->core, cpus, NULL);
+ runtime_stat__init(&st);
+
+ /* Parse the metric into metric_events list. */
+ pme_test = find_core_events_table("testarch", "testcpu");
+ err = metricgroup__parse_groups_test(evlist, pme_test, name,
+ false, false,
+ &metric_events);
+ if (err)
+ goto out;
+
+ err = evlist__alloc_stats(evlist, false);
+ if (err)
+ goto out;
+
+ /* Load the runtime stats with given numbers for events. */
+ load_runtime_stat(&st, evlist, vals);
+
+ /* And execute the metric */
+ if (name1 && ratio1)
+ *ratio1 = compute_single(&metric_events, evlist, &st, name1);
+ if (name2 && ratio2)
+ *ratio2 = compute_single(&metric_events, evlist, &st, name2);
+
+out:
+ /* ... cleanup. */
+ metricgroup__rblist_exit(&metric_events);
+ runtime_stat__exit(&st);
+ evlist__free_stats(evlist);
+ perf_cpu_map__put(cpus);
+ evlist__delete(evlist);
+ return err;
+}
+
+static int compute_metric(const char *name, struct value *vals, double *ratio)
+{
+ return __compute_metric(name, vals, name, ratio, NULL, NULL);
+}
+
+static int compute_metric_group(const char *name, struct value *vals,
+ const char *name1, double *ratio1,
+ const char *name2, double *ratio2)
+{
+ return __compute_metric(name, vals, name1, ratio1, name2, ratio2);
+}
+
+static int test_ipc(void)
+{
+ double ratio;
+ struct value vals[] = {
+ { .event = "inst_retired.any", .val = 300 },
+ { .event = "cpu_clk_unhalted.thread", .val = 200 },
+ { .event = NULL, },
+ };
+
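+ /*
+ * Assuming the test metric table defines IPC as
+ * inst_retired.any / cpu_clk_unhalted.thread, the expected ratio
+ * is 300 / 200 = 1.5.
+ */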
+ TEST_ASSERT_VAL("failed to compute metric",
+ compute_metric("IPC", vals, &ratio) == 0);
+
+ TEST_ASSERT_VAL("IPC failed, wrong ratio",
+ ratio == 1.5);
+ return 0;
+}
+
+static int test_frontend(void)
+{
+ double ratio;
+ struct value vals[] = {
+ { .event = "idq_uops_not_delivered.core", .val = 300 },
+ { .event = "cpu_clk_unhalted.thread", .val = 200 },
+ { .event = "cpu_clk_unhalted.one_thread_active", .val = 400 },
+ { .event = "cpu_clk_unhalted.ref_xclk", .val = 600 },
+ { .event = NULL, },
+ };
+
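+ /*
+ * Assuming the test metric table uses the usual TMA definition,
+ * Frontend_Bound_SMT = idq_uops_not_delivered.core /
+ * (4 * (cpu_clk_unhalted.thread / 2 *
+ * (1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk)))
+ * = 300 / (4 * 100 * (1 + 400 / 600)) = 0.45.
+ */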
+ TEST_ASSERT_VAL("failed to compute metric",
+ compute_metric("Frontend_Bound_SMT", vals, &ratio) == 0);
+
+ TEST_ASSERT_VAL("Frontend_Bound_SMT failed, wrong ratio",
+ ratio == 0.45);
+ return 0;
+}
+
+static int test_cache_miss_cycles(void)
+{
+ double ratio;
+ struct value vals[] = {
+ { .event = "l1d-loads-misses", .val = 300 },
+ { .event = "l1i-loads-misses", .val = 200 },
+ { .event = "inst_retired.any", .val = 400 },
+ { .event = NULL, },
+ };
+
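+ /*
+ * Assuming cache_miss_cycles is defined in the test metric table as
+ * (l1d-loads-misses + l1i-loads-misses) / inst_retired.any, the
+ * expected ratio is (300 + 200) / 400 = 1.25.
+ */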
+ TEST_ASSERT_VAL("failed to compute metric",
+ compute_metric("cache_miss_cycles", vals, &ratio) == 0);
+
+ TEST_ASSERT_VAL("cache_miss_cycles failed, wrong ratio",
+ ratio == 1.25);
+ return 0;
+}
+
+/*
+ * DCache_L2_All_Hits = l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit
+ * DCache_L2_All_Miss = max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) +
+ * l2_rqsts.pf_miss + l2_rqsts.rfo_miss
+ * DCache_L2_All = dcache_l2_all_hits + dcache_l2_all_miss
+ * DCache_L2_Hits = d_ratio(dcache_l2_all_hits, dcache_l2_all)
+ * DCache_L2_Misses = d_ratio(dcache_l2_all_miss, dcache_l2_all)
+ *
+ * l2_rqsts.demand_data_rd_hit = 100
+ * l2_rqsts.pf_hit = 200
+ * l2_rqsts.rfo_hit = 300
+ * l2_rqsts.all_demand_data_rd = 400
+ * l2_rqsts.pf_miss = 500
+ * l2_rqsts.rfo_miss = 600
+ *
+ * DCache_L2_All_Hits = 600
+ * DCache_L2_All_Miss = MAX(400 - 100, 0) + 500 + 600 = 1400
+ * DCache_L2_All = 600 + 1400 = 2000
+ * DCache_L2_Hits = 600 / 2000 = 0.3
+ * DCache_L2_Misses = 1400 / 2000 = 0.7
+ */
+static int test_dcache_l2(void)
+{
+ double ratio;
+ struct value vals[] = {
+ { .event = "l2_rqsts.demand_data_rd_hit", .val = 100 },
+ { .event = "l2_rqsts.pf_hit", .val = 200 },
+ { .event = "l2_rqsts.rfo_hit", .val = 300 },
+ { .event = "l2_rqsts.all_demand_data_rd", .val = 400 },
+ { .event = "l2_rqsts.pf_miss", .val = 500 },
+ { .event = "l2_rqsts.rfo_miss", .val = 600 },
+ { .event = NULL, },
+ };
+
+ TEST_ASSERT_VAL("failed to compute metric",
+ compute_metric("DCache_L2_Hits", vals, &ratio) == 0);
+
+ TEST_ASSERT_VAL("DCache_L2_Hits failed, wrong ratio",
+ ratio == 0.3);
+
+ TEST_ASSERT_VAL("failed to compute metric",
+ compute_metric("DCache_L2_Misses", vals, &ratio) == 0);
+
+ TEST_ASSERT_VAL("DCache_L2_Misses failed, wrong ratio",
+ ratio == 0.7);
+ return 0;
+}
+
+static int test_recursion_fail(void)
+{
+ double ratio;
+ struct value vals[] = {
+ { .event = "inst_retired.any", .val = 300 },
+ { .event = "cpu_clk_unhalted.thread", .val = 200 },
+ { .event = NULL, },
+ };
+
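+ /*
+ * M1 and M3 are defined in the test metric table so that they
+ * reference themselves, directly or via another metric, so parsing
+ * them is expected to fail with -1.
+ */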
+ TEST_ASSERT_VAL("failed to find recursion",
+ compute_metric("M1", vals, &ratio) == -1);
+
+ TEST_ASSERT_VAL("failed to find recursion",
+ compute_metric("M3", vals, &ratio) == -1);
+ return 0;
+}
+
+static int test_memory_bandwidth(void)
+{
+ double ratio;
+ struct value vals[] = {
+ { .event = "l1d.replacement", .val = 4000000 },
+ { .event = "duration_time", .val = 200000000 },
+ { .event = NULL, },
+ };
+
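+ /*
+ * Assuming L1D_Cache_Fill_BW is defined as
+ * 64 * l1d.replacement / 1e9 / (duration_time / 1e9), i.e. GB/s of
+ * 64-byte cache-line fills, the expected ratio is
+ * 64 * 4000000 / 200000000 = 1.28.
+ */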
+ TEST_ASSERT_VAL("failed to compute metric",
+ compute_metric("L1D_Cache_Fill_BW", vals, &ratio) == 0);
+ TEST_ASSERT_VAL("L1D_Cache_Fill_BW, wrong ratio",
+ 1.28 == ratio);
+
+ return 0;
+}
+
+static int test_metric_group(void)
+{
+ double ratio1, ratio2;
+ struct value vals[] = {
+ { .event = "cpu_clk_unhalted.thread", .val = 200 },
+ { .event = "l1d-loads-misses", .val = 300 },
+ { .event = "l1i-loads-misses", .val = 200 },
+ { .event = "inst_retired.any", .val = 400 },
+ { .event = NULL, },
+ };
+
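+ /*
+ * Within the group, IPC = 400 / 200 = 2.0 and cache_miss_cycles =
+ * (300 + 200) / 400 = 1.25 for these counts (assuming the metric
+ * definitions noted in the single-metric tests above).
+ */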
+ TEST_ASSERT_VAL("failed to compute metric group",
+ compute_metric_group("group1", vals,
+ "IPC", &ratio1,
+ "cache_miss_cycles", &ratio2) == 0);
+
+ TEST_ASSERT_VAL("group IPC failed, wrong ratio",
+ ratio1 == 2.0);
+
+ TEST_ASSERT_VAL("group cache_miss_cycles failed, wrong ratio",
+ ratio2 == 1.25);
+ return 0;
+}
+
+static int test__parse_metric(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ TEST_ASSERT_VAL("IPC failed", test_ipc() == 0);
+ TEST_ASSERT_VAL("frontend failed", test_frontend() == 0);
+ TEST_ASSERT_VAL("DCache_L2 failed", test_dcache_l2() == 0);
+ TEST_ASSERT_VAL("recursion fail failed", test_recursion_fail() == 0);
+ TEST_ASSERT_VAL("Memory bandwidth", test_memory_bandwidth() == 0);
+
+ if (!perf_pmu__has_hybrid()) {
+ TEST_ASSERT_VAL("cache_miss_cycles failed", test_cache_miss_cycles() == 0);
+ TEST_ASSERT_VAL("test metric group", test_metric_group() == 0);
+ }
+ return 0;
+}
+
+DEFINE_SUITE("Parse and process metrics", parse_metric);
diff --git a/tools/perf/tests/parse-no-sample-id-all.c b/tools/perf/tests/parse-no-sample-id-all.c
new file mode 100644
index 000000000..d62e31595
--- /dev/null
+++ b/tools/perf/tests/parse-no-sample-id-all.c
@@ -0,0 +1,108 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <stddef.h>
+
+#include "tests.h"
+
+#include "event.h"
+#include "evlist.h"
+#include "header.h"
+#include "debug.h"
+
+static int process_event(struct evlist **pevlist, union perf_event *event)
+{
+ struct perf_sample sample;
+
+ if (event->header.type == PERF_RECORD_HEADER_ATTR) {
+ if (perf_event__process_attr(NULL, event, pevlist)) {
+ pr_debug("perf_event__process_attr failed\n");
+ return -1;
+ }
+ return 0;
+ }
+
+ if (event->header.type >= PERF_RECORD_USER_TYPE_START)
+ return -1;
+
+ if (!*pevlist)
+ return -1;
+
+ if (evlist__parse_sample(*pevlist, event, &sample)) {
+ pr_debug("evlist__parse_sample failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int process_events(union perf_event **events, size_t count)
+{
+ struct evlist *evlist = NULL;
+ int err = 0;
+ size_t i;
+
+ for (i = 0; i < count && !err; i++)
+ err = process_event(&evlist, events[i]);
+
+ evlist__delete(evlist);
+
+ return err;
+}
+
+struct test_attr_event {
+ struct perf_event_header header;
+ struct perf_event_attr attr;
+ u64 id;
+};
+
+/**
+ * test__parse_no_sample_id_all - test parsing with no sample_id_all bit set.
+ *
+ * This function tests parsing data produced on kernels that do not support the
+ * sample_id_all bit. Without the sample_id_all bit, non-sample events (such as
+ * mmap events) do not have an id sample appended, and consequently logic
+ * designed to determine the id will not work. That case happens when there is
+ * more than one selected event, so this test processes three events: 2
+ * attributes representing the selected events and one mmap event.
+ *
+ * Return: %0 on success, %-1 if the test fails.
+ */
+static int test__parse_no_sample_id_all(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ int err;
+
+ struct test_attr_event event1 = {
+ .header = {
+ .type = PERF_RECORD_HEADER_ATTR,
+ .size = sizeof(struct test_attr_event),
+ },
+ .id = 1,
+ };
+ struct test_attr_event event2 = {
+ .header = {
+ .type = PERF_RECORD_HEADER_ATTR,
+ .size = sizeof(struct test_attr_event),
+ },
+ .id = 2,
+ };
+ struct perf_record_mmap event3 = {
+ .header = {
+ .type = PERF_RECORD_MMAP,
+ .size = sizeof(struct perf_record_mmap),
+ },
+ };
+ union perf_event *events[] = {
+ (union perf_event *)&event1,
+ (union perf_event *)&event2,
+ (union perf_event *)&event3,
+ };
+
+ err = process_events(events, ARRAY_SIZE(events));
+ if (err)
+ return -1;
+
+ return 0;
+}
+
+DEFINE_SUITE("Parse with no sample_id_all bit set", parse_no_sample_id_all);
diff --git a/tools/perf/tests/pe-file-parsing.c b/tools/perf/tests/pe-file-parsing.c
new file mode 100644
index 000000000..c09a9fae1
--- /dev/null
+++ b/tools/perf/tests/pe-file-parsing.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdbool.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <subcmd/exec-cmd.h>
+
+#include "debug.h"
+#include "util/build-id.h"
+#include "util/symbol.h"
+#include "util/dso.h"
+
+#include "tests.h"
+
+#ifdef HAVE_LIBBFD_SUPPORT
+
+static int run_dir(const char *d)
+{
+ char filename[PATH_MAX];
+ char debugfile[PATH_MAX];
+ struct build_id bid;
+ char debuglink[PATH_MAX];
+ char expect_build_id[] = {
+ 0x5a, 0x0f, 0xd8, 0x82, 0xb5, 0x30, 0x84, 0x22,
+ 0x4b, 0xa4, 0x7b, 0x62, 0x4c, 0x55, 0xa4, 0x69,
+ };
+ char expect_debuglink[PATH_MAX] = "pe-file.exe.debug";
+ struct dso *dso;
+ struct symbol *sym;
+ int ret;
+
+ scnprintf(filename, PATH_MAX, "%s/pe-file.exe", d);
+ ret = filename__read_build_id(filename, &bid);
+ TEST_ASSERT_VAL("Failed to read build_id",
+ ret == sizeof(expect_build_id));
+ TEST_ASSERT_VAL("Wrong build_id", !memcmp(bid.data, expect_build_id,
+ sizeof(expect_build_id)));
+
+ ret = filename__read_debuglink(filename, debuglink, PATH_MAX);
+ TEST_ASSERT_VAL("Failed to read debuglink", ret == 0);
+ TEST_ASSERT_VAL("Wrong debuglink",
+ !strcmp(debuglink, expect_debuglink));
+
+ scnprintf(debugfile, PATH_MAX, "%s/%s", d, debuglink);
+ ret = filename__read_build_id(debugfile, &bid);
+ TEST_ASSERT_VAL("Failed to read debug file build_id",
+ ret == sizeof(expect_build_id));
+ TEST_ASSERT_VAL("Wrong build_id", !memcmp(bid.data, expect_build_id,
+ sizeof(expect_build_id)));
+
+ dso = dso__new(filename);
+ TEST_ASSERT_VAL("Failed to get dso", dso);
+
+ ret = dso__load_bfd_symbols(dso, debugfile);
+ TEST_ASSERT_VAL("Failed to load symbols", ret == 0);
+
+ dso__sort_by_name(dso);
+ sym = dso__find_symbol_by_name(dso, "main");
+ TEST_ASSERT_VAL("Failed to find main", sym);
+ dso__delete(dso);
+
+ return TEST_OK;
+}
+
+static int test__pe_file_parsing(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct stat st;
+ char path_dir[PATH_MAX];
+
+ /* First try development tree tests. */
+ if (!lstat("./tests", &st))
+ return run_dir("./tests");
+
+ /* Then installed path. */
+ snprintf(path_dir, PATH_MAX, "%s/tests", get_argv_exec_path());
+
+ if (!lstat(path_dir, &st))
+ return run_dir(path_dir);
+
+ return TEST_SKIP;
+}
+
+#else
+
+static int test__pe_file_parsing(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ return TEST_SKIP;
+}
+
+#endif
+
+DEFINE_SUITE("PE file support", pe_file_parsing);
diff --git a/tools/perf/tests/pe-file.c b/tools/perf/tests/pe-file.c
new file mode 100644
index 000000000..eb3df5e98
--- /dev/null
+++ b/tools/perf/tests/pe-file.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// pe-file.exe and pe-file.exe.debug built with:
+// x86_64-w64-mingw32-gcc -o pe-file.exe pe-file.c
+// -Wl,--file-alignment,4096 -Wl,--build-id
+// x86_64-w64-mingw32-objcopy --only-keep-debug
+// --compress-debug-sections pe-file.exe pe-file.exe.debug
+// x86_64-w64-mingw32-objcopy --strip-debug
+// --add-gnu-debuglink=pe-file.exe.debug pe-file.exe
+
+int main(int argc, char const *argv[])
+{
+ return 0;
+}
diff --git a/tools/perf/tests/pe-file.exe b/tools/perf/tests/pe-file.exe
new file mode 100644
index 000000000..838a46dae
--- /dev/null
+++ b/tools/perf/tests/pe-file.exe
Binary files differ
diff --git a/tools/perf/tests/pe-file.exe.debug b/tools/perf/tests/pe-file.exe.debug
new file mode 100644
index 000000000..287d6718d
--- /dev/null
+++ b/tools/perf/tests/pe-file.exe.debug
Binary files differ
diff --git a/tools/perf/tests/perf-hooks.c b/tools/perf/tests/perf-hooks.c
new file mode 100644
index 000000000..78cdeb896
--- /dev/null
+++ b/tools/perf/tests/perf-hooks.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <signal.h>
+#include <stdlib.h>
+
+#include "tests.h"
+#include "debug.h"
+#include "perf-hooks.h"
+
+static void sigsegv_handler(int sig __maybe_unused)
+{
+ pr_debug("SIGSEGV is observed as expected, try to recover.\n");
+ perf_hooks__recover();
+ signal(SIGSEGV, SIG_DFL);
+ raise(SIGSEGV);
+ exit(-1);
+}
+
+static void the_hook(void *_hook_flags)
+{
+ int *hook_flags = _hook_flags;
+
+ *hook_flags = 1234;
+
+ /* Generate a segfault, test perf_hooks__recover */
+ raise(SIGSEGV);
+}
+
+static int test__perf_hooks(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int hook_flags = 0;
+
+ signal(SIGSEGV, sigsegv_handler);
+ perf_hooks__set_hook("test", the_hook, &hook_flags);
+ perf_hooks__invoke_test();
+
+ /* hook is triggered? */
+ if (hook_flags != 1234) {
+ pr_debug("Setting failed: %d (%p)\n", hook_flags, &hook_flags);
+ return TEST_FAIL;
+ }
+
+ /* the buggy hook is removed? */
+ if (perf_hooks__get_hook("test"))
+ return TEST_FAIL;
+ return TEST_OK;
+}
+
+DEFINE_SUITE("perf hooks", perf_hooks);
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
new file mode 100644
index 000000000..7aa946aa8
--- /dev/null
+++ b/tools/perf/tests/perf-record.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/string.h>
+
+#include <sched.h>
+#include <perf/mmap.h>
+#include "evlist.h"
+#include "evsel.h"
+#include "debug.h"
+#include "record.h"
+#include "tests.h"
+#include "util/mmap.h"
+
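+/*
+ * Find the first CPU in the task's affinity mask and clear every other CPU
+ * from the mask, so the workload can later be pinned to a single CPU and
+ * perf_sample.cpu checked against it.
+ */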
+static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
+{
+ int i, cpu = -1, nrcpus = 1024;
+realloc:
+ CPU_ZERO(maskp);
+
+ if (sched_getaffinity(pid, sizeof(*maskp), maskp) == -1) {
+ if (errno == EINVAL && nrcpus < (1024 << 8)) {
+ nrcpus = nrcpus << 2;
+ goto realloc;
+ }
+ perror("sched_getaffinity");
+ return -1;
+ }
+
+ for (i = 0; i < nrcpus; i++) {
+ if (CPU_ISSET(i, maskp)) {
+ if (cpu == -1)
+ cpu = i;
+ else
+ CPU_CLR(i, maskp);
+ }
+ }
+
+ return cpu;
+}
+
+static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct record_opts opts = {
+ .target = {
+ .uid = UINT_MAX,
+ .uses_mmap = true,
+ },
+ .no_buffering = true,
+ .mmap_pages = 256,
+ };
+ cpu_set_t cpu_mask;
+ size_t cpu_mask_size = sizeof(cpu_mask);
+ struct evlist *evlist = evlist__new_dummy();
+ struct evsel *evsel;
+ struct perf_sample sample;
+ const char *cmd = "sleep";
+ const char *argv[] = { cmd, "1", NULL, };
+ char *bname, *mmap_filename;
+ u64 prev_time = 0;
+ bool found_cmd_mmap = false,
+ found_coreutils_mmap = false,
+ found_libc_mmap = false,
+ found_vdso_mmap = false,
+ found_ld_mmap = false;
+ int err = -1, errs = 0, i, wakeups = 0;
+ u32 cpu;
+ int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
+ char sbuf[STRERR_BUFSIZE];
+
+ if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
+ evlist = evlist__new_default();
+
+ if (evlist == NULL) {
+ pr_debug("Not enough memory to create evlist\n");
+ goto out;
+ }
+
+ /*
+ * Create maps of threads and cpus to monitor. In this case
+ * we start with all threads and cpus (-1, -1) but then in
+ * evlist__prepare_workload we'll fill in the only thread
+ * we're monitoring, the one forked there.
+ */
+ err = evlist__create_maps(evlist, &opts.target);
+ if (err < 0) {
+ pr_debug("Not enough memory to create thread/cpu maps\n");
+ goto out_delete_evlist;
+ }
+
+ /*
+ * Prepare the workload in argv[] to run, it'll fork it, and then wait
+ * for evlist__start_workload() to exec it. This is done this way
+ * so that we have time to open the evlist (calling sys_perf_event_open
+ * on all the fds) and then mmap them.
+ */
+ err = evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
+ if (err < 0) {
+ pr_debug("Couldn't run the workload!\n");
+ goto out_delete_evlist;
+ }
+
+ /*
+ * Config the evsels, setting attr->comm on the first one, etc.
+ */
+ evsel = evlist__first(evlist);
+ evsel__set_sample_bit(evsel, CPU);
+ evsel__set_sample_bit(evsel, TID);
+ evsel__set_sample_bit(evsel, TIME);
+ evlist__config(evlist, &opts, NULL);
+
+ err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
+ if (err < 0) {
+ pr_debug("sched__get_first_possible_cpu: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ cpu = err;
+
+ /*
+ * So that we can check perf_sample.cpu on all the samples.
+ */
+ if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
+ pr_debug("sched_setaffinity: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ /*
+ * Call sys_perf_event_open on all the fds on all the evsels,
+ * grouping them if asked to.
+ */
+ err = evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("perf_evlist__open: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ /*
+ * mmap the first fd on a given CPU and ask for events for the other
+ * fds in the same CPU to be injected in the same mmap ring buffer
+ * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
+ */
+ err = evlist__mmap(evlist, opts.mmap_pages);
+ if (err < 0) {
+ pr_debug("evlist__mmap: %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ /*
+ * Now that all is properly set up, enable the events, they will
+ * count just on workload.pid, which will start...
+ */
+ evlist__enable(evlist);
+
+ /*
+ * Now!
+ */
+ evlist__start_workload(evlist);
+
+ while (1) {
+ int before = total_events;
+
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ union perf_event *event;
+ struct mmap *md;
+
+ md = &evlist->mmap[i];
+ if (perf_mmap__read_init(&md->core) < 0)
+ continue;
+
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
+ const u32 type = event->header.type;
+ const char *name = perf_event__name(type);
+
+ ++total_events;
+ if (type < PERF_RECORD_MAX)
+ nr_events[type]++;
+
+ err = evlist__parse_sample(evlist, event, &sample);
+ if (err < 0) {
+ if (verbose > 0)
+ perf_event__fprintf(event, NULL, stderr);
+ pr_debug("Couldn't parse sample\n");
+ goto out_delete_evlist;
+ }
+
+ if (verbose > 0) {
+ pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
+ perf_event__fprintf(event, NULL, stderr);
+ }
+
+ if (prev_time > sample.time) {
+ pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
+ name, prev_time, sample.time);
+ ++errs;
+ }
+
+ prev_time = sample.time;
+
+ if (sample.cpu != cpu) {
+ pr_debug("%s with unexpected cpu, expected %d, got %d\n",
+ name, cpu, sample.cpu);
+ ++errs;
+ }
+
+ if ((pid_t)sample.pid != evlist->workload.pid) {
+ pr_debug("%s with unexpected pid, expected %d, got %d\n",
+ name, evlist->workload.pid, sample.pid);
+ ++errs;
+ }
+
+ if ((pid_t)sample.tid != evlist->workload.pid) {
+ pr_debug("%s with unexpected tid, expected %d, got %d\n",
+ name, evlist->workload.pid, sample.tid);
+ ++errs;
+ }
+
+ if ((type == PERF_RECORD_COMM ||
+ type == PERF_RECORD_MMAP ||
+ type == PERF_RECORD_MMAP2 ||
+ type == PERF_RECORD_FORK ||
+ type == PERF_RECORD_EXIT) &&
+ (pid_t)event->comm.pid != evlist->workload.pid) {
+ pr_debug("%s with unexpected pid/tid\n", name);
+ ++errs;
+ }
+
+ if ((type == PERF_RECORD_COMM ||
+ type == PERF_RECORD_MMAP ||
+ type == PERF_RECORD_MMAP2) &&
+ event->comm.pid != event->comm.tid) {
+ pr_debug("%s with different pid/tid!\n", name);
+ ++errs;
+ }
+
+ switch (type) {
+ case PERF_RECORD_COMM:
+ if (strcmp(event->comm.comm, cmd)) {
+ pr_debug("%s with unexpected comm!\n", name);
+ ++errs;
+ }
+ break;
+ case PERF_RECORD_EXIT:
+ goto found_exit;
+ case PERF_RECORD_MMAP:
+ mmap_filename = event->mmap.filename;
+ goto check_bname;
+ case PERF_RECORD_MMAP2:
+ mmap_filename = event->mmap2.filename;
+ check_bname:
+ bname = strrchr(mmap_filename, '/');
+ if (bname != NULL) {
+ if (!found_cmd_mmap)
+ found_cmd_mmap = !strcmp(bname + 1, cmd);
+ if (!found_coreutils_mmap)
+ found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
+ if (!found_libc_mmap)
+ found_libc_mmap = !strncmp(bname + 1, "libc", 4);
+ if (!found_ld_mmap)
+ found_ld_mmap = !strncmp(bname + 1, "ld", 2);
+ } else if (!found_vdso_mmap)
+ found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
+ break;
+
+ case PERF_RECORD_SAMPLE:
+ /* Just ignore samples for now */
+ break;
+ default:
+ pr_debug("Unexpected perf_event->header.type %d!\n",
+ type);
+ ++errs;
+ }
+
+ perf_mmap__consume(&md->core);
+ }
+ perf_mmap__read_done(&md->core);
+ }
+
+ /*
+ * We don't use poll here because, at least as of kernel 3.1, the
+ * PERF_RECORD_{!SAMPLE} events don't honour
+ * perf_event_attr.wakeup_events; just PERF_RECORD_SAMPLE does.
+ */
+ if (total_events == before && false)
+ evlist__poll(evlist, -1);
+
+ sleep(1);
+ if (++wakeups > 5) {
+ pr_debug("No PERF_RECORD_EXIT event!\n");
+ break;
+ }
+ }
+
+found_exit:
+ if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
+ pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
+ ++errs;
+ }
+
+ if (nr_events[PERF_RECORD_COMM] == 0) {
+ pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
+ ++errs;
+ }
+
+ if (!found_cmd_mmap && !found_coreutils_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
+ ++errs;
+ }
+
+ if (!found_libc_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
+ ++errs;
+ }
+
+ if (!found_ld_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
+ ++errs;
+ }
+
+ if (!found_vdso_mmap) {
+ pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
+ ++errs;
+ }
+out_delete_evlist:
+ evlist__delete(evlist);
+out:
+ if (err == -EACCES)
+ return TEST_SKIP;
+ if (err < 0 || errs != 0)
+ return TEST_FAIL;
+ return TEST_OK;
+}
+
+static struct test_case tests__PERF_RECORD[] = {
+ TEST_CASE_REASON("PERF_RECORD_* events & perf_sample fields",
+ PERF_RECORD,
+ "permissions"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__PERF_RECORD = {
+ .desc = "PERF_RECORD_* events & perf_sample fields",
+ .test_cases = tests__PERF_RECORD,
+};
diff --git a/tools/perf/tests/perf-targz-src-pkg b/tools/perf/tests/perf-targz-src-pkg
new file mode 100755
index 000000000..fae26b1cf
--- /dev/null
+++ b/tools/perf/tests/perf-targz-src-pkg
@@ -0,0 +1,22 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# Test one of the main kernel Makefile targets to generate a perf sources tarball
+# suitable for building outside the full kernel sources.
+#
+# This is to test that the tools/perf/MANIFEST file lists all the files needed to
+# be in such a tarball, which sometimes gets broken when we move files around,
+# like when we made some files that were in tools/perf/ available to other tools/
+# codebases by moving them to tools/include/, etc.
+
+PERF=$1
+cd ${PERF}/../..
+make perf-targz-src-pkg > /dev/null
+TARBALL=$(ls -rt perf-*.tar.gz)
+TMP_DEST=$(mktemp -d)
+tar xf ${TARBALL} -C $TMP_DEST
+rm -f ${TARBALL}
+cd - > /dev/null
+make -C $TMP_DEST/perf*/tools/perf > /dev/null
+RC=$?
+rm -rf ${TMP_DEST}
+exit $RC
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
new file mode 100644
index 000000000..c3aaa1ddf
--- /dev/null
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <linux/types.h>
+#include <sys/prctl.h>
+#include <perf/cpumap.h>
+#include <perf/evlist.h>
+#include <perf/mmap.h>
+
+#include "debug.h"
+#include "parse-events.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "thread_map.h"
+#include "record.h"
+#include "tsc.h"
+#include "mmap.h"
+#include "tests.h"
+
+/*
+ * Only x86_64/i386 and Arm64 support TSC in perf, so enable the test just
+ * for those architectures.
+ */
+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
+#define TSC_IS_SUPPORTED 1
+#else
+#define TSC_IS_SUPPORTED 0
+#endif
+
+#define CHECK__(x) { \
+ while ((x) < 0) { \
+ pr_debug(#x " failed!\n"); \
+ goto out_err; \
+ } \
+}
+
+#define CHECK_NOT_NULL__(x) { \
+ while ((x) == NULL) { \
+ pr_debug(#x " failed!\n"); \
+ goto out_err; \
+ } \
+}
+
+static int test__tsc_is_supported(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ if (!TSC_IS_SUPPORTED) {
+ pr_debug("Test not supported on this architecture\n");
+ return TEST_SKIP;
+ }
+
+ return TEST_OK;
+}
+
+/**
+ * test__perf_time_to_tsc - test converting perf time to TSC.
+ *
+ * This function implements a test that checks that the conversion of perf time
+ * to and from TSC is consistent with the order of events. If the test passes
+ * %0 is returned, otherwise %-1 is returned. If TSC conversion is not
+ * supported then the test passes but " (not supported)" is printed.
+ */
+static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct record_opts opts = {
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .target = {
+ .uses_mmap = true,
+ },
+ .sample_time = true,
+ };
+ struct perf_thread_map *threads = NULL;
+ struct perf_cpu_map *cpus = NULL;
+ struct evlist *evlist = NULL;
+ struct evsel *evsel = NULL;
+ int err = TEST_FAIL, ret, i;
+ const char *comm1, *comm2;
+ struct perf_tsc_conversion tc;
+ struct perf_event_mmap_page *pc;
+ union perf_event *event;
+ u64 test_tsc, comm1_tsc, comm2_tsc;
+ u64 test_time, comm1_time = 0, comm2_time = 0;
+ struct mmap *md;
+
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ CHECK_NOT_NULL__(threads);
+
+ cpus = perf_cpu_map__new(NULL);
+ CHECK_NOT_NULL__(cpus);
+
+ evlist = evlist__new();
+ CHECK_NOT_NULL__(evlist);
+
+ perf_evlist__set_maps(&evlist->core, cpus, threads);
+
+ CHECK__(parse_event(evlist, "cycles:u"));
+
+ evlist__config(evlist, &opts, NULL);
+
+ /* For hybrid "cycles:u", it creates two events */
+ evlist__for_each_entry(evlist, evsel) {
+ evsel->core.attr.comm = 1;
+ evsel->core.attr.disabled = 1;
+ evsel->core.attr.enable_on_exec = 0;
+ }
+
+ ret = evlist__open(evlist);
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ err = TEST_SKIP;
+ else
+ pr_debug("evlist__open() failed\n");
+ goto out_err;
+ }
+
+ CHECK__(evlist__mmap(evlist, UINT_MAX));
+
+ pc = evlist->mmap[0].core.base;
+ ret = perf_read_tsc_conversion(pc, &tc);
+ if (ret) {
+ if (ret == -EOPNOTSUPP) {
+ pr_debug("perf_read_tsc_conversion is not supported in current kernel\n");
+ err = TEST_SKIP;
+ }
+ goto out_err;
+ }
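+
+ /*
+ * A rough sketch of the conversion (based on the perf_event_mmap_page time
+ * conversion documentation, not on code in this file): with cap_user_time
+ * set, a TSC value maps to perf time roughly as
+ *
+ *   quot = tsc >> tc.time_shift;
+ *   rem  = tsc & ((1ULL << tc.time_shift) - 1);
+ *   time = tc.time_zero + quot * tc.time_mult +
+ *          ((rem * tc.time_mult) >> tc.time_shift);
+ *
+ * tsc_to_perf_time() and perf_time_to_tsc() used below implement this
+ * mapping and its inverse.
+ */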
+
+ evlist__enable(evlist);
+
+ comm1 = "Test COMM 1";
+ CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));
+
+ test_tsc = rdtsc();
+
+ comm2 = "Test COMM 2";
+ CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));
+
+ evlist__disable(evlist);
+
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ md = &evlist->mmap[i];
+ if (perf_mmap__read_init(&md->core) < 0)
+ continue;
+
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
+ struct perf_sample sample;
+
+ if (event->header.type != PERF_RECORD_COMM ||
+ (pid_t)event->comm.pid != getpid() ||
+ (pid_t)event->comm.tid != getpid())
+ goto next_event;
+
+ if (strcmp(event->comm.comm, comm1) == 0) {
+ CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
+ CHECK__(evsel__parse_sample(evsel, event, &sample));
+ comm1_time = sample.time;
+ }
+ if (strcmp(event->comm.comm, comm2) == 0) {
+ CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
+ CHECK__(evsel__parse_sample(evsel, event, &sample));
+ comm2_time = sample.time;
+ }
+next_event:
+ perf_mmap__consume(&md->core);
+ }
+ perf_mmap__read_done(&md->core);
+ }
+
+ if (!comm1_time || !comm2_time)
+ goto out_err;
+
+ test_time = tsc_to_perf_time(test_tsc, &tc);
+ comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
+ comm2_tsc = perf_time_to_tsc(comm2_time, &tc);
+
+ pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
+ comm1_time, comm1_tsc);
+ pr_debug("rdtsc time %"PRIu64" tsc %"PRIu64"\n",
+ test_time, test_tsc);
+ pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
+ comm2_time, comm2_tsc);
+
+ if (test_time <= comm1_time ||
+ test_time >= comm2_time)
+ goto out_err;
+
+ if (test_tsc <= comm1_tsc ||
+ test_tsc >= comm2_tsc)
+ goto out_err;
+
+ err = TEST_OK;
+
+out_err:
+ evlist__delete(evlist);
+ perf_cpu_map__put(cpus);
+ perf_thread_map__put(threads);
+ return err;
+}
+
+static struct test_case time_to_tsc_tests[] = {
+ TEST_CASE_REASON("TSC support", tsc_is_supported,
+ "This architecture does not support"),
+ TEST_CASE_REASON("Perf time to TSC", perf_time_to_tsc,
+ "perf_read_tsc_conversion is not supported"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__perf_time_to_tsc = {
+ .desc = "Convert perf time to TSC",
+ .test_cases = time_to_tsc_tests,
+};
diff --git a/tools/perf/tests/pfm.c b/tools/perf/tests/pfm.c
new file mode 100644
index 000000000..71b76deb1
--- /dev/null
+++ b/tools/perf/tests/pfm.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test support for libpfm4 event encodings.
+ *
+ * Copyright 2020 Google LLC.
+ */
+#include "tests.h"
+#include "util/debug.h"
+#include "util/evlist.h"
+#include "util/pfm.h"
+
+#include <linux/kernel.h>
+
+#ifdef HAVE_LIBPFM
+static int count_pfm_events(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int count = 0;
+
+ perf_evlist__for_each_entry(evlist, evsel) {
+ count++;
+ }
+ return count;
+}
+
+static int test__pfm_events(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct evlist *evlist;
+ struct option opt;
+ size_t i;
+ const struct {
+ const char *events;
+ int nr_events;
+ } table[] = {
+ {
+ .events = "",
+ .nr_events = 0,
+ },
+ {
+ .events = "instructions",
+ .nr_events = 1,
+ },
+ {
+ .events = "instructions,cycles",
+ .nr_events = 2,
+ },
+ {
+ .events = "stereolab",
+ .nr_events = 0,
+ },
+ {
+ .events = "instructions,instructions",
+ .nr_events = 2,
+ },
+ {
+ .events = "stereolab,instructions",
+ .nr_events = 0,
+ },
+ {
+ .events = "instructions,stereolab",
+ .nr_events = 1,
+ },
+ };
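+
+ /*
+ * A reading of the expectations above (inferred from the table, not stated
+ * in the original source): parse_libpfm_events_option() parses left to
+ * right and stops at the first unknown event, keeping what was already
+ * parsed, hence "instructions,stereolab" yields 1 event while
+ * "stereolab,instructions" yields 0.
+ */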
+
+ for (i = 0; i < ARRAY_SIZE(table); i++) {
+ evlist = evlist__new();
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ opt.value = evlist;
+ parse_libpfm_events_option(&opt,
+ table[i].events,
+ 0);
+ TEST_ASSERT_EQUAL(table[i].events,
+ count_pfm_events(&evlist->core),
+ table[i].nr_events);
+ TEST_ASSERT_EQUAL(table[i].events,
+ evlist->core.nr_groups,
+ 0);
+
+ evlist__delete(evlist);
+ }
+ return 0;
+}
+
+static int test__pfm_group(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct evlist *evlist;
+ struct option opt;
+ size_t i;
+ const struct {
+ const char *events;
+ int nr_events;
+ int nr_groups;
+ } table[] = {
+ {
+ .events = "{},",
+ .nr_events = 0,
+ .nr_groups = 0,
+ },
+ {
+ .events = "{instructions}",
+ .nr_events = 1,
+ .nr_groups = 1,
+ },
+ {
+ .events = "{instructions},{}",
+ .nr_events = 1,
+ .nr_groups = 1,
+ },
+ {
+ .events = "{},{instructions}",
+ .nr_events = 1,
+ .nr_groups = 1,
+ },
+ {
+ .events = "{instructions},{instructions}",
+ .nr_events = 2,
+ .nr_groups = 2,
+ },
+ {
+ .events = "{instructions,cycles},{instructions,cycles}",
+ .nr_events = 4,
+ .nr_groups = 2,
+ },
+ {
+ .events = "{stereolab}",
+ .nr_events = 0,
+ .nr_groups = 0,
+ },
+ {
+ .events =
+ "{instructions,cycles},{instructions,stereolab}",
+ .nr_events = 3,
+ .nr_groups = 1,
+ },
+ {
+ .events = "instructions}",
+ .nr_events = 1,
+ .nr_groups = 0,
+ },
+ {
+ .events = "{{instructions}}",
+ .nr_events = 0,
+ .nr_groups = 0,
+ },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(table); i++) {
+ evlist = evlist__new();
+ if (evlist == NULL)
+ return -ENOMEM;
+
+ opt.value = evlist;
+ parse_libpfm_events_option(&opt,
+ table[i].events,
+ 0);
+ TEST_ASSERT_EQUAL(table[i].events,
+ count_pfm_events(&evlist->core),
+ table[i].nr_events);
+ TEST_ASSERT_EQUAL(table[i].events,
+ evlist->core.nr_groups,
+ table[i].nr_groups);
+
+ evlist__delete(evlist);
+ }
+ return 0;
+}
+#else
+static int test__pfm_events(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ return TEST_SKIP;
+}
+
+static int test__pfm_group(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ return TEST_SKIP;
+}
+#endif
+
+static struct test_case pfm_tests[] = {
+ TEST_CASE_REASON("test of individual --pfm-events", pfm_events, "not compiled in"),
+ TEST_CASE_REASON("test groups of --pfm-events", pfm_group, "not compiled in"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__pfm = {
+ .desc = "Test libpfm4 support",
+ .test_cases = pfm_tests,
+};
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
new file mode 100644
index 000000000..097e05c79
--- /dev/null
+++ b/tools/perf/tests/pmu-events.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "math.h"
+#include "parse-events.h"
+#include "pmu.h"
+#include "tests.h"
+#include <errno.h>
+#include <stdio.h>
+#include <linux/kernel.h>
+#include <linux/zalloc.h>
+#include "debug.h"
+#include "../pmu-events/pmu-events.h"
+#include <perf/evlist.h>
+#include "util/evlist.h"
+#include "util/expr.h"
+#include "util/parse-events.h"
+#include "metricgroup.h"
+#include "stat.h"
+
+struct perf_pmu_test_event {
+ /* used for matching against events from generated pmu-events.c */
+ struct pmu_event event;
+
+ /* used for matching against event aliases (extra alias-only fields below) */
+ const char *alias_str;
+
+ /*
+ * Note: when PublicDescription does not exist in the JSON, there is no
+ * long_desc in pmu_event.long_desc, but long_desc may still be set in
+ * the alias.
+ */
+ const char *alias_long_desc;
+
+ /* PMU which we should match against */
+ const char *matching_pmu;
+};
+
+struct perf_pmu_test_pmu {
+ struct perf_pmu pmu;
+ struct perf_pmu_test_event const *aliases[10];
+};
+
+static const struct perf_pmu_test_event bp_l1_btb_correct = {
+ .event = {
+ .name = "bp_l1_btb_correct",
+ .event = "event=0x8a",
+ .desc = "L1 BTB Correction",
+ .topic = "branch",
+ },
+ .alias_str = "event=0x8a",
+ .alias_long_desc = "L1 BTB Correction",
+};
+
+static const struct perf_pmu_test_event bp_l2_btb_correct = {
+ .event = {
+ .name = "bp_l2_btb_correct",
+ .event = "event=0x8b",
+ .desc = "L2 BTB Correction",
+ .topic = "branch",
+ },
+ .alias_str = "event=0x8b",
+ .alias_long_desc = "L2 BTB Correction",
+};
+
+static const struct perf_pmu_test_event segment_reg_loads_any = {
+ .event = {
+ .name = "segment_reg_loads.any",
+ .event = "event=0x6,period=200000,umask=0x80",
+ .desc = "Number of segment register loads",
+ .topic = "other",
+ },
+ .alias_str = "event=0x6,period=0x30d40,umask=0x80",
+ .alias_long_desc = "Number of segment register loads",
+};
+
+static const struct perf_pmu_test_event dispatch_blocked_any = {
+ .event = {
+ .name = "dispatch_blocked.any",
+ .event = "event=0x9,period=200000,umask=0x20",
+ .desc = "Memory cluster signals to block micro-op dispatch for any reason",
+ .topic = "other",
+ },
+ .alias_str = "event=0x9,period=0x30d40,umask=0x20",
+ .alias_long_desc = "Memory cluster signals to block micro-op dispatch for any reason",
+};
+
+static const struct perf_pmu_test_event eist_trans = {
+ .event = {
+ .name = "eist_trans",
+ .event = "event=0x3a,period=200000,umask=0x0",
+ .desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
+ .topic = "other",
+ },
+ .alias_str = "event=0x3a,period=0x30d40,umask=0",
+ .alias_long_desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
+};
+
+static const struct perf_pmu_test_event l3_cache_rd = {
+ .event = {
+ .name = "l3_cache_rd",
+ .event = "event=0x40",
+ .desc = "L3 cache access, read",
+ .long_desc = "Attributable Level 3 cache access, read",
+ .topic = "cache",
+ },
+ .alias_str = "event=0x40",
+ .alias_long_desc = "Attributable Level 3 cache access, read",
+};
+
+static const struct perf_pmu_test_event *core_events[] = {
+ &bp_l1_btb_correct,
+ &bp_l2_btb_correct,
+ &segment_reg_loads_any,
+ &dispatch_blocked_any,
+ &eist_trans,
+ &l3_cache_rd,
+ NULL
+};
+
+static const struct perf_pmu_test_event uncore_hisi_ddrc_flux_wcmd = {
+ .event = {
+ .name = "uncore_hisi_ddrc.flux_wcmd",
+ .event = "event=0x2",
+ .desc = "DDRC write commands. Unit: hisi_sccl,ddrc ",
+ .topic = "uncore",
+ .long_desc = "DDRC write commands",
+ .pmu = "hisi_sccl,ddrc",
+ },
+ .alias_str = "event=0x2",
+ .alias_long_desc = "DDRC write commands",
+ .matching_pmu = "hisi_sccl1_ddrc2",
+};
+
+static const struct perf_pmu_test_event unc_cbo_xsnp_response_miss_eviction = {
+ .event = {
+ .name = "unc_cbo_xsnp_response.miss_eviction",
+ .event = "event=0x22,umask=0x81",
+ .desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core. Unit: uncore_cbox ",
+ .topic = "uncore",
+ .long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
+ .pmu = "uncore_cbox",
+ },
+ .alias_str = "event=0x22,umask=0x81",
+ .alias_long_desc = "A cross-core snoop resulted from L3 Eviction which misses in some processor core",
+ .matching_pmu = "uncore_cbox_0",
+};
+
+static const struct perf_pmu_test_event uncore_hyphen = {
+ .event = {
+ .name = "event-hyphen",
+ .event = "event=0xe0,umask=0x00",
+ .desc = "UNC_CBO_HYPHEN. Unit: uncore_cbox ",
+ .topic = "uncore",
+ .long_desc = "UNC_CBO_HYPHEN",
+ .pmu = "uncore_cbox",
+ },
+ .alias_str = "event=0xe0,umask=0",
+ .alias_long_desc = "UNC_CBO_HYPHEN",
+ .matching_pmu = "uncore_cbox_0",
+};
+
+static const struct perf_pmu_test_event uncore_two_hyph = {
+ .event = {
+ .name = "event-two-hyph",
+ .event = "event=0xc0,umask=0x00",
+ .desc = "UNC_CBO_TWO_HYPH. Unit: uncore_cbox ",
+ .topic = "uncore",
+ .long_desc = "UNC_CBO_TWO_HYPH",
+ .pmu = "uncore_cbox",
+ },
+ .alias_str = "event=0xc0,umask=0",
+ .alias_long_desc = "UNC_CBO_TWO_HYPH",
+ .matching_pmu = "uncore_cbox_0",
+};
+
+static const struct perf_pmu_test_event uncore_hisi_l3c_rd_hit_cpipe = {
+ .event = {
+ .name = "uncore_hisi_l3c.rd_hit_cpipe",
+ .event = "event=0x7",
+ .desc = "Total read hits. Unit: hisi_sccl,l3c ",
+ .topic = "uncore",
+ .long_desc = "Total read hits",
+ .pmu = "hisi_sccl,l3c",
+ },
+ .alias_str = "event=0x7",
+ .alias_long_desc = "Total read hits",
+ .matching_pmu = "hisi_sccl3_l3c7",
+};
+
+static const struct perf_pmu_test_event uncore_imc_free_running_cache_miss = {
+ .event = {
+ .name = "uncore_imc_free_running.cache_miss",
+ .event = "event=0x12",
+ .desc = "Total cache misses. Unit: uncore_imc_free_running ",
+ .topic = "uncore",
+ .long_desc = "Total cache misses",
+ .pmu = "uncore_imc_free_running",
+ },
+ .alias_str = "event=0x12",
+ .alias_long_desc = "Total cache misses",
+ .matching_pmu = "uncore_imc_free_running_0",
+};
+
+static const struct perf_pmu_test_event uncore_imc_cache_hits = {
+ .event = {
+ .name = "uncore_imc.cache_hits",
+ .event = "event=0x34",
+ .desc = "Total cache hits. Unit: uncore_imc ",
+ .topic = "uncore",
+ .long_desc = "Total cache hits",
+ .pmu = "uncore_imc",
+ },
+ .alias_str = "event=0x34",
+ .alias_long_desc = "Total cache hits",
+ .matching_pmu = "uncore_imc_0",
+};
+
+static const struct perf_pmu_test_event *uncore_events[] = {
+ &uncore_hisi_ddrc_flux_wcmd,
+ &unc_cbo_xsnp_response_miss_eviction,
+ &uncore_hyphen,
+ &uncore_two_hyph,
+ &uncore_hisi_l3c_rd_hit_cpipe,
+ &uncore_imc_free_running_cache_miss,
+ &uncore_imc_cache_hits,
+ NULL
+};
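+
+/*
+ * A note inferred from the test data above (not stated in the original
+ * source): a .pmu value such as "hisi_sccl,ddrc" or "uncore_cbox" is not a
+ * literal PMU name but a pattern/prefix; matching_pmu names one concrete
+ * instance the alias is expected to attach to, e.g. "hisi_sccl,ddrc" ->
+ * "hisi_sccl1_ddrc2" and "uncore_cbox" -> "uncore_cbox_0", which is what the
+ * uncore alias tests below check against.
+ */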
+
+static const struct perf_pmu_test_event sys_ddr_pmu_write_cycles = {
+ .event = {
+ .name = "sys_ddr_pmu.write_cycles",
+ .event = "event=0x2b",
+ .desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
+ .topic = "uncore",
+ .pmu = "uncore_sys_ddr_pmu",
+ .compat = "v8",
+ },
+ .alias_str = "event=0x2b",
+ .alias_long_desc = "ddr write-cycles event. Unit: uncore_sys_ddr_pmu ",
+ .matching_pmu = "uncore_sys_ddr_pmu",
+};
+
+static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = {
+ .event = {
+ .name = "sys_ccn_pmu.read_cycles",
+ .event = "config=0x2c",
+ .desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
+ .topic = "uncore",
+ .pmu = "uncore_sys_ccn_pmu",
+ .compat = "0x01",
+ },
+ .alias_str = "config=0x2c",
+ .alias_long_desc = "ccn read-cycles event. Unit: uncore_sys_ccn_pmu ",
+ .matching_pmu = "uncore_sys_ccn_pmu",
+};
+
+static const struct perf_pmu_test_event *sys_events[] = {
+ &sys_ddr_pmu_write_cycles,
+ &sys_ccn_pmu_read_cycles,
+ NULL
+};
+
+static bool is_same(const char *reference, const char *test)
+{
+ if (!reference && !test)
+ return true;
+
+ if (reference && !test)
+ return false;
+
+ if (!reference && test)
+ return false;
+
+ return !strcmp(reference, test);
+}
+
+static int compare_pmu_events(const struct pmu_event *e1, const struct pmu_event *e2)
+{
+ if (!is_same(e1->name, e2->name)) {
+ pr_debug2("testing event e1 %s: mismatched name string, %s vs %s\n",
+ e1->name, e1->name, e2->name);
+ return -1;
+ }
+
+ if (!is_same(e1->compat, e2->compat)) {
+ pr_debug2("testing event e1 %s: mismatched compat string, %s vs %s\n",
+ e1->name, e1->compat, e2->compat);
+ return -1;
+ }
+
+ if (!is_same(e1->event, e2->event)) {
+ pr_debug2("testing event e1 %s: mismatched event, %s vs %s\n",
+ e1->name, e1->event, e2->event);
+ return -1;
+ }
+
+ if (!is_same(e1->desc, e2->desc)) {
+ pr_debug2("testing event e1 %s: mismatched desc, %s vs %s\n",
+ e1->name, e1->desc, e2->desc);
+ return -1;
+ }
+
+ if (!is_same(e1->topic, e2->topic)) {
+ pr_debug2("testing event e1 %s: mismatched topic, %s vs %s\n",
+ e1->name, e1->topic, e2->topic);
+ return -1;
+ }
+
+ if (!is_same(e1->long_desc, e2->long_desc)) {
+ pr_debug2("testing event e1 %s: mismatched long_desc, %s vs %s\n",
+ e1->name, e1->long_desc, e2->long_desc);
+ return -1;
+ }
+
+ if (!is_same(e1->pmu, e2->pmu)) {
+ pr_debug2("testing event e1 %s: mismatched pmu string, %s vs %s\n",
+ e1->name, e1->pmu, e2->pmu);
+ return -1;
+ }
+
+ if (!is_same(e1->unit, e2->unit)) {
+ pr_debug2("testing event e1 %s: mismatched unit, %s vs %s\n",
+ e1->name, e1->unit, e2->unit);
+ return -1;
+ }
+
+ if (!is_same(e1->perpkg, e2->perpkg)) {
+ pr_debug2("testing event e1 %s: mismatched perpkg, %s vs %s\n",
+ e1->name, e1->perpkg, e2->perpkg);
+ return -1;
+ }
+
+ if (!is_same(e1->aggr_mode, e2->aggr_mode)) {
+ pr_debug2("testing event e1 %s: mismatched aggr_mode, %s vs %s\n",
+ e1->name, e1->aggr_mode, e2->aggr_mode);
+ return -1;
+ }
+
+ if (!is_same(e1->metric_expr, e2->metric_expr)) {
+ pr_debug2("testing event e1 %s: mismatched metric_expr, %s vs %s\n",
+ e1->name, e1->metric_expr, e2->metric_expr);
+ return -1;
+ }
+
+ if (!is_same(e1->metric_name, e2->metric_name)) {
+ pr_debug2("testing event e1 %s: mismatched metric_name, %s vs %s\n",
+ e1->name, e1->metric_name, e2->metric_name);
+ return -1;
+ }
+
+ if (!is_same(e1->metric_group, e2->metric_group)) {
+ pr_debug2("testing event e1 %s: mismatched metric_group, %s vs %s\n",
+ e1->name, e1->metric_group, e2->metric_group);
+ return -1;
+ }
+
+ if (!is_same(e1->deprecated, e2->deprecated)) {
+ pr_debug2("testing event e1 %s: mismatched deprecated, %s vs %s\n",
+ e1->name, e1->deprecated, e2->deprecated);
+ return -1;
+ }
+
+ if (!is_same(e1->metric_constraint, e2->metric_constraint)) {
+ pr_debug2("testing event e1 %s: mismatched metric_constraint, %s vs %s\n",
+ e1->name, e1->metric_constraint, e2->metric_constraint);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int compare_alias_to_test_event(struct perf_pmu_alias *alias,
+ struct perf_pmu_test_event const *test_event,
+ char const *pmu_name)
+{
+ struct pmu_event const *event = &test_event->event;
+
+ /* An alias was found, ensure everything is in order */
+ if (!is_same(alias->name, event->name)) {
+ pr_debug("testing aliases PMU %s: mismatched name, %s vs %s\n",
+ pmu_name, alias->name, event->name);
+ return -1;
+ }
+
+ if (!is_same(alias->desc, event->desc)) {
+ pr_debug("testing aliases PMU %s: mismatched desc, %s vs %s\n",
+ pmu_name, alias->desc, event->desc);
+ return -1;
+ }
+
+ if (!is_same(alias->long_desc, test_event->alias_long_desc)) {
+ pr_debug("testing aliases PMU %s: mismatched long_desc, %s vs %s\n",
+ pmu_name, alias->long_desc,
+ test_event->alias_long_desc);
+ return -1;
+ }
+
+ if (!is_same(alias->topic, event->topic)) {
+ pr_debug("testing aliases PMU %s: mismatched topic, %s vs %s\n",
+ pmu_name, alias->topic, event->topic);
+ return -1;
+ }
+
+ if (!is_same(alias->str, test_event->alias_str)) {
+ pr_debug("testing aliases PMU %s: mismatched str, %s vs %s\n",
+ pmu_name, alias->str, test_event->alias_str);
+ return -1;
+ }
+
+ if (!is_same(alias->long_desc, test_event->alias_long_desc)) {
+ pr_debug("testing aliases PMU %s: mismatched long desc, %s vs %s\n",
+ pmu_name, alias->str, test_event->alias_long_desc);
+ return -1;
+ }
+
+
+ if (!is_same(alias->pmu_name, test_event->event.pmu)) {
+ pr_debug("testing aliases PMU %s: mismatched pmu_name, %s vs %s\n",
+ pmu_name, alias->pmu_name, test_event->event.pmu);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int test__pmu_event_table_core_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *data)
+{
+ int *map_events = data;
+ struct perf_pmu_test_event const **test_event_table;
+ bool found = false;
+
+ if (!pe->name)
+ return 0;
+
+ if (pe->pmu)
+ test_event_table = &uncore_events[0];
+ else
+ test_event_table = &core_events[0];
+
+ for (; *test_event_table; test_event_table++) {
+ struct perf_pmu_test_event const *test_event = *test_event_table;
+ struct pmu_event const *event = &test_event->event;
+
+ if (strcmp(pe->name, event->name))
+ continue;
+ found = true;
+ (*map_events)++;
+
+ if (compare_pmu_events(pe, event))
+ return -1;
+
+ pr_debug("testing event table %s: pass\n", pe->name);
+ }
+ if (!found) {
+ pr_err("testing event table: could not find event %s\n", pe->name);
+ return -1;
+ }
+ return 0;
+}
+
+static int test__pmu_event_table_sys_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *data)
+{
+ int *map_events = data;
+ struct perf_pmu_test_event const **test_event_table;
+ bool found = false;
+
+ test_event_table = &sys_events[0];
+
+ for (; *test_event_table; test_event_table++) {
+ struct perf_pmu_test_event const *test_event = *test_event_table;
+ struct pmu_event const *event = &test_event->event;
+
+ if (strcmp(pe->name, event->name))
+ continue;
+ found = true;
+ (*map_events)++;
+
+ if (compare_pmu_events(pe, event))
+ return TEST_FAIL;
+
+ pr_debug("testing sys event table %s: pass\n", pe->name);
+ }
+ if (!found) {
+ pr_debug("testing sys event table: could not find event %s\n", pe->name);
+ return TEST_FAIL;
+ }
+ return TEST_OK;
+}
+
+/* Verify generated events from pmu-events.c are as expected */
+static int test__pmu_event_table(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ const struct pmu_events_table *sys_event_table = find_sys_events_table("pme_test_soc_sys");
+ const struct pmu_events_table *table = find_core_events_table("testarch", "testcpu");
+ int map_events = 0, expected_events, err;
+
+ /* ignore 3x sentinels */
+ expected_events = ARRAY_SIZE(core_events) +
+ ARRAY_SIZE(uncore_events) +
+ ARRAY_SIZE(sys_events) - 3;
+
+ if (!table || !sys_event_table)
+ return -1;
+
+ err = pmu_events_table_for_each_event(table, test__pmu_event_table_core_callback,
+ &map_events);
+ if (err)
+ return err;
+
+ err = pmu_events_table_for_each_event(sys_event_table, test__pmu_event_table_sys_callback,
+ &map_events);
+ if (err)
+ return err;
+
+ if (map_events != expected_events) {
+ pr_err("testing event table: found %d, but expected %d\n",
+ map_events, expected_events);
+ return TEST_FAIL;
+ }
+
+ return 0;
+}
+
+static struct perf_pmu_alias *find_alias(const char *test_event, struct list_head *aliases)
+{
+ struct perf_pmu_alias *alias;
+
+ list_for_each_entry(alias, aliases, list)
+ if (!strcmp(test_event, alias->name))
+ return alias;
+
+ return NULL;
+}
+
+/* Verify aliases are as expected */
+static int __test_core_pmu_event_aliases(char *pmu_name, int *count)
+{
+ struct perf_pmu_test_event const **test_event_table;
+ struct perf_pmu *pmu;
+ LIST_HEAD(aliases);
+ int res = 0;
+ const struct pmu_events_table *table = find_core_events_table("testarch", "testcpu");
+ struct perf_pmu_alias *a, *tmp;
+
+ if (!table)
+ return -1;
+
+ test_event_table = &core_events[0];
+
+ pmu = zalloc(sizeof(*pmu));
+ if (!pmu)
+ return -1;
+
+ pmu->name = pmu_name;
+
+ pmu_add_cpu_aliases_table(&aliases, pmu, table);
+
+ for (; *test_event_table; test_event_table++) {
+ struct perf_pmu_test_event const *test_event = *test_event_table;
+ struct pmu_event const *event = &test_event->event;
+ struct perf_pmu_alias *alias = find_alias(event->name, &aliases);
+
+ if (!alias) {
+ pr_debug("testing aliases core PMU %s: no alias, alias_table->name=%s\n",
+ pmu_name, event->name);
+ res = -1;
+ break;
+ }
+
+ if (compare_alias_to_test_event(alias, test_event, pmu_name)) {
+ res = -1;
+ break;
+ }
+
+ (*count)++;
+ pr_debug2("testing aliases core PMU %s: matched event %s\n",
+ pmu_name, alias->name);
+ }
+
+ list_for_each_entry_safe(a, tmp, &aliases, list) {
+ list_del(&a->list);
+ perf_pmu_free_alias(a);
+ }
+ free(pmu);
+ return res;
+}
+
+static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu)
+{
+ int alias_count = 0, to_match_count = 0, matched_count = 0;
+ struct perf_pmu_test_event const **table;
+ struct perf_pmu *pmu = &test_pmu->pmu;
+ const char *pmu_name = pmu->name;
+ struct perf_pmu_alias *a, *tmp, *alias;
+ const struct pmu_events_table *events_table;
+ LIST_HEAD(aliases);
+ int res = 0;
+
+ events_table = find_core_events_table("testarch", "testcpu");
+ if (!events_table)
+ return -1;
+ pmu_add_cpu_aliases_table(&aliases, pmu, events_table);
+ pmu_add_sys_aliases(&aliases, pmu);
+
+ /* Count how many aliases we generated */
+ list_for_each_entry(alias, &aliases, list)
+ alias_count++;
+
+ /* Count how many aliases we expect from the known table */
+ for (table = &test_pmu->aliases[0]; *table; table++)
+ to_match_count++;
+
+ if (alias_count != to_match_count) {
+ pr_debug("testing aliases uncore PMU %s: mismatch expected aliases (%d) vs found (%d)\n",
+ pmu_name, to_match_count, alias_count);
+ res = -1;
+ goto out;
+ }
+
+ list_for_each_entry(alias, &aliases, list) {
+ bool matched = false;
+
+ for (table = &test_pmu->aliases[0]; *table; table++) {
+ struct perf_pmu_test_event const *test_event = *table;
+ struct pmu_event const *event = &test_event->event;
+
+ if (!strcmp(event->name, alias->name)) {
+ if (compare_alias_to_test_event(alias,
+ test_event,
+ pmu_name)) {
+ continue;
+ }
+ matched = true;
+ matched_count++;
+ }
+ }
+
+ if (matched == false) {
+ pr_debug("testing aliases uncore PMU %s: could not match alias %s\n",
+ pmu_name, alias->name);
+ res = -1;
+ goto out;
+ }
+ }
+
+ if (alias_count != matched_count) {
+ pr_debug("testing aliases uncore PMU %s: mismatch found aliases (%d) vs matched (%d)\n",
+ pmu_name, matched_count, alias_count);
+ res = -1;
+ }
+
+out:
+ list_for_each_entry_safe(a, tmp, &aliases, list) {
+ list_del(&a->list);
+ perf_pmu_free_alias(a);
+ }
+ return res;
+}
+
+static struct perf_pmu_test_pmu test_pmus[] = {
+ {
+ .pmu = {
+ .name = (char *)"hisi_sccl1_ddrc2",
+ .is_uncore = 1,
+ },
+ .aliases = {
+ &uncore_hisi_ddrc_flux_wcmd,
+ },
+ },
+ {
+ .pmu = {
+ .name = (char *)"uncore_cbox_0",
+ .is_uncore = 1,
+ },
+ .aliases = {
+ &unc_cbo_xsnp_response_miss_eviction,
+ &uncore_hyphen,
+ &uncore_two_hyph,
+ },
+ },
+ {
+ .pmu = {
+ .name = (char *)"hisi_sccl3_l3c7",
+ .is_uncore = 1,
+ },
+ .aliases = {
+ &uncore_hisi_l3c_rd_hit_cpipe,
+ },
+ },
+ {
+ .pmu = {
+ .name = (char *)"uncore_imc_free_running_0",
+ .is_uncore = 1,
+ },
+ .aliases = {
+ &uncore_imc_free_running_cache_miss,
+ },
+ },
+ {
+ .pmu = {
+ .name = (char *)"uncore_imc_0",
+ .is_uncore = 1,
+ },
+ .aliases = {
+ &uncore_imc_cache_hits,
+ },
+ },
+ {
+ .pmu = {
+ .name = (char *)"uncore_sys_ddr_pmu0",
+ .is_uncore = 1,
+ .id = (char *)"v8",
+ },
+ .aliases = {
+ &sys_ddr_pmu_write_cycles,
+ },
+ },
+ {
+ .pmu = {
+ .name = (char *)"uncore_sys_ccn_pmu4",
+ .is_uncore = 1,
+ .id = (char *)"0x01",
+ },
+ .aliases = {
+ &sys_ccn_pmu_read_cycles,
+ },
+ },
+};
+
+/* Test that aliases generated are as expected */
+static int test__aliases(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct perf_pmu *pmu = NULL;
+ unsigned long i;
+
+ while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+ int count = 0;
+
+ if (!is_pmu_core(pmu->name))
+ continue;
+
+ if (list_empty(&pmu->format)) {
+ pr_debug2("skipping testing core PMU %s\n", pmu->name);
+ continue;
+ }
+
+ if (__test_core_pmu_event_aliases(pmu->name, &count)) {
+ pr_debug("testing core PMU %s aliases: failed\n", pmu->name);
+ return -1;
+ }
+
+ if (count == 0) {
+ pr_debug("testing core PMU %s aliases: no events to match\n",
+ pmu->name);
+ return -1;
+ }
+
+ pr_debug("testing core PMU %s aliases: pass\n", pmu->name);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(test_pmus); i++) {
+ int res = __test_uncore_pmu_event_aliases(&test_pmus[i]);
+
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+static bool is_number(const char *str)
+{
+ char *end_ptr;
+ double v;
+
+ errno = 0;
+ v = strtod(str, &end_ptr);
+ (void)v; // We're not interested in this value, only if it is valid
+ return errno == 0 && end_ptr != str;
+}
+
+static int check_parse_id(const char *id, struct parse_events_error *error,
+ struct perf_pmu *fake_pmu)
+{
+ struct evlist *evlist;
+ int ret;
+ char *dup, *cur;
+
+ /* Numbers are always valid. */
+ if (is_number(id))
+ return 0;
+
+ evlist = evlist__new();
+ if (!evlist)
+ return -ENOMEM;
+
+ dup = strdup(id);
+ if (!dup)
+ return -ENOMEM;
+
+ for (cur = strchr(dup, '@') ; cur; cur = strchr(++cur, '@'))
+ *cur = '/';
+
+ if (fake_pmu) {
+ /*
+ * Every call to __parse_events will try to initialize the PMU
+ * state from sysfs and then clean it up at the end. Reset the
+ * PMU events to the test state so that we don't pick up
+ * erroneous prefixes and suffixes.
+ */
+ perf_pmu__test_parse_init();
+ }
+ ret = __parse_events(evlist, dup, error, fake_pmu);
+ free(dup);
+
+ evlist__delete(evlist);
+ return ret;
+}
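+
+/*
+ * Illustration (not part of the original source): metric expressions write
+ * PMU event syntax with '@' in place of '/' so that the expression parser
+ * does not mistake it for division; check_parse_id() converts it back, e.g.
+ * "cstate_pkg@c2\-residency@" is handed to __parse_events() as
+ * "cstate_pkg/c2\-residency/".
+ */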
+
+static int check_parse_fake(const char *id)
+{
+ struct parse_events_error error;
+ int ret;
+
+ parse_events_error__init(&error);
+ ret = check_parse_id(id, &error, &perf_pmu__fake);
+ parse_events_error__exit(&error);
+ return ret;
+}
+
+struct metric {
+ struct list_head list;
+ struct metric_ref metric_ref;
+};
+
+static int test__parsing_callback(const struct pmu_event *pe, const struct pmu_events_table *table,
+ void *data)
+{
+ int *failures = data;
+ int k;
+ struct evlist *evlist;
+ struct perf_cpu_map *cpus;
+ struct runtime_stat st;
+ struct evsel *evsel;
+ struct rblist metric_events = {
+ .nr_entries = 0,
+ };
+ int err = 0;
+
+ if (!pe->metric_expr)
+ return 0;
+
+ pr_debug("Found metric '%s'\n", pe->metric_name);
+ (*failures)++;
+
+ /*
+ * We need to prepare evlist for stat mode running on CPU 0
+ * because that's where all the stats are going to be created.
+ */
+ evlist = evlist__new();
+ if (!evlist)
+ return -ENOMEM;
+
+ cpus = perf_cpu_map__new("0");
+ if (!cpus) {
+ evlist__delete(evlist);
+ return -ENOMEM;
+ }
+
+ perf_evlist__set_maps(&evlist->core, cpus, NULL);
+ runtime_stat__init(&st);
+
+ err = metricgroup__parse_groups_test(evlist, table, pe->metric_name,
+ false, false,
+ &metric_events);
+ if (err) {
+ if (!strcmp(pe->metric_name, "M1") || !strcmp(pe->metric_name, "M2") ||
+ !strcmp(pe->metric_name, "M3")) {
+ (*failures)--;
+ pr_debug("Expected broken metric %s skipping\n", pe->metric_name);
+ err = 0;
+ }
+ goto out_err;
+ }
+
+ err = evlist__alloc_stats(evlist, false);
+ if (err)
+ goto out_err;
+ /*
+ * Add all ids with a made up value. The value may trigger a divide by
+ * zero when subtracted, so try to make the values unique.
+ */
+ k = 1;
+ perf_stat__reset_shadow_stats();
+ evlist__for_each_entry(evlist, evsel) {
+ perf_stat__update_shadow_stats(evsel, k, 0, &st);
+ if (!strcmp(evsel->name, "duration_time"))
+ update_stats(&walltime_nsecs_stats, k);
+ k++;
+ }
+ evlist__for_each_entry(evlist, evsel) {
+ struct metric_event *me = metricgroup__lookup(&metric_events, evsel, false);
+
+ if (me != NULL) {
+ struct metric_expr *mexp;
+
+ list_for_each_entry (mexp, &me->head, nd) {
+ if (strcmp(mexp->metric_name, pe->metric_name))
+ continue;
+ pr_debug("Result %f\n", test_generic_metric(mexp, 0, &st));
+ err = 0;
+ (*failures)--;
+ goto out_err;
+ }
+ }
+ }
+ pr_debug("Didn't find parsed metric %s\n", pe->metric_name);
+ err = 1;
+out_err:
+ if (err)
+ pr_debug("Broken metric %s\n", pe->metric_name);
+
+ /* ... cleanup. */
+ metricgroup__rblist_exit(&metric_events);
+ runtime_stat__exit(&st);
+ evlist__free_stats(evlist);
+ perf_cpu_map__put(cpus);
+ evlist__delete(evlist);
+ return err;
+}
+
+static int test__parsing(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ int failures = 0;
+
+ pmu_for_each_core_event(test__parsing_callback, &failures);
+ pmu_for_each_sys_event(test__parsing_callback, &failures);
+
+ return failures == 0 ? TEST_OK : TEST_FAIL;
+}
+
+struct test_metric {
+ const char *str;
+};
+
+static struct test_metric metrics[] = {
+ { "(unc_p_power_state_occupancy.cores_c0 / unc_p_clockticks) * 100." },
+ { "imx8_ddr0@read\\-cycles@ * 4 * 4", },
+ { "imx8_ddr0@axid\\-read\\,axi_mask\\=0xffff\\,axi_id\\=0x0000@ * 4", },
+ { "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100", },
+ { "(imx8_ddr0@read\\-cycles@ + imx8_ddr0@write\\-cycles@)", },
+};
+
+static int metric_parse_fake(const char *str)
+{
+ struct expr_parse_ctx *ctx;
+ struct hashmap_entry *cur;
+ double result;
+ int ret = -1;
+ size_t bkt;
+ int i;
+
+ pr_debug("parsing '%s'\n", str);
+
+ ctx = expr__ctx_new();
+ if (!ctx) {
+ pr_debug("expr__ctx_new failed\n");
+ return TEST_FAIL;
+ }
+ if (expr__find_ids(str, NULL, ctx) < 0) {
+ pr_err("expr__find_ids failed\n");
+ return -1;
+ }
+
+ /*
+ * Add all ids with a made up value. The value may trigger a
+ * divide by zero when subtracted, so try to make the values
+ * unique.
+ */
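+ /*
+ * Hypothetical illustration (this expression is not in the metric tables):
+ * for "x / (y - 2 * x)" the ascending pass may assign x=1, y=2, making the
+ * denominator zero so expr__parse() fails; the descending pass further down
+ * (1024, 1023, ...) gives y - 2*x != 0 and the metric can still be checked.
+ */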
+ i = 1;
+ hashmap__for_each_entry(ctx->ids, cur, bkt)
+ expr__add_id_val(ctx, strdup(cur->key), i++);
+
+ hashmap__for_each_entry(ctx->ids, cur, bkt) {
+ if (check_parse_fake(cur->key)) {
+ pr_err("check_parse_fake failed\n");
+ goto out;
+ }
+ }
+
+ ret = 0;
+ if (expr__parse(&result, ctx, str)) {
+ /*
+ * Parsing failed, make numbers go from large to small which can
+ * resolve divide by zero issues.
+ */
+ i = 1024;
+ hashmap__for_each_entry(ctx->ids, cur, bkt)
+ expr__add_id_val(ctx, strdup(cur->key), i--);
+ if (expr__parse(&result, ctx, str)) {
+ pr_err("expr__parse failed\n");
+ ret = -1;
+ }
+ }
+
+out:
+ expr__ctx_free(ctx);
+ return ret;
+}
+
+static int test__parsing_fake_callback(const struct pmu_event *pe,
+ const struct pmu_events_table *table __maybe_unused,
+ void *data __maybe_unused)
+{
+ if (!pe->metric_expr)
+ return 0;
+
+ return metric_parse_fake(pe->metric_expr);
+}
+
+/*
+ * Parse all the metrics for the current architecture, or for all defined
+ * CPUs via the 'fake_pmu' in parse_events.
+ */
+static int test__parsing_fake(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ int err = 0;
+
+ for (size_t i = 0; i < ARRAY_SIZE(metrics); i++) {
+ err = metric_parse_fake(metrics[i].str);
+ if (err)
+ return err;
+ }
+
+ err = pmu_for_each_core_event(test__parsing_fake_callback, NULL);
+ if (err)
+ return err;
+
+ return pmu_for_each_sys_event(test__parsing_fake_callback, NULL);
+}
+
+static struct test_case pmu_events_tests[] = {
+ TEST_CASE("PMU event table sanity", pmu_event_table),
+ TEST_CASE("PMU event map aliases", aliases),
+ TEST_CASE_REASON("Parsing of PMU event table metrics", parsing,
+ "some metrics failed"),
+ TEST_CASE("Parsing of PMU event table metrics with fake PMUs", parsing_fake),
+ { .name = NULL, }
+};
+
+struct test_suite suite__pmu_events = {
+ .desc = "PMU events",
+ .test_cases = pmu_events_tests,
+};
diff --git a/tools/perf/tests/pmu.c b/tools/perf/tests/pmu.c
new file mode 100644
index 000000000..8507bd615
--- /dev/null
+++ b/tools/perf/tests/pmu.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "parse-events.h"
+#include "pmu.h"
+#include "tests.h"
+#include <errno.h>
+#include <stdio.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+
+/* Simulated format definitions. */
+static struct test_format {
+ const char *name;
+ const char *value;
+} test_formats[] = {
+ { "krava01", "config:0-1,62-63\n", },
+ { "krava02", "config:10-17\n", },
+ { "krava03", "config:5\n", },
+ { "krava11", "config1:0,2,4,6,8,20-28\n", },
+ { "krava12", "config1:63\n", },
+ { "krava13", "config1:45-47\n", },
+ { "krava21", "config2:0-3,10-13,20-23,30-33,40-43,50-53,60-63\n", },
+ { "krava22", "config2:8,18,48,58\n", },
+ { "krava23", "config2:28-29,38\n", },
+};
+
+/* Simulated user input. */
+static struct parse_events_term test_terms[] = {
+ {
+ .config = (char *) "krava01",
+ .val.num = 15,
+ .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
+ .type_term = PARSE_EVENTS__TERM_TYPE_USER,
+ },
+ {
+ .config = (char *) "krava02",
+ .val.num = 170,
+ .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
+ .type_term = PARSE_EVENTS__TERM_TYPE_USER,
+ },
+ {
+ .config = (char *) "krava03",
+ .val.num = 1,
+ .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
+ .type_term = PARSE_EVENTS__TERM_TYPE_USER,
+ },
+ {
+ .config = (char *) "krava11",
+ .val.num = 27,
+ .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
+ .type_term = PARSE_EVENTS__TERM_TYPE_USER,
+ },
+ {
+ .config = (char *) "krava12",
+ .val.num = 1,
+ .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
+ .type_term = PARSE_EVENTS__TERM_TYPE_USER,
+ },
+ {
+ .config = (char *) "krava13",
+ .val.num = 2,
+ .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
+ .type_term = PARSE_EVENTS__TERM_TYPE_USER,
+ },
+ {
+ .config = (char *) "krava21",
+ .val.num = 119,
+ .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
+ .type_term = PARSE_EVENTS__TERM_TYPE_USER,
+ },
+ {
+ .config = (char *) "krava22",
+ .val.num = 11,
+ .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
+ .type_term = PARSE_EVENTS__TERM_TYPE_USER,
+ },
+ {
+ .config = (char *) "krava23",
+ .val.num = 2,
+ .type_val = PARSE_EVENTS__TERM_TYPE_NUM,
+ .type_term = PARSE_EVENTS__TERM_TYPE_USER,
+ },
+};
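+
+/*
+ * Worked example derived from the tables above (an editorial illustration,
+ * not part of the original source): each term value is scattered, lowest bit
+ * first, into the bits named by its format. For perf_event_attr.config:
+ *
+ *   krava01 = 15  into config:0-1,62-63  -> 0xc000000000000003
+ *   krava02 = 170 into config:10-17      -> 0x000000000002a800
+ *   krava03 = 1   into config:5          -> 0x0000000000000020
+ *                                  OR'ed -> 0xc00000000002a823
+ *
+ * which is the config value test__pmu() checks below; config1 and config2
+ * are assembled the same way from the krava1x and krava2x terms.
+ */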
+
+/*
+ * Prepare format directory data, exported by kernel
+ * at /sys/bus/event_source/devices/<dev>/format.
+ */
+static char *test_format_dir_get(void)
+{
+ static char dir[PATH_MAX];
+ unsigned int i;
+
+ snprintf(dir, PATH_MAX, "/tmp/perf-pmu-test-format-XXXXXX");
+ if (!mkdtemp(dir))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(test_formats); i++) {
+ static char name[PATH_MAX];
+ struct test_format *format = &test_formats[i];
+ FILE *file;
+
+ scnprintf(name, PATH_MAX, "%s/%s", dir, format->name);
+
+ file = fopen(name, "w");
+ if (!file)
+ return NULL;
+
+ if (1 != fwrite(format->value, strlen(format->value), 1, file))
+ break;
+
+ fclose(file);
+ }
+
+ return dir;
+}
+
+/* Cleanup format directory. */
+static int test_format_dir_put(char *dir)
+{
+ char buf[PATH_MAX];
+ snprintf(buf, PATH_MAX, "rm -f %s/*\n", dir);
+ if (system(buf))
+ return -1;
+
+ snprintf(buf, PATH_MAX, "rmdir %s\n", dir);
+ return system(buf);
+}
+
+static struct list_head *test_terms_list(void)
+{
+ static LIST_HEAD(terms);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_terms); i++)
+ list_add_tail(&test_terms[i].list, &terms);
+
+ return &terms;
+}
+
+static int test__pmu(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ char *format = test_format_dir_get();
+ LIST_HEAD(formats);
+ struct list_head *terms = test_terms_list();
+ int ret;
+
+ if (!format)
+ return -EINVAL;
+
+ do {
+ struct perf_event_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+
+ ret = perf_pmu__format_parse(format, &formats);
+ if (ret)
+ break;
+
+ ret = perf_pmu__config_terms("perf-pmu-test", &formats, &attr,
+ terms, false, NULL);
+ if (ret)
+ break;
+
+ ret = -EINVAL;
+
+ if (attr.config != 0xc00000000002a823)
+ break;
+ if (attr.config1 != 0x8000400000000145)
+ break;
+ if (attr.config2 != 0x0400000020041d07)
+ break;
+
+ ret = 0;
+ } while (0);
+
+ perf_pmu__del_formats(&formats);
+ test_format_dir_put(format);
+ return ret;
+}
+
+DEFINE_SUITE("Parse perf pmu format", pmu);
diff --git a/tools/perf/tests/python-use.c b/tools/perf/tests/python-use.c
new file mode 100644
index 000000000..6b990ee38
--- /dev/null
+++ b/tools/perf/tests/python-use.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Just test if we can load the python binding.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/compiler.h>
+#include "tests.h"
+#include "util/debug.h"
+
+static int test__python_use(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ char *cmd;
+ int ret;
+
+ if (asprintf(&cmd, "echo \"import sys ; sys.path.append('%s'); import perf\" | %s %s",
+ PYTHONPATH, PYTHON, verbose > 0 ? "" : "2> /dev/null") < 0)
+ return -1;
+
+ pr_debug("python usage test: \"%s\"\n", cmd);
+ ret = system(cmd) ? -1 : 0;
+ free(cmd);
+ return ret;
+}
+
+DEFINE_SUITE("'import perf' in python", python_use);
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
new file mode 100644
index 000000000..20930dd48
--- /dev/null
+++ b/tools/perf/tests/sample-parsing.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdbool.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "map_symbol.h"
+#include "branch.h"
+#include "event.h"
+#include "evsel.h"
+#include "debug.h"
+#include "util/synthetic-events.h"
+#include "util/trace-event.h"
+
+#include "tests.h"
+
+#define COMP(m) do { \
+ if (s1->m != s2->m) { \
+ pr_debug("Samples differ at '"#m"'\n"); \
+ return false; \
+ } \
+} while (0)
+
+#define MCOMP(m) do { \
+ if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) { \
+ pr_debug("Samples differ at '"#m"'\n"); \
+ return false; \
+ } \
+} while (0)
+
+/*
+ * Hardcode the expected values for branch_entry flags.
+ * These are based on the input value (213) specified
+ * in branch_stack variable.
+ */
+#define BS_EXPECTED_BE 0xa000d00000000000
+#define BS_EXPECTED_LE 0xd5000000
+#define FLAG(s) s->branch_stack->entries[i].flags
+
+static bool samples_same(const struct perf_sample *s1,
+ const struct perf_sample *s2,
+ u64 type, u64 read_format, bool needs_swap)
+{
+ size_t i;
+
+ if (type & PERF_SAMPLE_IDENTIFIER)
+ COMP(id);
+
+ if (type & PERF_SAMPLE_IP)
+ COMP(ip);
+
+ if (type & PERF_SAMPLE_TID) {
+ COMP(pid);
+ COMP(tid);
+ }
+
+ if (type & PERF_SAMPLE_TIME)
+ COMP(time);
+
+ if (type & PERF_SAMPLE_ADDR)
+ COMP(addr);
+
+ if (type & PERF_SAMPLE_ID)
+ COMP(id);
+
+ if (type & PERF_SAMPLE_STREAM_ID)
+ COMP(stream_id);
+
+ if (type & PERF_SAMPLE_CPU)
+ COMP(cpu);
+
+ if (type & PERF_SAMPLE_PERIOD)
+ COMP(period);
+
+ if (type & PERF_SAMPLE_READ) {
+ if (read_format & PERF_FORMAT_GROUP)
+ COMP(read.group.nr);
+ else
+ COMP(read.one.value);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ COMP(read.time_enabled);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ COMP(read.time_running);
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ for (i = 0; i < s1->read.group.nr; i++) {
+ /* FIXME: check values without LOST */
+ if (read_format & PERF_FORMAT_LOST)
+ MCOMP(read.group.values[i]);
+ }
+ } else {
+ COMP(read.one.id);
+ if (read_format & PERF_FORMAT_LOST)
+ COMP(read.one.lost);
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ COMP(callchain->nr);
+ for (i = 0; i < s1->callchain->nr; i++)
+ COMP(callchain->ips[i]);
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ COMP(raw_size);
+ if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
+ pr_debug("Samples differ at 'raw_data'\n");
+ return false;
+ }
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ COMP(branch_stack->nr);
+ COMP(branch_stack->hw_idx);
+ for (i = 0; i < s1->branch_stack->nr; i++) {
+ if (needs_swap)
+ return ((tep_is_bigendian()) ?
+ (FLAG(s2).value == BS_EXPECTED_BE) :
+ (FLAG(s2).value == BS_EXPECTED_LE));
+ else
+ MCOMP(branch_stack->entries[i]);
+ }
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);
+
+ COMP(user_regs.mask);
+ COMP(user_regs.abi);
+ if (s1->user_regs.abi &&
+ (!s1->user_regs.regs || !s2->user_regs.regs ||
+ memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
+ pr_debug("Samples differ at 'user_regs'\n");
+ return false;
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ COMP(user_stack.size);
+ if (memcmp(s1->user_stack.data, s2->user_stack.data,
+ s1->user_stack.size)) {
+ pr_debug("Samples differ at 'user_stack'\n");
+ return false;
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT)
+ COMP(weight);
+
+ if (type & PERF_SAMPLE_DATA_SRC)
+ COMP(data_src);
+
+ if (type & PERF_SAMPLE_TRANSACTION)
+ COMP(transaction);
+
+ if (type & PERF_SAMPLE_REGS_INTR) {
+ size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);
+
+ COMP(intr_regs.mask);
+ COMP(intr_regs.abi);
+ if (s1->intr_regs.abi &&
+ (!s1->intr_regs.regs || !s2->intr_regs.regs ||
+ memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
+ pr_debug("Samples differ at 'intr_regs'\n");
+ return false;
+ }
+ }
+
+ if (type & PERF_SAMPLE_PHYS_ADDR)
+ COMP(phys_addr);
+
+ if (type & PERF_SAMPLE_CGROUP)
+ COMP(cgroup);
+
+ if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
+ COMP(data_page_size);
+
+ if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
+ COMP(code_page_size);
+
+ if (type & PERF_SAMPLE_AUX) {
+ COMP(aux_sample.size);
+ if (memcmp(s1->aux_sample.data, s2->aux_sample.data,
+ s1->aux_sample.size)) {
+ pr_debug("Samples differ at 'aux_sample'\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
+{
+ struct evsel evsel = {
+ .needs_swap = false,
+ .core = {
+ . attr = {
+ .sample_type = sample_type,
+ .read_format = read_format,
+ },
+ },
+ };
+ union perf_event *event;
+ union {
+ struct ip_callchain callchain;
+ u64 data[64];
+ } callchain = {
+ /* 3 ips */
+ .data = {3, 201, 202, 203},
+ };
+ union {
+ struct branch_stack branch_stack;
+ u64 data[64];
+ } branch_stack = {
+ /* 1 branch_entry */
+ .data = {1, -1ULL, 211, 212, 213},
+ };
+ u64 regs[64];
+ const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 };
+ const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
+ const u64 aux_data[] = {0xa55a, 0, 0xeeddee, 0x0282028202820282};
+ struct perf_sample sample = {
+ .ip = 101,
+ .pid = 102,
+ .tid = 103,
+ .time = 104,
+ .addr = 105,
+ .id = 106,
+ .stream_id = 107,
+ .period = 108,
+ .weight = 109,
+ .cpu = 110,
+ .raw_size = sizeof(raw_data),
+ .data_src = 111,
+ .transaction = 112,
+ .raw_data = (void *)raw_data,
+ .callchain = &callchain.callchain,
+ .no_hw_idx = false,
+ .branch_stack = &branch_stack.branch_stack,
+ .user_regs = {
+ .abi = PERF_SAMPLE_REGS_ABI_64,
+ .mask = sample_regs,
+ .regs = regs,
+ },
+ .user_stack = {
+ .size = sizeof(data),
+ .data = (void *)data,
+ },
+ .read = {
+ .time_enabled = 0x030a59d664fca7deULL,
+ .time_running = 0x011b6ae553eb98edULL,
+ },
+ .intr_regs = {
+ .abi = PERF_SAMPLE_REGS_ABI_64,
+ .mask = sample_regs,
+ .regs = regs,
+ },
+ .phys_addr = 113,
+ .cgroup = 114,
+ .data_page_size = 115,
+ .code_page_size = 116,
+ .aux_sample = {
+ .size = sizeof(aux_data),
+ .data = (void *)aux_data,
+ },
+ };
+ struct sample_read_value values[] = {{1, 5, 0}, {9, 3, 0}, {2, 7, 0}, {6, 4, 1},};
+ struct perf_sample sample_out, sample_out_endian;
+ size_t i, sz, bufsz;
+ int err, ret = -1;
+
+ if (sample_type & PERF_SAMPLE_REGS_USER)
+ evsel.core.attr.sample_regs_user = sample_regs;
+
+ if (sample_type & PERF_SAMPLE_REGS_INTR)
+ evsel.core.attr.sample_regs_intr = sample_regs;
+
+ if (sample_type & PERF_SAMPLE_BRANCH_STACK)
+ evsel.core.attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
+
+ for (i = 0; i < sizeof(regs); i++)
+ *(i + (u8 *)regs) = i & 0xfe;
+
+ if (read_format & PERF_FORMAT_GROUP) {
+ sample.read.group.nr = 4;
+ sample.read.group.values = values;
+ } else {
+ sample.read.one.value = 0x08789faeb786aa87ULL;
+ sample.read.one.id = 99;
+ sample.read.one.lost = 1;
+ }
+
+ sz = perf_event__sample_event_size(&sample, sample_type, read_format);
+ bufsz = sz + 4096; /* Add a bit for overrun checking */
+ event = malloc(bufsz);
+ if (!event) {
+ pr_debug("malloc failed\n");
+ return -1;
+ }
+
+ memset(event, 0xff, bufsz);
+ event->header.type = PERF_RECORD_SAMPLE;
+ event->header.misc = 0;
+ event->header.size = sz;
+
+ err = perf_event__synthesize_sample(event, sample_type, read_format,
+ &sample);
+ if (err) {
+ pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
+ "perf_event__synthesize_sample", sample_type, err);
+ goto out_free;
+ }
+
+ /* The data does not contain 0xff so we use that to check the size */
+ for (i = bufsz; i > 0; i--) {
+ if (*(i - 1 + (u8 *)event) != 0xff)
+ break;
+ }
+ if (i != sz) {
+ pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
+ i, sz);
+ goto out_free;
+ }
+
+ evsel.sample_size = __evsel__sample_size(sample_type);
+
+ err = evsel__parse_sample(&evsel, event, &sample_out);
+ if (err) {
+ pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
+ "evsel__parse_sample", sample_type, err);
+ goto out_free;
+ }
+
+ if (!samples_same(&sample, &sample_out, sample_type, read_format, evsel.needs_swap)) {
+ pr_debug("parsing failed for sample_type %#"PRIx64"\n",
+ sample_type);
+ goto out_free;
+ }
+
+ if (sample_type == PERF_SAMPLE_BRANCH_STACK) {
+ evsel.needs_swap = true;
+ evsel.sample_size = __evsel__sample_size(sample_type);
+ err = evsel__parse_sample(&evsel, event, &sample_out_endian);
+ if (err) {
+ pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
+ "evsel__parse_sample", sample_type, err);
+ goto out_free;
+ }
+
+ if (!samples_same(&sample, &sample_out_endian, sample_type, read_format, evsel.needs_swap)) {
+ pr_debug("parsing failed for sample_type %#"PRIx64"\n",
+ sample_type);
+ goto out_free;
+ }
+ }
+
+ ret = 0;
+out_free:
+ free(event);
+ if (ret && read_format)
+ pr_debug("read_format %#"PRIx64"\n", read_format);
+ return ret;
+}
+
+/**
+ * test__sample_parsing - test sample parsing.
+ *
+ * This function implements a test that synthesizes a sample event, parses it
+ * and then checks that the parsed sample matches the original sample. The test
+ * checks sample format bits separately and together. If the test passes %0 is
+ * returned, otherwise %-1 is returned.
+ */
+static int test__sample_parsing(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 28, 29, 30, 31};
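+ /*
+ * Decoding of rf[] above (an editorial note, not in the original source):
+ * each value ORs PERF_FORMAT_* bits - 4 is ID alone, 5-7 add the
+ * TOTAL_TIME_ENABLED/RUNNING bits, 12-15 add GROUP, 20-22 add LOST and
+ * 28-31 add GROUP|LOST; ID is present throughout since it is forced for
+ * PERF_SAMPLE_READ.
+ */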
+ u64 sample_type;
+ u64 sample_regs;
+ size_t i;
+ int err;
+
+ /*
+ * Fail the test if it has not been updated when new sample format bits
+ * were added. Please actually update the test rather than just change
+ * the condition below.
+ */
+ if (PERF_SAMPLE_MAX > PERF_SAMPLE_WEIGHT_STRUCT << 1) {
+ pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
+ return -1;
+ }
+
+ /* Test each sample format bit separately */
+ for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
+ sample_type <<= 1) {
+ /* Test read_format variations */
+ if (sample_type == PERF_SAMPLE_READ) {
+ for (i = 0; i < ARRAY_SIZE(rf); i++) {
+ err = do_test(sample_type, 0, rf[i]);
+ if (err)
+ return err;
+ }
+ continue;
+ }
+ sample_regs = 0;
+
+ if (sample_type == PERF_SAMPLE_REGS_USER)
+ sample_regs = 0x3fff;
+
+ if (sample_type == PERF_SAMPLE_REGS_INTR)
+ sample_regs = 0xff0fff;
+
+ err = do_test(sample_type, sample_regs, 0);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Test all sample format bits together
+ * Note: PERF_SAMPLE_WEIGHT and PERF_SAMPLE_WEIGHT_STRUCT cannot
+ * be set simultaneously.
+ */
+ sample_type = (PERF_SAMPLE_MAX - 1) & ~PERF_SAMPLE_WEIGHT;
+ sample_regs = 0x3fff; /* shared by intr and user regs */
+ for (i = 0; i < ARRAY_SIZE(rf); i++) {
+ err = do_test(sample_type, sample_regs, rf[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+DEFINE_SUITE("Sample parsing", sample_parsing);
diff --git a/tools/perf/tests/sdt.c b/tools/perf/tests/sdt.c
new file mode 100644
index 000000000..919712899
--- /dev/null
+++ b/tools/perf/tests/sdt.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/epoll.h>
+#include <util/symbol.h>
+#include <linux/filter.h>
+#include "tests.h"
+#include "debug.h"
+#include "probe-file.h"
+#include "build-id.h"
+#include "util.h"
+
+/* To test SDT events, we need libelf support to scan the ELF binary */
+#if defined(HAVE_SDT_EVENT) && defined(HAVE_LIBELF_SUPPORT)
+
+#include <sys/sdt.h>
+
+static int target_function(void)
+{
+ DTRACE_PROBE(perf, test_target);
+ return TEST_OK;
+}
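+
+/*
+ * Note on the probe above (an editorial illustration, not part of the
+ * original source): DTRACE_PROBE(perf, test_target) places an SDT marker
+ * with provider "perf" and name "test_target" in this binary; after the
+ * binary is added to the build-id cache, the probe cache stores it under the
+ * group/event pair "sdt_perf"/"test_target", which is what
+ * search_cached_probe() looks up below.
+ */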
+
+/* Copied from builtin-buildid-cache.c */
+static int build_id_cache__add_file(const char *filename)
+{
+ char sbuild_id[SBUILD_ID_SIZE];
+ struct build_id bid;
+ int err;
+
+ err = filename__read_build_id(filename, &bid);
+ if (err < 0) {
+ pr_debug("Failed to read build id of %s\n", filename);
+ return err;
+ }
+
+ build_id__sprintf(&bid, sbuild_id);
+ err = build_id_cache__add_s(sbuild_id, filename, NULL, false, false);
+ if (err < 0)
+ pr_debug("Failed to add build id cache of %s\n", filename);
+ return err;
+}
+
+static char *get_self_path(void)
+{
+ char *buf = calloc(PATH_MAX, sizeof(char));
+
+ if (buf && readlink("/proc/self/exe", buf, PATH_MAX - 1) < 0) {
+ pr_debug("Failed to get correct path of perf\n");
+ free(buf);
+ return NULL;
+ }
+ return buf;
+}
+
+static int search_cached_probe(const char *target,
+ const char *group, const char *event)
+{
+ struct probe_cache *cache = probe_cache__new(target, NULL);
+ int ret = 0;
+
+ if (!cache) {
+ pr_debug("Failed to open probe cache of %s\n", target);
+ return -EINVAL;
+ }
+
+ if (!probe_cache__find_by_name(cache, group, event)) {
+ pr_debug("Failed to find %s:%s in the cache\n", group, event);
+ ret = -ENOENT;
+ }
+ probe_cache__delete(cache);
+
+ return ret;
+}
+
+static int test__sdt_event(struct test_suite *test __maybe_unused, int subtests __maybe_unused)
+{
+ int ret = TEST_FAIL;
+ char __tempdir[] = "./test-buildid-XXXXXX";
+ char *tempdir = NULL, *myself = get_self_path();
+
+ if (myself == NULL || mkdtemp(__tempdir) == NULL) {
+ pr_debug("Failed to make a tempdir for build-id cache\n");
+ goto error;
+ }
+ /* Note that buildid_dir must be an absolute path */
+ tempdir = realpath(__tempdir, NULL);
+ if (tempdir == NULL)
+ goto error_rmdir;
+
+ /* At first, scan itself */
+ set_buildid_dir(tempdir);
+ if (build_id_cache__add_file(myself) < 0)
+ goto error_rmdir;
+
+ /* Open a cache and make sure the SDT is stored */
+ if (search_cached_probe(myself, "sdt_perf", "test_target") < 0)
+ goto error_rmdir;
+
+ /* TBD: probe the SDT event and collect logs */
+
+ /* Call the target and get an event */
+ ret = target_function();
+
+error_rmdir:
+ /* Cleanup temporary buildid dir */
+ rm_rf(__tempdir);
+error:
+ free(tempdir);
+ free(myself);
+ return ret;
+}
+#else
+static int test__sdt_event(struct test_suite *test __maybe_unused, int subtests __maybe_unused)
+{
+ pr_debug("Skip SDT event test because SDT support is not compiled\n");
+ return TEST_SKIP;
+}
+#endif
+
+DEFINE_SUITE("Probe SDT events", sdt_event);
diff --git a/tools/perf/tests/shell/buildid.sh b/tools/perf/tests/shell/buildid.sh
new file mode 100755
index 000000000..f05670d1e
--- /dev/null
+++ b/tools/perf/tests/shell/buildid.sh
@@ -0,0 +1,158 @@
+#!/bin/sh
+# build id cache operations
+# SPDX-License-Identifier: GPL-2.0
+
+# skip if there's no readelf
+if ! [ -x "$(command -v readelf)" ]; then
+ echo "failed: no readelf, install binutils"
+ exit 2
+fi
+
+# skip if there's no compiler
+if ! [ -x "$(command -v cc)" ]; then
+ echo "failed: no compiler, install gcc"
+ exit 2
+fi
+
+# check what we need to test windows binaries
+add_pe=1
+run_pe=1
+if ! perf version --build-options | grep -q 'libbfd: .* on '; then
+ echo "WARNING: perf not built with libbfd. PE binaries will not be tested."
+ add_pe=0
+ run_pe=0
+fi
+if ! which wine > /dev/null; then
+ echo "WARNING: wine not found. PE binaries will not be run."
+ run_pe=0
+fi
+
+# set up wine
+if [ ${run_pe} -eq 1 ]; then
+ wineprefix=$(mktemp -d /tmp/perf.wineprefix.XXX)
+ export WINEPREFIX=${wineprefix}
+ # clear display variables to prevent wine from popping up dialogs
+ unset DISPLAY
+ unset WAYLAND_DISPLAY
+fi
+
+ex_md5=$(mktemp /tmp/perf.ex.MD5.XXX)
+ex_sha1=$(mktemp /tmp/perf.ex.SHA1.XXX)
+ex_pe=$(dirname $0)/../pe-file.exe
+
+echo 'int main(void) { return 0; }' | cc -Wl,--build-id=sha1 -o ${ex_sha1} -x c -
+echo 'int main(void) { return 0; }' | cc -Wl,--build-id=md5 -o ${ex_md5} -x c -
+
+echo "test binaries: ${ex_sha1} ${ex_md5} ${ex_pe}"
+
+check()
+{
+ case $1 in
+ *.exe)
+ # We don't have a tool that can pull a nicely formatted build-id out of
+ # a PE file, but we can extract the whole section with objcopy and
+ # format it ourselves. The .buildid section is a Debug Directory
+ # containing a CodeView entry:
+ # https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#debug-directory-image-only
+ # https://github.com/dotnet/runtime/blob/da94c022576a5c3bbc0e896f006565905eb137f9/docs/design/specs/PE-COFF.md
+ # The build-id starts at byte 33 and must be rearranged into a GUID.
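+ # e.g. raw bytes "11 22 33 44 55 66 77 88 99 aa ..." become "44332211 6655 8877 99aa..."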
+ id=`objcopy -O binary --only-section=.buildid $1 /dev/stdout | \
+ cut -c 33-48 | hexdump -ve '/1 "%02x"' | \
+ sed 's@^\(..\)\(..\)\(..\)\(..\)\(..\)\(..\)\(..\)\(..\)\(.*\)0a$@\4\3\2\1\6\5\8\7\9@'`
+ ;;
+ *)
+ id=`readelf -n ${1} 2>/dev/null | grep 'Build ID' | awk '{print $3}'`
+ ;;
+ esac
+ echo "build id: ${id}"
+
+ link=${build_id_dir}/.build-id/${id:0:2}/${id:2}
+ echo "link: ${link}"
+
+ if [ ! -h $link ]; then
+ echo "failed: link ${link} does not exist"
+ exit 1
+ fi
+
+ file=${build_id_dir}/.build-id/${id:0:2}/`readlink ${link}`/elf
+ echo "file: ${file}"
+
+ if [ ! -x $file ]; then
+ echo "failed: file ${file} does not exist"
+ exit 1
+ fi
+
+ diff ${file} ${1}
+ if [ $? -ne 0 ]; then
+ echo "failed: ${file} do not match"
+ exit 1
+ fi
+
+ ${perf} buildid-cache -l | grep ${id}
+ if [ $? -ne 0 ]; then
+ echo "failed: ${id} is not reported by \"perf buildid-cache -l\""
+ exit 1
+ fi
+
+ echo "OK for ${1}"
+}
+
+test_add()
+{
+ build_id_dir=$(mktemp -d /tmp/perf.debug.XXX)
+ perf="perf --buildid-dir ${build_id_dir}"
+
+ ${perf} buildid-cache -v -a ${1}
+ if [ $? -ne 0 ]; then
+ echo "failed: add ${1} to build id cache"
+ exit 1
+ fi
+
+ check ${1}
+
+ rm -rf ${build_id_dir}
+}
+
+test_record()
+{
+ data=$(mktemp /tmp/perf.data.XXX)
+ build_id_dir=$(mktemp -d /tmp/perf.debug.XXX)
+ log=$(mktemp /tmp/perf.log.XXX)
+ perf="perf --buildid-dir ${build_id_dir}"
+
+ echo "running: perf record $@"
+ ${perf} record --buildid-all -o ${data} $@ &> ${log}
+ if [ $? -ne 0 ]; then
+ echo "failed: record $@"
+ echo "see log: ${log}"
+ exit 1
+ fi
+
+ check ${@: -1}
+
+ rm -f ${log}
+ rm -rf ${build_id_dir}
+ rm -rf ${data}
+}
+
+# add binaries manually via perf buildid-cache -a
+test_add ${ex_sha1}
+test_add ${ex_md5}
+if [ ${add_pe} -eq 1 ]; then
+ test_add ${ex_pe}
+fi
+
+# add binaries via perf record post processing
+test_record ${ex_sha1}
+test_record ${ex_md5}
+if [ ${run_pe} -eq 1 ]; then
+ test_record wine ${ex_pe}
+fi
+
+# cleanup
+rm ${ex_sha1} ${ex_md5}
+if [ ${run_pe} -eq 1 ]; then
+ rm -r ${wineprefix}
+fi
+
+exit 0
diff --git a/tools/perf/tests/shell/coresight/Makefile b/tools/perf/tests/shell/coresight/Makefile
new file mode 100644
index 000000000..b070e7797
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+include ../../../../../tools/scripts/Makefile.include
+include ../../../../../tools/scripts/Makefile.arch
+include ../../../../../tools/scripts/utilities.mak
+
+SUBDIRS = \
+ asm_pure_loop \
+ memcpy_thread \
+ thread_loop \
+ unroll_loop_thread
+
+all: $(SUBDIRS)
+$(SUBDIRS):
+ @$(MAKE) -C $@ >/dev/null
+
+INSTALLDIRS = $(SUBDIRS:%=install-%)
+
+install-tests: $(INSTALLDIRS)
+$(INSTALLDIRS):
+ @$(MAKE) -C $(@:install-%=%) install-tests >/dev/null
+
+CLEANDIRS = $(SUBDIRS:%=clean-%)
+
+clean: $(CLEANDIRS)
+$(CLEANDIRS):
+ $(call QUIET_CLEAN, test-$(@:clean-%=%)) $(Q)$(MAKE) -C $(@:clean-%=%) clean >/dev/null
+
+.PHONY: all clean $(SUBDIRS) $(CLEANDIRS) $(INSTALLDIRS)
diff --git a/tools/perf/tests/shell/coresight/Makefile.miniconfig b/tools/perf/tests/shell/coresight/Makefile.miniconfig
new file mode 100644
index 000000000..5f72a9cb4
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/Makefile.miniconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+ifndef DESTDIR
+prefix ?= $(HOME)
+endif
+
+DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
+INSTALL = install
+INSTDIR_SUB = tests/shell/coresight
+
+include ../../../../../scripts/Makefile.include
+include ../../../../../scripts/Makefile.arch
+include ../../../../../scripts/utilities.mak
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop.sh b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
new file mode 100755
index 000000000..569e9d461
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop.sh
@@ -0,0 +1,18 @@
+#!/bin/sh -e
+# CoreSight / ASM Pure Loop
+
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+TEST="asm_pure_loop"
+. $(dirname $0)/../lib/coresight.sh
+ARGS=""
+DATV="out"
+DATA="$DATD/perf-$TEST-$DATV.data"
+
+perf record $PERFRECOPT -o "$DATA" "$BIN" $ARGS
+
+perf_dump_aux_verify "$DATA" 10 10 10
+
+err=$?
+exit $err
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/.gitignore b/tools/perf/tests/shell/coresight/asm_pure_loop/.gitignore
new file mode 100644
index 000000000..468673ac3
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop/.gitignore
@@ -0,0 +1 @@
+asm_pure_loop
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/Makefile b/tools/perf/tests/shell/coresight/asm_pure_loop/Makefile
new file mode 100644
index 000000000..206849e92
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop/Makefile
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+include ../Makefile.miniconfig
+
+# Binary to produce
+BIN=asm_pure_loop
+# Any linking/libraries needed for the binary - empty if none needed
+LIB=
+
+all: $(BIN)
+
+$(BIN): $(BIN).S
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Build line - this is raw asm with no libc to have an always exact binary
+ $(Q)$(CC) $(BIN).S -nostdlib -static -o $(BIN) $(LIB)
+endif
+endif
+
+install-tests: all
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Install the test tool in the right place
+ $(call QUIET_INSTALL, tests) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)'; \
+ $(INSTALL) $(BIN) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)/$(BIN)'
+endif
+endif
+
+clean:
+ $(Q)$(RM) -f $(BIN)
+
+.PHONY: all clean install-tests
diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
new file mode 100644
index 000000000..75cf084a9
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Tamas Zsoldos <tamas.zsoldos@arm.com>, 2021 */
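+/* x1 stays zero, so the cbnz below never branches to "noskip" and the br to "skip" is always taken while x0 counts down from 0xffff. */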
+
+.globl _start
+_start:
+ mov x0, 0x0000ffff
+ mov x1, xzr
+loop:
+ nop
+ nop
+ cbnz x1, noskip
+ nop
+ nop
+ adrp x2, skip
+ add x2, x2, :lo12:skip
+ br x2
+ nop
+ nop
+noskip:
+ nop
+ nop
+skip:
+ sub x0, x0, 1
+ cbnz x0, loop
+
+ mov x0, #0
+ mov x8, #93 // __NR_exit syscall
+ svc #0
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread/.gitignore b/tools/perf/tests/shell/coresight/memcpy_thread/.gitignore
new file mode 100644
index 000000000..f8217e560
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/memcpy_thread/.gitignore
@@ -0,0 +1 @@
+memcpy_thread
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread/Makefile b/tools/perf/tests/shell/coresight/memcpy_thread/Makefile
new file mode 100644
index 000000000..2db637eb2
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/memcpy_thread/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+include ../Makefile.miniconfig
+
+# Binary to produce
+BIN=memcpy_thread
+# Any linking/libraries needed for the binary - empty if none needed
+LIB=-pthread
+
+all: $(BIN)
+
+$(BIN): $(BIN).c
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Build line
+ $(Q)$(CC) $(BIN).c -o $(BIN) $(LIB)
+endif
+endif
+
+install-tests: all
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Install the test tool in the right place
+ $(call QUIET_INSTALL, tests) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)'; \
+ $(INSTALL) $(BIN) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)/$(BIN)'
+endif
+endif
+
+clean:
+ $(Q)$(RM) -f $(BIN)
+
+.PHONY: all clean install-tests
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c b/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c
new file mode 100644
index 000000000..a7e169d1b
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <pthread.h>
+
+struct args {
+ unsigned long loops;
+ unsigned long size;
+ pthread_t th;
+ void *ret;
+};
+
+static void *thrfn(void *arg)
+{
+ struct args *a = arg;
+ unsigned long i, len = a->loops;
+ unsigned char *src, *dst;
+
+ src = malloc(a->size * 1024);
+ dst = malloc(a->size * 1024);
+ if ((!src) || (!dst)) {
+ printf("ERR: Can't allocate memory\n");
+ exit(1);
+ }
+ for (i = 0; i < len; i++)
+ memcpy(dst, src, a->size * 1024);
+ return NULL;
+}
+
+static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
+{
+ pthread_t t;
+ pthread_attr_t attr;
+
+ pthread_attr_init(&attr);
+ pthread_create(&t, &attr, fn, arg);
+ return t;
+}
+
+int main(int argc, char **argv)
+{
+ unsigned long i, len, size, thr;
+ pthread_t threads[256];
+ struct args args[256];
+ long long v;
+
+ if (argc < 4) {
+ printf("ERR: %s [copysize Kb] [numthreads] [numloops (hundreds)]\n", argv[0]);
+ exit(1);
+ }
+
+ v = atoll(argv[1]);
+ if ((v < 1) || (v > (1024 * 1024))) {
+ printf("ERR: max memory 1GB (1048576 KB)\n");
+ exit(1);
+ }
+ size = v;
+ thr = atol(argv[2]);
+ if ((thr < 1) || (thr > 256)) {
+ printf("ERR: threads 1-256\n");
+ exit(1);
+ }
+ v = atoll(argv[3]);
+ if ((v < 1) || (v > 40000000000ll)) {
+ printf("ERR: loops 1-40000000000 (hundreds)\n");
+ exit(1);
+ }
+ len = v * 100;
+ for (i = 0; i < thr; i++) {
+ args[i].loops = len;
+ args[i].size = size;
+ args[i].th = new_thr(thrfn, &(args[i]));
+ }
+ for (i = 0; i < thr; i++)
+ pthread_join(args[i].th, &(args[i].ret));
+ return 0;
+}
diff --git a/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
new file mode 100755
index 000000000..d21ba8545
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh
@@ -0,0 +1,18 @@
+#!/bin/sh -e
+# CoreSight / Memcpy 16k 10 Threads
+
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+TEST="memcpy_thread"
+. $(dirname $0)/../lib/coresight.sh
+ARGS="16 10 1"
+DATV="16k_10"
+DATA="$DATD/perf-$TEST-$DATV.data"
+
+perf record $PERFRECOPT -o "$DATA" "$BIN" $ARGS
+
+perf_dump_aux_verify "$DATA" 10 10 10
+
+err=$?
+exit $err
diff --git a/tools/perf/tests/shell/coresight/thread_loop/.gitignore b/tools/perf/tests/shell/coresight/thread_loop/.gitignore
new file mode 100644
index 000000000..6d4c33eaa
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/thread_loop/.gitignore
@@ -0,0 +1 @@
+thread_loop
diff --git a/tools/perf/tests/shell/coresight/thread_loop/Makefile b/tools/perf/tests/shell/coresight/thread_loop/Makefile
new file mode 100644
index 000000000..ea846c038
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/thread_loop/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+include ../Makefile.miniconfig
+
+# Binary to produce
+BIN=thread_loop
+# Any linking/libraries needed for the binary - empty if none needed
+LIB=-pthread
+
+all: $(BIN)
+
+$(BIN): $(BIN).c
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Build line
+ $(Q)$(CC) $(BIN).c -o $(BIN) $(LIB)
+endif
+endif
+
+install-tests: all
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Install the test tool in the right place
+ $(call QUIET_INSTALL, tests) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)'; \
+ $(INSTALL) $(BIN) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)/$(BIN)'
+endif
+endif
+
+clean:
+ $(Q)$(RM) -f $(BIN)
+
+.PHONY: all clean install-tests
diff --git a/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c b/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c
new file mode 100644
index 000000000..c0158fac7
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+// Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+// define this for gettid()
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <pthread.h>
+#include <sys/syscall.h>
+#ifndef SYS_gettid
+// gettid is 178 on arm64
+# define SYS_gettid 178
+#endif
+#define gettid() syscall(SYS_gettid)
+
+struct args {
+ unsigned int loops;
+ pthread_t th;
+ void *ret;
+};
+
+static void *thrfn(void *arg)
+{
+ struct args *a = arg;
+ int i = 0, len = a->loops;
+
+ if (getenv("SHOW_TID")) {
+ unsigned long long tid = gettid();
+
+ printf("%llu\n", tid);
+ }
+ asm volatile(
+ "loop:\n"
+ "add %[i], %[i], #1\n"
+ "cmp %[i], %[len]\n"
+ "blt loop\n"
+ : /* out */
+ : /* in */ [i] "r" (i), [len] "r" (len)
+ : /* clobber */
+ );
+ return (void *)(long)i;
+}
+
+static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
+{
+ pthread_t t;
+ pthread_attr_t attr;
+
+ pthread_attr_init(&attr);
+ pthread_create(&t, &attr, fn, arg);
+ return t;
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int i, len, thr;
+ pthread_t threads[256];
+ struct args args[256];
+
+ if (argc < 3) {
+ printf("ERR: %s [numthreads] [numloops (millions)]\n", argv[0]);
+ exit(1);
+ }
+
+ thr = atoi(argv[1]);
+ if ((thr < 1) || (thr > 256)) {
+ printf("ERR: threads 1-256\n");
+ exit(1);
+ }
+ len = atoi(argv[2]);
+ if ((len < 1) || (len > 4000)) {
+ printf("ERR: max loops 4000 (millions)\n");
+ exit(1);
+ }
+ len *= 1000000;
+ for (i = 0; i < thr; i++) {
+ args[i].loops = len;
+ args[i].th = new_thr(thrfn, &(args[i]));
+ }
+ for (i = 0; i < thr; i++)
+ pthread_join(args[i].th, &(args[i].ret));
+ return 0;
+}
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
new file mode 100755
index 000000000..7c13636fc
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh
@@ -0,0 +1,19 @@
+#!/bin/sh -e
+# CoreSight / Thread Loop 10 Threads - Check TID
+
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+TEST="thread_loop"
+. $(dirname $0)/../lib/coresight.sh
+ARGS="10 1"
+DATV="check-tid-10th"
+DATA="$DATD/perf-$TEST-$DATV.data"
+STDO="$DATD/perf-$TEST-$DATV.stdout"
+
+SHOW_TID=1 perf record -s $PERFRECOPT -o "$DATA" "$BIN" $ARGS > $STDO
+
+perf_dump_aux_tid_verify "$DATA" "$STDO"
+
+err=$?
+exit $err
diff --git a/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
new file mode 100755
index 000000000..a067145af
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh
@@ -0,0 +1,19 @@
+#!/bin/sh -e
+# CoreSight / Thread Loop 2 Threads - Check TID
+
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+TEST="thread_loop"
+. $(dirname $0)/../lib/coresight.sh
+ARGS="2 20"
+DATV="check-tid-2th"
+DATA="$DATD/perf-$TEST-$DATV.data"
+STDO="$DATD/perf-$TEST-$DATV.stdout"
+
+SHOW_TID=1 perf record -s $PERFRECOPT -o "$DATA" "$BIN" $ARGS > $STDO
+
+perf_dump_aux_tid_verify "$DATA" "$STDO"
+
+err=$?
+exit $err
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread/.gitignore b/tools/perf/tests/shell/coresight/unroll_loop_thread/.gitignore
new file mode 100644
index 000000000..2cb4e996d
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread/.gitignore
@@ -0,0 +1 @@
+unroll_loop_thread
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread/Makefile b/tools/perf/tests/shell/coresight/unroll_loop_thread/Makefile
new file mode 100644
index 000000000..6264c4e3a
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+include ../Makefile.miniconfig
+
+# Binary to produce
+BIN=unroll_loop_thread
+# Any linking/libraries needed for the binary - empty if none needed
+LIB=-pthread
+
+all: $(BIN)
+
+$(BIN): $(BIN).c
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Build line
+ $(Q)$(CC) $(BIN).c -o $(BIN) $(LIB)
+endif
+endif
+
+install-tests: all
+ifdef CORESIGHT
+ifeq ($(ARCH),arm64)
+# Install the test tool in the right place
+ $(call QUIET_INSTALL, tests) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)'; \
+ $(INSTALL) $(BIN) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/$(INSTDIR_SUB)/$(BIN)/$(BIN)'
+endif
+endif
+
+clean:
+ $(Q)$(RM) -f $(BIN)
+
+.PHONY: all clean install-tests
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c b/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c
new file mode 100644
index 000000000..8f6d38420
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+// Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <pthread.h>
+
+struct args {
+ pthread_t th;
+ unsigned int in;
+ void *ret;
+};
+
+static void *thrfn(void *arg)
+{
+ struct args *a = arg;
+ unsigned int i, in = a->in;
+
+ for (i = 0; i < 10000; i++) {
+ asm volatile (
+// force an unroll of this add instruction so we can test long runs of code
+#define SNIP1 "add %[in], %[in], #1\n"
+// 10
+#define SNIP2 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1 SNIP1
+// 100
+#define SNIP3 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2 SNIP2
+// 1000
+#define SNIP4 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3 SNIP3
+// 10000
+#define SNIP5 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4 SNIP4
+// 100000
+ SNIP5 SNIP5 SNIP5 SNIP5 SNIP5 SNIP5 SNIP5 SNIP5 SNIP5 SNIP5
+ : /* out */
+ : /* in */ [in] "r" (in)
+ : /* clobber */
+ );
+ }
+ return NULL;
+}
+
+static pthread_t new_thr(void *(*fn) (void *arg), void *arg)
+{
+ pthread_t t;
+ pthread_attr_t attr;
+
+ pthread_attr_init(&attr);
+ pthread_create(&t, &attr, fn, arg);
+ return t;
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int i, thr;
+ pthread_t threads[256];
+ struct args args[256];
+
+ if (argc < 2) {
+ printf("ERR: %s [numthreads]\n", argv[0]);
+ exit(1);
+ }
+
+ thr = atoi(argv[1]);
+ if ((thr > 256) || (thr < 1)) {
+ printf("ERR: threads 1-256\n");
+ exit(1);
+ }
+ for (i = 0; i < thr; i++) {
+ args[i].in = rand();
+ args[i].th = new_thr(thrfn, &(args[i]));
+ }
+ for (i = 0; i < thr; i++)
+ pthread_join(args[i].th, &(args[i].ret));
+ return 0;
+}
diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
new file mode 100755
index 000000000..f48c85230
--- /dev/null
+++ b/tools/perf/tests/shell/coresight/unroll_loop_thread_10.sh
@@ -0,0 +1,18 @@
+#!/bin/sh -e
+# CoreSight / Unroll Loop Thread 10
+
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+TEST="unroll_loop_thread"
+. $(dirname $0)/../lib/coresight.sh
+ARGS="10"
+DATV="10"
+DATA="$DATD/perf-$TEST-$DATV.data"
+
+perf record $PERFRECOPT -o "$DATA" "$BIN" $ARGS
+
+perf_dump_aux_verify "$DATA" 10 10 10
+
+err=$?
+exit $err
diff --git a/tools/perf/tests/shell/daemon.sh b/tools/perf/tests/shell/daemon.sh
new file mode 100755
index 000000000..45fc24af5
--- /dev/null
+++ b/tools/perf/tests/shell/daemon.sh
@@ -0,0 +1,487 @@
+#!/bin/bash
+# daemon operations
+# SPDX-License-Identifier: GPL-2.0
+
+check_line_first()
+{
+ local line=$1
+ local name=$2
+ local base=$3
+ local output=$4
+ local lock=$5
+ local up=$6
+
+ local line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'`
+ local line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'`
+ local line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'`
+ local line_lock=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'`
+ local line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'`
+
+ if [ "${name}" != "${line_name}" ]; then
+ echo "FAILED: wrong name"
+ error=1
+ fi
+
+ if [ "${base}" != "${line_base}" ]; then
+ echo "FAILED: wrong base"
+ error=1
+ fi
+
+ if [ "${output}" != "${line_output}" ]; then
+ echo "FAILED: wrong output"
+ error=1
+ fi
+
+ if [ "${lock}" != "${line_lock}" ]; then
+ echo "FAILED: wrong lock"
+ error=1
+ fi
+
+ if [ "${up}" != "${line_up}" ]; then
+ echo "FAILED: wrong up"
+ error=1
+ fi
+}
+
+check_line_other()
+{
+ local line=$1
+ local name=$2
+ local run=$3
+ local base=$4
+ local output=$5
+ local control=$6
+ local ack=$7
+ local up=$8
+
+ local line_name=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $2 }'`
+ local line_run=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $3 }'`
+ local line_base=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $4 }'`
+ local line_output=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $5 }'`
+ local line_control=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $6 }'`
+ local line_ack=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $7 }'`
+ local line_up=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $8 }'`
+
+ if [ "${name}" != "${line_name}" ]; then
+ echo "FAILED: wrong name"
+ error=1
+ fi
+
+ if [ "${run}" != "${line_run}" ]; then
+ echo "FAILED: wrong run"
+ error=1
+ fi
+
+ if [ "${base}" != "${line_base}" ]; then
+ echo "FAILED: wrong base"
+ error=1
+ fi
+
+ if [ "${output}" != "${line_output}" ]; then
+ echo "FAILED: wrong output"
+ error=1
+ fi
+
+ if [ "${control}" != "${line_control}" ]; then
+ echo "FAILED: wrong control"
+ error=1
+ fi
+
+ if [ "${ack}" != "${line_ack}" ]; then
+ echo "FAILED: wrong ack"
+ error=1
+ fi
+
+ if [ "${up}" != "${line_up}" ]; then
+ echo "FAILED: wrong up"
+ error=1
+ fi
+}
+
+daemon_exit()
+{
+ local config=$1
+
+ local line=`perf daemon --config ${config} -x: | head -1`
+ local pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`
+
+ # Reset trap handler.
+ trap - SIGINT SIGTERM
+
+ # stop daemon
+ perf daemon stop --config ${config}
+
+ # ... and wait for the pid to go away
+ tail --pid=${pid} -f /dev/null
+}
+
+daemon_start()
+{
+ local config=$1
+ local session=$2
+
+ perf daemon start --config ${config}
+
+ # Clean up daemon if interrupted.
+ trap "echo 'FAILED: Signal caught'; daemon_exit ${config}; exit 1" SIGINT SIGTERM
+
+ # wait for the session to ping
+ local state="FAIL"
+ local retries=0
+ while [ "${state}" != "OK" ]; do
+ state=`perf daemon ping --config ${config} --session ${session} | awk '{ print $1 }'`
+ sleep 0.05
+ retries=$((${retries} +1))
+ if [ ${retries} -ge 600 ]; then
+ echo "FAILED: Timeout waiting for daemon to ping"
+ daemon_exit ${config}
+ exit 1
+ fi
+ done
+}
+
+test_list()
+{
+ echo "test daemon list"
+
+ local config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+
+[session-time]
+run = -e task-clock -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} size
+
+ # check first line
+ # pid:daemon:base:base/output:base/lock
+ local line=`perf daemon --config ${config} -x: | head -1`
+ check_line_first ${line} daemon ${base} ${base}/output ${base}/lock "0"
+
+ # check 1st session
+ # pid:size:-e cpu-clock:base/size:base/size/output:base/size/control:base/size/ack:0
+ local line=`perf daemon --config ${config} -x: | head -2 | tail -1`
+ check_line_other "${line}" size "-e cpu-clock -m 1 sleep 10" ${base}/session-size \
+ ${base}/session-size/output ${base}/session-size/control \
+ ${base}/session-size/ack "0"
+
+ # check 2nd session
+ # pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0
+ local line=`perf daemon --config ${config} -x: | head -3 | tail -1`
+ check_line_other "${line}" time "-e task-clock -m 1 sleep 10" ${base}/session-time \
+ ${base}/session-time/output ${base}/session-time/control \
+ ${base}/session-time/ack "0"
+
+ # stop daemon
+ daemon_exit ${config}
+
+ rm -rf ${base}
+ rm -f ${config}
+}
+
+test_reconfig()
+{
+ echo "test daemon reconfig"
+
+ local config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ # prepare config
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+
+[session-time]
+run = -e task-clock -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} size
+
+ # check 2nd session
+ # pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0
+ local line=`perf daemon --config ${config} -x: | head -3 | tail -1`
+ check_line_other "${line}" time "-e task-clock -m 1 sleep 10" ${base}/session-time \
+ ${base}/session-time/output ${base}/session-time/control ${base}/session-time/ack "0"
+ local pid=`echo "${line}" | awk 'BEGIN { FS = ":" } ; { print $1 }'`
+
+ # prepare new config
+ local config_new=${config}.new
+ cat <<EOF > ${config_new}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+
+[session-time]
+run = -e cpu-clock -m 1 sleep 10
+EOF
+
+ # TEST 1 - change config
+
+ sed -i -e "s|BASE|${base}|" ${config_new}
+ cp ${config_new} ${config}
+
+ # wait for old session to finish
+ tail --pid=${pid} -f /dev/null
+
+ # wait for new one to start
+ local state="FAIL"
+ while [ "${state}" != "OK" ]; do
+ state=`perf daemon ping --config ${config} --session time | awk '{ print $1 }'`
+ done
+
+ # check reconfigured 2nd session
+ # pid:time:-e task-clock:base/time:base/time/output:base/time/control:base/time/ack:0
+ local line=`perf daemon --config ${config} -x: | head -3 | tail -1`
+ check_line_other "${line}" time "-e cpu-clock -m 1 sleep 10" ${base}/session-time \
+ ${base}/session-time/output ${base}/session-time/control ${base}/session-time/ack "0"
+
+ # TEST 2 - empty config
+
+ local config_empty=${config}.empty
+ cat <<EOF > ${config_empty}
+[daemon]
+base=BASE
+EOF
+
+ # change config
+ sed -i -e "s|BASE|${base}|" ${config_empty}
+ cp ${config_empty} ${config}
+
+ # wait for sessions to finish
+ local state="OK"
+ while [ "${state}" != "FAIL" ]; do
+ state=`perf daemon ping --config ${config} --session time | awk '{ print $1 }'`
+ done
+
+ local state="OK"
+ while [ "${state}" != "FAIL" ]; do
+ state=`perf daemon ping --config ${config} --session size | awk '{ print $1 }'`
+ done
+
+ local one=`perf daemon --config ${config} -x: | wc -l`
+
+ if [ ${one} -ne "1" ]; then
+ echo "FAILED: wrong list output"
+ error=1
+ fi
+
+ # TEST 3 - config again
+
+ cp ${config_new} ${config}
+
+ # wait for size to start
+ local state="FAIL"
+ while [ "${state}" != "OK" ]; do
+ state=`perf daemon ping --config ${config} --session size | awk '{ print $1 }'`
+ done
+
+ # wait for time to start
+ local state="FAIL"
+ while [ "${state}" != "OK" ]; do
+ state=`perf daemon ping --config ${config} --session time | awk '{ print $1 }'`
+ done
+
+ # stop daemon
+ daemon_exit ${config}
+
+ rm -rf ${base}
+ rm -f ${config}
+ rm -f ${config_new}
+ rm -f ${config_empty}
+}
+
+test_stop()
+{
+ echo "test daemon stop"
+
+ local config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ # prepare config
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+
+[session-time]
+run = -e task-clock -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} size
+
+ local pid_size=`perf daemon --config ${config} -x: | head -2 | tail -1 | awk 'BEGIN { FS = ":" } ; { print $1 }'`
+ local pid_time=`perf daemon --config ${config} -x: | head -3 | tail -1 | awk 'BEGIN { FS = ":" } ; { print $1 }'`
+
+ # check that sessions are running
+ if [ ! -d "/proc/${pid_size}" ]; then
+ echo "FAILED: session size not up"
+ fi
+
+ if [ ! -d "/proc/${pid_time}" ]; then
+ echo "FAILED: session time not up"
+ fi
+
+ # stop daemon
+ daemon_exit ${config}
+
+ # check that sessions are gone
+ if [ -d "/proc/${pid_size}" ]; then
+ echo "FAILED: session size still up"
+ fi
+
+ if [ -d "/proc/${pid_time}" ]; then
+ echo "FAILED: session time still up"
+ fi
+
+ rm -rf ${base}
+ rm -f ${config}
+}
+
+test_signal()
+{
+ echo "test daemon signal"
+
+ local config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ # prepare config
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-test]
+run = -e cpu-clock --switch-output -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} test
+
+ # send 2 signals
+ perf daemon signal --config ${config} --session test
+ perf daemon signal --config ${config}
+
+ # stop daemon
+ daemon_exit ${config}
+
+ # expect 3 perf.data files: 2 from the signals plus 1 from perf record finishing
+ count=`ls ${base}/session-test/ | grep perf.data | wc -l`
+ if [ ${count} -ne 3 ]; then
+ error=1
+ echo "FAILED: perf data no generated"
+ fi
+
+ rm -rf ${base}
+ rm -f ${config}
+}
+
+test_ping()
+{
+ echo "test daemon ping"
+
+ local config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ # prepare config
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+
+[session-time]
+run = -e task-clock -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} size
+
+ size=`perf daemon ping --config ${config} --session size | awk '{ print $1 }'`
+ type=`perf daemon ping --config ${config} --session time | awk '{ print $1 }'`
+
+ if [ ${size} != "OK" -o ${type} != "OK" ]; then
+ error=1
+ echo "FAILED: daemon ping failed"
+ fi
+
+ # stop daemon
+ daemon_exit ${config}
+
+ rm -rf ${base}
+ rm -f ${config}
+}
+
+test_lock()
+{
+ echo "test daemon lock"
+
+ local config=$(mktemp /tmp/perf.daemon.config.XXX)
+ local base=$(mktemp -d /tmp/perf.daemon.base.XXX)
+
+ # prepare config
+ cat <<EOF > ${config}
+[daemon]
+base=BASE
+
+[session-size]
+run = -e cpu-clock -m 1 sleep 10
+EOF
+
+ sed -i -e "s|BASE|${base}|" ${config}
+
+ # start daemon
+ daemon_start ${config} size
+
+ # start second daemon over the same config/base
+ failed=`perf daemon start --config ${config} 2>&1 | awk '{ print $1 }'`
+
+ # check that we failed properly
+ if [ ${failed} != "failed:" ]; then
+ error=1
+ echo "FAILED: daemon lock failed"
+ fi
+
+ # stop daemon
+ daemon_exit ${config}
+
+ rm -rf ${base}
+ rm -f ${config}
+}
+
+error=0
+
+test_list
+test_reconfig
+test_stop
+test_signal
+test_ping
+test_lock
+
+exit ${error}
diff --git a/tools/perf/tests/shell/lib/coresight.sh b/tools/perf/tests/shell/lib/coresight.sh
new file mode 100644
index 000000000..45a147725
--- /dev/null
+++ b/tools/perf/tests/shell/lib/coresight.sh
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: GPL-2.0
+# Carsten Haitzler <carsten.haitzler@arm.com>, 2021
+
+# This is sourced from a driver script, so there is no need for a #!/bin/...
+# line at the top - the assumption is that it is sourced after the test has
+# set up some basic env vars saying which test it is.
+
+# This currently works with ETMv4 / ETF and no other packet types at this
+# point. It will need changes if that changes.
+
+# perf record options for the perf tests to use
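+# "-m ,16M" requests a 16MB AUX (trace) buffer for the CoreSight data and
+# "cs_etm//u" restricts the cs_etm event to userspace.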
+PERFRECMEM="-m ,16M"
+PERFRECOPT="$PERFRECMEM -e cs_etm//u"
+
+TOOLS=$(dirname $0)
+DIR="$TOOLS/$TEST"
+BIN="$DIR/$TEST"
+# If the test tool/binary does not exist or is not executable then skip the test
+if ! test -x "$BIN"; then exit 2; fi
+DATD="."
+# If the data dir env is set then make the data dir use that instead of ./
+if test -n "$PERF_TEST_CORESIGHT_DATADIR"; then
+ DATD="$PERF_TEST_CORESIGHT_DATADIR";
+fi
+# If the stat dir env is set then make the stat dir use that instead of ./
+STATD="."
+if test -n "$PERF_TEST_CORESIGHT_STATDIR"; then
+ STATD="$PERF_TEST_CORESIGHT_STATDIR";
+fi
+
+# Called if the test fails - error code 1
+err() {
+ echo "$1"
+ exit 1
+}
+
+# Check that a statistic from our perf run meets the given minimum
+check_val_min() {
+ STATF="$4"
+ if test "$2" -lt "$3"; then
+ echo ", FAILED" >> "$STATF"
+ err "Sanity check number of $1 is too low ($2 < $3)"
+ fi
+}
+
+perf_dump_aux_verify() {
+ # Some basic checking that the AUX chunk contains some sensible data
+ # to see that we are recording something and at least a minimum
+ # amount of it. We should almost always see Fn packets in just about
+ # anything but certainly we will see some trace info and async
+ # packets
+ DUMP="$DATD/perf-tmp-aux-dump.txt"
+ perf report --stdio --dump -i "$1" | \
+ grep -o -e I_ATOM_F -e I_ASYNC -e I_TRACE_INFO > "$DUMP"
+ # Simply count how many of these packets we find to see that we are
+ # producing a reasonable amount of data - exact checks are not sane
+ # as this is a lossy process where we may lose some blocks, and the
+ # generated code differs depending on the compiler and optimization
+ # options, so this is a rough check just to see whether we are
+ # missing almost all of the data or have none at all
+ ATOM_FX_NUM=`grep I_ATOM_F "$DUMP" | wc -l`
+ ASYNC_NUM=`grep I_ASYNC "$DUMP" | wc -l`
+ TRACE_INFO_NUM=`grep I_TRACE_INFO "$DUMP" | wc -l`
+ rm -f "$DUMP"
+
+ # Arguments provide minimums for a pass
+ CHECK_FX_MIN="$2"
+ CHECK_ASYNC_MIN="$3"
+ CHECK_TRACE_INFO_MIN="$4"
+
+ # Write out statistics, so over time you can track results to see if
+ # there is a pattern - for example we have less "noisy" results that
+ # produce more consistent amounts of data each run, to see if over
+ # time any techniques to minimize data loss are having an effect or
+ # not
+ STATF="$STATD/stats-$TEST-$DATV.csv"
+ if ! test -f "$STATF"; then
+ echo "ATOM Fx Count, Minimum, ASYNC Count, Minimum, TRACE INFO Count, Minimum" > "$STATF"
+ fi
+ echo -n "$ATOM_FX_NUM, $CHECK_FX_MIN, $ASYNC_NUM, $CHECK_ASYNC_MIN, $TRACE_INFO_NUM, $CHECK_TRACE_INFO_MIN" >> "$STATF"
+
+ # Actually check to see if we passed or failed.
+ check_val_min "ATOM_FX" "$ATOM_FX_NUM" "$CHECK_FX_MIN" "$STATF"
+ check_val_min "ASYNC" "$ASYNC_NUM" "$CHECK_ASYNC_MIN" "$STATF"
+ check_val_min "TRACE_INFO" "$TRACE_INFO_NUM" "$CHECK_TRACE_INFO_MIN" "$STATF"
+ echo ", Ok" >> "$STATF"
+}
+
+perf_dump_aux_tid_verify() {
+ # A specifically crafted test will print a list of Thread IDs to
+ # stdout that need to be checked to see that they have had trace
+ # info collected in AUX blocks in the perf data. This goes
+ # through all the TIDs that are listed as CID=0xabcdef and checks
+ # that every Thread ID the test tool reports is present in the perf
+ # data AUX chunks
+
+ # The TID test tools print one TID per line to stdout for the
+ # threads being tested
+ TIDS=`cat "$2"`
+ # Scan the perf report to find the TIDs that are actually CID in hex
+ # and build a list of the ones found
+ FOUND_TIDS=`perf report --stdio --dump -i "$1" | \
+ grep -o "CID=0x[0-9a-z]\+" | sed 's/CID=//g' | \
+ uniq | sort | uniq`
+ # No CID=xxx found - maybe your kernel is reporting these as
+ # VMID=xxx so look there
+ if test -z "$FOUND_TIDS"; then
+ FOUND_TIDS=`perf report --stdio --dump -i "$1" | \
+ grep -o "VMID=0x[0-9a-z]\+" | sed 's/VMID=//g' | \
+ uniq | sort | uniq`
+ fi
+
+ # Iterate over the list of TIDs that the test says it has and find
+ # them in the TIDs found in the perf report
+ MISSING=""
+ for TID2 in $TIDS; do
+ FOUND=""
+ for TIDHEX in $FOUND_TIDS; do
+ TID=`printf "%i" $TIDHEX`
+ if test "$TID" -eq "$TID2"; then
+ FOUND="y"
+ break
+ fi
+ done
+ if test -z "$FOUND"; then
+ MISSING="$MISSING $TID"
+ fi
+ done
+ if test -n "$MISSING"; then
+ err "Thread IDs $MISSING not found in perf AUX data"
+ fi
+}
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
new file mode 100644
index 000000000..d90f8d102
--- /dev/null
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Basic sanity check of perf JSON output as specified in the man page.
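+# Reads the "perf stat -j" output lines from stdin, e.g. (illustrative invocation):
+#   perf stat -j -e cycles true 2>&1 | python3 perf_json_output_lint.py --event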
+
+import argparse
+import sys
+import json
+
+ap = argparse.ArgumentParser()
+ap.add_argument('--no-args', action='store_true')
+ap.add_argument('--interval', action='store_true')
+ap.add_argument('--system-wide-no-aggr', action='store_true')
+ap.add_argument('--system-wide', action='store_true')
+ap.add_argument('--event', action='store_true')
+ap.add_argument('--per-core', action='store_true')
+ap.add_argument('--per-thread', action='store_true')
+ap.add_argument('--per-die', action='store_true')
+ap.add_argument('--per-node', action='store_true')
+ap.add_argument('--per-socket', action='store_true')
+args = ap.parse_args()
+
+Lines = sys.stdin.readlines()
+
+def isfloat(num):
+ try:
+ float(num)
+ return True
+ except ValueError:
+ return False
+
+
+def isint(num):
+ try:
+ int(num)
+ return True
+ except ValueError:
+ return False
+
+def is_counter_value(num):
+ return isfloat(num) or num == '<not counted>' or num == '<not supported>'
+
+def check_json_output(expected_items):
+ if expected_items != -1:
+ for line in Lines:
+ if 'failed' not in line:
+ count = line.count(',')
+ if count != expected_items and count >= 1 and count <= 3 and 'metric-value' in line:
+ # Events that generate >1 metric may have isolated metric
+ # values and possibly other prefixes like interval, core and
+ # aggregate-number.
+ continue
+ if count != expected_items:
+ raise RuntimeError(f'wrong number of fields. counted {count} expected {expected_items}'
+ f' in \'{line}\'')
+ checks = {
+ 'aggregate-number': lambda x: isfloat(x),
+ 'core': lambda x: True,
+ 'counter-value': lambda x: is_counter_value(x),
+ 'cgroup': lambda x: True,
+ 'cpu': lambda x: isint(x),
+ 'die': lambda x: True,
+ 'event': lambda x: True,
+ 'event-runtime': lambda x: isfloat(x),
+ 'interval': lambda x: isfloat(x),
+ 'metric-unit': lambda x: True,
+ 'metric-value': lambda x: isfloat(x),
+ 'node': lambda x: True,
+ 'pcnt-running': lambda x: isfloat(x),
+ 'socket': lambda x: True,
+ 'thread': lambda x: True,
+ 'unit': lambda x: True,
+ }
+ input = '[\n' + ','.join(Lines) + '\n]'
+ for item in json.loads(input):
+ for key, value in item.items():
+ if key not in checks:
+ raise RuntimeError(f'Unexpected key: key={key} value={value}')
+ if not checks[key](value):
+ raise RuntimeError(f'Check failed for: key={key} value={value}')
+
+
+try:
+ if args.no_args or args.system_wide or args.event:
+ expected_items = 6
+ elif args.interval or args.per_thread or args.system_wide_no_aggr:
+ expected_items = 7
+ elif args.per_core or args.per_socket or args.per_node or args.per_die:
+ expected_items = 8
+ else:
+ # If no option is specified, don't check the number of items.
+ expected_items = -1
+ check_json_output(expected_items)
+except:
+ print('Test failed for input:\n' + '\n'.join(Lines))
+ raise
diff --git a/tools/perf/tests/shell/lib/probe.sh b/tools/perf/tests/shell/lib/probe.sh
new file mode 100644
index 000000000..51e3f60ba
--- /dev/null
+++ b/tools/perf/tests/shell/lib/probe.sh
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+skip_if_no_perf_probe() {
+ perf probe 2>&1 | grep -q 'is not a perf-command' && return 2
+ return 0
+}
+
+skip_if_no_perf_trace() {
+ perf trace -h 2>&1 | grep -q -e 'is not a perf-command' -e 'trace command not available' && return 2
+ return 0
+}
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
new file mode 100644
index 000000000..b616d42bd
--- /dev/null
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -0,0 +1,24 @@
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+perf probe -l 2>&1 | grep -q probe:vfs_getname
+had_vfs_getname=$?
+
+cleanup_probe_vfs_getname() {
+ if [ $had_vfs_getname -eq 1 ] ; then
+ perf probe -q -d probe:vfs_getname*
+ fi
+}
+
+add_probe_vfs_getname() {
+ local verbose=$1
+ if [ $had_vfs_getname -eq 1 ] ; then
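+ # Find the source line in getname_flags() where the filename is stored into result and probe there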
+ line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
+ perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
+ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
+ fi
+}
+
+skip_if_no_debuginfo() {
+ add_probe_vfs_getname -v 2>&1 | egrep -q "^(Failed to find the path for the kernel|Debuginfo-analysis is not supported)|(file has no debug information)" && return 2
+ return 1
+}
diff --git a/tools/perf/tests/shell/lib/waiting.sh b/tools/perf/tests/shell/lib/waiting.sh
new file mode 100644
index 000000000..e7a39134a
--- /dev/null
+++ b/tools/perf/tests/shell/lib/waiting.sh
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0
+
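+# $tenths expands to a command that prints the current time as an integer number of tenths of a second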
+tenths=date\ +%s%1N
+
+# Wait for PID $1 to have $2 number of threads started
+# Time out after $3 tenths of a second or 5 seconds if $3 is ""
+wait_for_threads()
+{
+ tm_out=$3 ; [ -n "${tm_out}" ] || tm_out=50
+ start_time=$($tenths)
+ while [ -e "/proc/$1/task" ] ; do
+ th_cnt=$(find "/proc/$1/task" -mindepth 1 -maxdepth 1 -printf x | wc -c)
+ if [ "${th_cnt}" -ge "$2" ] ; then
+ return 0
+ fi
+ # Wait at most tm_out tenths of a second
+ if [ $(($($tenths) - start_time)) -ge $tm_out ] ; then
+ echo "PID $1 does not have $2 threads"
+ return 1
+ fi
+ done
+ return 1
+}
+
+# Wait for perf record -vvv 2>$2 with PID $1 to start by looking at file $2
+# It depends on capturing perf record debug message "perf record has started"
+# Time out after $3 tenths of a second or 5 seconds if $3 is ""
+wait_for_perf_to_start()
+{
+ tm_out=$3 ; [ -n "${tm_out}" ] || tm_out=50
+ echo "Waiting for \"perf record has started\" message"
+ start_time=$($tenths)
+ while [ -e "/proc/$1" ] ; do
+ if grep -q "perf record has started" "$2" ; then
+ echo OK
+ break
+ fi
+ # Wait at most tm_out tenths of a second
+ if [ $(($($tenths) - start_time)) -ge $tm_out ] ; then
+ echo "perf recording did not start"
+ return 1
+ fi
+ done
+ return 0
+}
+
+# Wait for process PID $1 to exit
+# Time out after $2 tenths of a second or 5 seconds if $2 is ""
+wait_for_process_to_exit()
+{
+ tm_out=$2 ; [ -n "${tm_out}" ] || tm_out=50
+ start_time=$($tenths)
+ while [ -e "/proc/$1" ] ; do
+ # Wait at most tm_out tenths of a second
+ if [ $(($($tenths) - start_time)) -ge $tm_out ] ; then
+ echo "PID $1 did not exit as expected"
+ return 1
+ fi
+ done
+ return 0
+}
+
+# Check if PID $1 is still running after $2 tenths of a second
+# or 0.3 seconds if $2 is ""
+is_running()
+{
+ tm_out=$2 ; [ -n "${tm_out}" ] || tm_out=3
+ start_time=$($tenths)
+ while [ -e "/proc/$1" ] ; do
+ # Check for at least tm_out tenths of a second
+ if [ $(($($tenths) - start_time)) -gt $tm_out ] ; then
+ return 0
+ fi
+ done
+ echo "PID $1 exited prematurely"
+ return 1
+}
diff --git a/tools/perf/tests/shell/lock_contention.sh b/tools/perf/tests/shell/lock_contention.sh
new file mode 100755
index 000000000..04bf604e3
--- /dev/null
+++ b/tools/perf/tests/shell/lock_contention.sh
@@ -0,0 +1,73 @@
+#!/bin/sh
+# kernel lock contention analysis test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+result=$(mktemp /tmp/__perf_test.result.XXXXX)
+
+cleanup() {
+ rm -f ${perfdata}
+ rm -f ${result}
+ trap - exit term int
+}
+
+trap_cleanup() {
+ cleanup
+ exit ${err}
+}
+trap trap_cleanup exit term int
+
+check() {
+ if [ `id -u` != 0 ]; then
+ echo "[Skip] No root permission"
+ err=2
+ exit
+ fi
+
+ if ! perf list | grep -q lock:contention_begin; then
+ echo "[Skip] No lock contention tracepoints"
+ err=2
+ exit
+ fi
+}
+
+test_record()
+{
+ echo "Testing perf lock record and perf lock contention"
+ perf lock record -o ${perfdata} -- perf bench sched messaging > /dev/null 2>&1
+ # the output goes to the stderr and we expect only 1 output (-E 1)
+ perf lock contention -i ${perfdata} -E 1 -q 2> ${result}
+ if [ $(cat "${result}" | wc -l) != "1" ]; then
+ echo "[Fail] Recorded result count is not 1:" $(cat "${result}" | wc -l)
+ err=1
+ exit
+ fi
+}
+
+test_bpf()
+{
+ echo "Testing perf lock contention --use-bpf"
+
+ if ! perf lock con -b true > /dev/null 2>&1 ; then
+ echo "[Skip] No BPF support"
+ exit
+ fi
+
+ # the perf lock contention output goes to the stderr
+ perf lock con -a -b -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
+ if [ $(cat "${result}" | wc -l) != "1" ]; then
+ echo "[Fail] BPF result count is not 1:" $(cat "${result}" | wc -l)
+ err=1
+ exit
+ fi
+}
+
+check
+
+test_record
+test_bpf
+
+exit ${err}
diff --git a/tools/perf/tests/shell/pipe_test.sh b/tools/perf/tests/shell/pipe_test.sh
new file mode 100755
index 000000000..1b32b4f28
--- /dev/null
+++ b/tools/perf/tests/shell/pipe_test.sh
@@ -0,0 +1,69 @@
+#!/bin/sh
+# perf pipe recording and injection test
+# SPDX-License-Identifier: GPL-2.0
+
+# skip if there's no compiler
+if ! [ -x "$(command -v cc)" ]; then
+ echo "failed: no compiler, install gcc"
+ exit 2
+fi
+
+file=$(mktemp /tmp/test.file.XXXXXX)
+data=$(mktemp /tmp/perf.data.XXXXXX)
+
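+# Build a small test program that busy-loops in noploop() until SIGALRM fires after the given number of seconds (default 1)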
+cat <<EOF | cc -o ${file} -x c -
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+volatile int done;
+
+void sigalrm(int sig) {
+ done = 1;
+}
+
+__attribute__((noinline)) void noploop(void) {
+ while (!done)
+ continue;
+}
+
+int main(int argc, char *argv[]) {
+ int sec = 1;
+
+ if (argc > 1)
+ sec = atoi(argv[1]);
+
+ signal(SIGALRM, sigalrm);
+ alarm(sec);
+
+ noploop();
+ return 0;
+}
+EOF
+
+
+if ! perf record -e task-clock:u -o - ${file} | perf report -i - --task | grep test.file; then
+ echo "cannot find the test file in the perf report"
+ exit 1
+fi
+
+if ! perf record -e task-clock:u -o - ${file} | perf inject -b | perf report -i - | grep noploop; then
+ echo "cannot find noploop function in pipe #1"
+ exit 1
+fi
+
+perf record -e task-clock:u -o - ${file} | perf inject -b -o ${data}
+if ! perf report -i ${data} | grep noploop; then
+ echo "cannot find noploop function in pipe #2"
+ exit 1
+fi
+
+perf record -e task-clock:u -o ${data} ${file}
+if ! perf inject -b -i ${data} | perf report -i - | grep noploop; then
+ echo "cannot find noploop function in pipe #3"
+ exit 1
+fi
+
+
+rm -f ${file} ${data} ${data}.old
+exit 0
diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh
new file mode 100755
index 000000000..5d1b63d3f
--- /dev/null
+++ b/tools/perf/tests/shell/probe_vfs_getname.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# Add vfs_getname probe to get syscall args filenames
+
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+. $(dirname $0)/lib/probe.sh
+
+skip_if_no_perf_probe || exit 2
+
+. $(dirname $0)/lib/probe_vfs_getname.sh
+
+add_probe_vfs_getname || skip_if_no_debuginfo
+err=$?
+cleanup_probe_vfs_getname
+exit $err
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
new file mode 100755
index 000000000..f12a4e217
--- /dev/null
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -0,0 +1,96 @@
+#!/bin/sh
+# probe libc's inet_pton & backtrace it with ping
+
+# Installs a probe on libc's inet_pton function, which will use uprobes,
+# then uses 'perf record' and 'perf script' on a ping to localhost asking for
+# just one packet with a backtrace 3 levels deep, and checks that it is what we expect.
+# This needs no debuginfo package, all is done using the libc ELF symtab
+# and the CFI info in the binaries.
+
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+. $(dirname $0)/lib/probe.sh
+
+libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1/g')
+nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254
+
+event_pattern='probe_libc:inet_pton(\_[[:digit:]]+)?'
+
+add_libc_inet_pton_event() {
+
+ event_name=$(perf probe -f -x $libc -a inet_pton 2>&1 | tail -n +2 | head -n -5 | \
+ grep -P -o "$event_pattern(?=[[:space:]]\(on inet_pton in $libc\))")
+
+ if [ $? -ne 0 -o -z "$event_name" ] ; then
+ printf "FAIL: could not add event\n"
+ return 1
+ fi
+}
+
+trace_libc_inet_pton_backtrace() {
+
+ expected=`mktemp -u /tmp/expected.XXX`
+
+ echo "ping[][0-9 \.:]+$event_name: \([[:xdigit:]]+\)" > $expected
+ echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+ case "$(uname -m)" in
+ s390x)
+ eventattr='call-graph=dwarf,max-stack=4'
+ echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+ echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+ echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ ;;
+ ppc64|ppc64le)
+ eventattr='max-stack=4'
+ echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ ;;
+ *)
+ eventattr='max-stack=3'
+ echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
+ ;;
+ esac
+
+ perf_data=`mktemp -u /tmp/perf.data.XXX`
+ perf_script=`mktemp -u /tmp/perf.script.XXX`
+ perf record -e $event_name/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
+ perf script -i $perf_data > $perf_script
+
+ exec 3<$perf_script
+ exec 4<$expected
+ while read line <&3 && read -r pattern <&4; do
+ [ -z "$pattern" ] && break
+ echo $line
+ echo "$line" | egrep -q "$pattern"
+ if [ $? -ne 0 ] ; then
+ printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
+ return 1
+ fi
+ done
+
+ # If any statements are executed from this point onwards,
+ # the exit code of the last among these will be reflected
+ # in err below. If the exit code is 0, the test will pass
+ # even if the perf script output does not match.
+}
+
+delete_libc_inet_pton_event() {
+
+ if [ -n "$event_name" ] ; then
+ perf probe -q -d $event_name
+ fi
+}
+
+# Check for IPv6 interface existence
+ip a sh lo | fgrep -q inet6 || exit 2
+
+skip_if_no_perf_probe && \
+add_libc_inet_pton_event && \
+trace_libc_inet_pton_backtrace
+err=$?
+rm -f ${perf_data} ${perf_script} ${expected}
+delete_libc_inet_pton_event
+exit $err
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
new file mode 100755
index 000000000..8d9c04e45
--- /dev/null
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+# Use vfs_getname probe to get syscall args filenames
+
+# Uses the 'perf test shell' library to add probe:vfs_getname to the system,
+# then uses it with 'perf record' while 'touch' creates a temp file, then
+# checks that the file access was captured by the vfs_getname probe in the
+# generated perf.data file, with the temp file name as the pathname argument.
+
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+. $(dirname $0)/lib/probe.sh
+
+skip_if_no_perf_probe || exit 2
+
+. $(dirname $0)/lib/probe_vfs_getname.sh
+
+record_open_file() {
+ echo "Recording open file:"
+ perf record -o ${perfdata} -e probe:vfs_getname\* touch $file
+}
+
+perf_script_filenames() {
+ echo "Looking at perf.data file for vfs_getname records for the file we touched:"
+ perf script -i ${perfdata} | \
+ egrep " +touch +[0-9]+ +\[[0-9]+\] +[0-9]+\.[0-9]+: +probe:vfs_getname[_0-9]*: +\([[:xdigit:]]+\) +pathname=\"${file}\""
+}
+
+add_probe_vfs_getname || skip_if_no_debuginfo
+err=$?
+if [ $err -ne 0 ] ; then
+ exit $err
+fi
+
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+file=$(mktemp /tmp/temporary_file.XXXXX)
+
+record_open_file && perf_script_filenames
+err=$?
+rm -f ${perfdata}
+rm -f ${file}
+cleanup_probe_vfs_getname
+exit $err
diff --git a/tools/perf/tests/shell/record+zstd_comp_decomp.sh b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
new file mode 100755
index 000000000..49bd875d5
--- /dev/null
+++ b/tools/perf/tests/shell/record+zstd_comp_decomp.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+# Zstd perf.data compression/decompression
+
+# SPDX-License-Identifier: GPL-2.0
+
+trace_file=$(mktemp /tmp/perf.data.XXX)
+perf_tool=perf
+
+skip_if_no_z_record() {
+ $perf_tool record -h 2>&1 | grep -q '\-z, \-\-compression\-level'
+}
+
+collect_z_record() {
+ echo "Collecting compressed record file:"
+ [ "$(uname -m)" != s390x ] && gflag='-g'
+ $perf_tool record -o $trace_file $gflag -z -F 5000 -- \
+ dd count=500 if=/dev/urandom of=/dev/null
+}
+
+check_compressed_stats() {
+ echo "Checking compressed events stats:"
+ $perf_tool report -i $trace_file --header --stats | \
+ grep -E "(# compressed : Zstd,)|(COMPRESSED events:)"
+}
+
+check_compressed_output() {
+ $perf_tool inject -i $trace_file -o $trace_file.decomp &&
+ $perf_tool report -i $trace_file --stdio -F comm,dso,sym | head -n -3 > $trace_file.comp.output &&
+ $perf_tool report -i $trace_file.decomp --stdio -F comm,dso,sym | head -n -3 > $trace_file.decomp.output &&
+ diff $trace_file.comp.output $trace_file.decomp.output
+}
+
+skip_if_no_z_record || exit 2
+collect_z_record && check_compressed_stats && check_compressed_output
+err=$?
+rm -f $trace_file*
+exit $err
diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh
new file mode 100755
index 000000000..301f95427
--- /dev/null
+++ b/tools/perf/tests/shell/record.sh
@@ -0,0 +1,80 @@
+#!/bin/sh
+# perf record tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f ${perfdata}
+ rm -f ${perfdata}.old
+ trap - exit term int
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup exit term int
+
+test_per_thread() {
+ echo "Basic --per-thread mode test"
+ if ! perf record -e instructions:u -o ${perfdata} --quiet true 2> /dev/null
+ then
+ echo "Per-thread record [Skipped instructions:u not supported]"
+ if [ $err -ne 1 ]
+ then
+ err=2
+ fi
+ return
+ fi
+ if ! perf record -e instructions:u --per-thread -o ${perfdata} true 2> /dev/null
+ then
+ echo "Per-thread record of instructions:u [Failed]"
+ err=1
+ return
+ fi
+ if ! perf report -i ${perfdata} -q | egrep -q true
+ then
+ echo "Per-thread record [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Basic --per-thread mode test [Success]"
+}
+
+test_register_capture() {
+ echo "Register capture test"
+ if ! perf list | egrep -q 'br_inst_retired.near_call'
+ then
+ echo "Register capture test [Skipped missing instruction]"
+ if [ $err -ne 1 ]
+ then
+ err=2
+ fi
+ return
+ fi
+ if ! perf record --intr-regs=\? 2>&1 | egrep -q 'available registers: AX BX CX DX SI DI BP SP IP FLAGS CS SS R8 R9 R10 R11 R12 R13 R14 R15'
+ then
+ echo "Register capture test [Skipped missing registers]"
+ return
+ fi
+ if ! perf record -o - --intr-regs=di,r8,dx,cx -e br_inst_retired.near_call:p \
+ -c 1000 --per-thread true 2> /dev/null \
+ | perf script -F ip,sym,iregs -i - 2> /dev/null \
+ | egrep -q "DI:"
+ then
+ echo "Register capture test [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Register capture test [Success]"
+}
+
+test_per_thread
+test_register_capture
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh
new file mode 100755
index 000000000..054272750
--- /dev/null
+++ b/tools/perf/tests/shell/record_offcpu.sh
@@ -0,0 +1,103 @@
+#!/bin/sh
+# perf record offcpu profiling tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+cleanup() {
+ rm -f ${perfdata}
+ rm -f ${perfdata}.old
+ trap - exit term int
+}
+
+trap_cleanup() {
+ cleanup
+ exit 1
+}
+trap trap_cleanup exit term int
+
+test_offcpu_priv() {
+ echo "Checking off-cpu privilege"
+
+ if [ `id -u` != 0 ]
+ then
+ echo "off-cpu test [Skipped permission]"
+ err=2
+ return
+ fi
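+ # perf is expected to mention BUILD_BPF_SKEL in its error output when it was built without BPF skeleton support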
+ if perf record --off-cpu -o /dev/null --quiet true 2>&1 | grep BUILD_BPF_SKEL
+ then
+ echo "off-cpu test [Skipped missing BPF support]"
+ err=2
+ return
+ fi
+}
+
+test_offcpu_basic() {
+ echo "Basic off-cpu test"
+
+ if ! perf record --off-cpu -e dummy -o ${perfdata} sleep 1 2> /dev/null
+ then
+ echo "Basic off-cpu test [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf evlist -i ${perfdata} | grep -q "offcpu-time"
+ then
+ echo "Basic off-cpu test [Failed no event]"
+ err=1
+ return
+ fi
+ if ! perf report -i ${perfdata} -q --percent-limit=90 | egrep -q sleep
+ then
+ echo "Basic off-cpu test [Failed missing output]"
+ err=1
+ return
+ fi
+ echo "Basic off-cpu test [Success]"
+}
+
+test_offcpu_child() {
+ echo "Child task off-cpu test"
+
+ # perf bench sched messaging creates 400 processes
+ if ! perf record --off-cpu -e dummy -o ${perfdata} -- \
+ perf bench sched messaging -g 10 > /dev/null 2>&1
+ then
+ echo "Child task off-cpu test [Failed record]"
+ err=1
+ return
+ fi
+ if ! perf evlist -i ${perfdata} | grep -q "offcpu-time"
+ then
+ echo "Child task off-cpu test [Failed no event]"
+ err=1
+ return
+ fi
+ # each process waits for a read and a write, so there should be more than 800 events
+ if ! perf report -i ${perfdata} -s comm -q -n -t ';' --percent-limit=90 | \
+ awk -F ";" '{ if (NF > 3 && int($3) < 800) exit 1; }'
+ then
+ echo "Child task off-cpu test [Failed invalid output]"
+ err=1
+ return
+ fi
+ echo "Child task off-cpu test [Success]"
+}
+
+
+test_offcpu_priv
+
+if [ $err = 0 ]; then
+ test_offcpu_basic
+fi
+
+if [ $err = 0 ]; then
+ test_offcpu_child
+fi
+
+cleanup
+exit $err
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
new file mode 100755
index 000000000..b7f050aa6
--- /dev/null
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -0,0 +1,204 @@
+#!/bin/bash
+# perf stat CSV output linter
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Tests various perf stat CSV output commands for the
+# correct number of fields and the CSV separator set to ','.
+
+set -e
+
+skip_test=0
+
+function commachecker()
+{
+ local -i cnt=0
+ local exp=0
+
+ case "$1"
+ in "--no-args") exp=6
+ ;; "--system-wide") exp=6
+ ;; "--event") exp=6
+ ;; "--interval") exp=7
+ ;; "--per-thread") exp=7
+ ;; "--system-wide-no-aggr") exp=7
+ [ $(uname -m) = "s390x" ] && exp='^[6-7]$'
+ ;; "--per-core") exp=8
+ ;; "--per-socket") exp=8
+ ;; "--per-node") exp=8
+ ;; "--per-die") exp=8
+ esac
+
+ while read line
+ do
+ # Check for lines beginning with Failed
+ x=${line:0:6}
+ [ "$x" = "Failed" ] && continue
+
+ # Count the number of commas
+ x=$(echo $line | tr -d -c ',')
+ cnt="${#x}"
+ # echo $line $cnt
+ [[ ! "$cnt" =~ $exp ]] && {
+ echo "wrong number of fields. expected $exp in $line" 1>&2
+ exit 1;
+ }
+ done
+ return 0
+}
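+# Illustrative sketch (assumed output, for reference only): a CSV line from
+# "perf stat -x," typically looks like
+#   1234567,,cycles:u,400000,100.00,,
+# which contains 6 commas, matching exp=6 for the --no-args case above.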
+
+# Return true if perf_event_paranoid is > $1 and not running as root.
+function ParanoidAndNotRoot()
+{
+ [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ]
+}
+
+check_no_args()
+{
+ echo -n "Checking CSV output: no args "
+ perf stat -x, true 2>&1 | commachecker --no-args
+ echo "[Success]"
+}
+
+check_system_wide()
+{
+ echo -n "Checking CSV output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, -a true 2>&1 | commachecker --system-wide
+ echo "[Success]"
+}
+
+check_system_wide_no_aggr()
+{
+ echo -n "Checking CSV output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ echo -n "Checking CSV output: system wide no aggregation "
+ perf stat -x, -A -a --no-merge true 2>&1 | commachecker --system-wide-no-aggr
+ echo "[Success]"
+}
+
+check_interval()
+{
+ echo -n "Checking CSV output: interval "
+ perf stat -x, -I 1000 true 2>&1 | commachecker --interval
+ echo "[Success]"
+}
+
+
+check_event()
+{
+ echo -n "Checking CSV output: event "
+ perf stat -x, -e cpu-clock true 2>&1 | commachecker --event
+ echo "[Success]"
+}
+
+check_per_core()
+{
+ echo -n "Checking CSV output: per core "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-core -a true 2>&1 | commachecker --per-core
+ echo "[Success]"
+}
+
+check_per_thread()
+{
+ echo -n "Checking CSV output: per thread "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-thread -a true 2>&1 | commachecker --per-thread
+ echo "[Success]"
+}
+
+check_per_die()
+{
+ echo -n "Checking CSV output: per die "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-die -a true 2>&1 | commachecker --per-die
+ echo "[Success]"
+}
+
+check_per_node()
+{
+ echo -n "Checking CSV output: per node "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-node -a true 2>&1 | commachecker --per-node
+ echo "[Success]"
+}
+
+check_per_socket()
+{
+ echo -n "Checking CSV output: per socket "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -x, --per-socket -a true 2>&1 | commachecker --per-socket
+ echo "[Success]"
+}
+
+# The perf stat options for per-socket, per-core, per-die
+# and -A (no_aggr mode) use the info fetched from this
+# directory: "/sys/devices/system/cpu/cpu*/topology". For
+# example, the socket value is fetched from the "physical_package_id"
+# file in the topology directory.
+# Reference: cpu__get_topology_int in util/cpumap.c
+# If the platform doesn't expose topology information, the values
+# will be set to -1. For example, in the case of the pSeries platform
+# on powerpc, the value of "physical_package_id" is restricted
+# and set to -1. The check here validates the socket-id read from
+# the topology file before proceeding further.
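+# Illustrative example (assumed values): on a typical x86 system
+#   $ cat /sys/devices/system/cpu/cpu0/topology/physical_package_id
+#   0
+# while a restricted pSeries guest prints -1, causing the aggregation
+# tests below to be skipped.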
+
+FILE_LOC="/sys/devices/system/cpu/cpu*/topology/"
+FILE_NAME="physical_package_id"
+
+check_for_topology()
+{
+ if ! ParanoidAndNotRoot 0
+ then
+ socket_file=`ls $FILE_LOC/$FILE_NAME | head -n 1`
+ [ -z $socket_file ] && return 0
+ socket_id=`cat $socket_file`
+ [ $socket_id == -1 ] && skip_test=1
+ return 0
+ fi
+}
+
+check_for_topology
+check_no_args
+check_system_wide
+check_interval
+check_event
+check_per_thread
+check_per_node
+if [ $skip_test -ne 1 ]
+then
+ check_system_wide_no_aggr
+ check_per_core
+ check_per_die
+ check_per_socket
+else
+ echo "[Skip] Skipping tests for system_wide_no_aggr, per_core, per_die and per_socket since socket id exposed via topology is invalid"
+fi
+exit 0
diff --git a/tools/perf/tests/shell/stat+csv_summary.sh b/tools/perf/tests/shell/stat+csv_summary.sh
new file mode 100755
index 000000000..5571ff75e
--- /dev/null
+++ b/tools/perf/tests/shell/stat+csv_summary.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+# perf stat csv summary test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+#
+# 1.001364330 9224197 cycles 8012885033 100.00
+# summary 9224197 cycles 8012885033 100.00
+#
+perf stat -e cycles -x' ' -I1000 --interval-count 1 --summary 2>&1 | \
+grep -e summary | \
+while read summary num event run pct
+do
+ if [ $summary != "summary" ]; then
+ exit 1
+ fi
+done
+
+#
+# 1.001360298 9148534 cycles 8012853854 100.00
+#9148534 cycles 8012853854 100.00
+#
+perf stat -e cycles -x' ' -I1000 --interval-count 1 --summary --no-csv-summary 2>&1 | \
+grep -e summary | \
+while read num event run pct
+do
+ exit 1
+done
+
+exit 0
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
new file mode 100755
index 000000000..2c4212c64
--- /dev/null
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -0,0 +1,182 @@
+#!/bin/bash
+# perf stat JSON output linter
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Checks various perf stat JSON output commands for the
+# correct number of fields.
+
+set -e
+
+skip_test=0
+
+pythonchecker=$(dirname $0)/lib/perf_json_output_lint.py
+if [ "x$PYTHON" == "x" ]
+then
+ if which python3 > /dev/null
+ then
+ PYTHON=python3
+ elif which python > /dev/null
+ then
+ PYTHON=python
+ else
+ echo "Skipping test, python not detected; please set the environment variable PYTHON."
+ exit 2
+ fi
+fi
+
+# Return true if perf_event_paranoid is > $1 and not running as root.
+function ParanoidAndNotRoot()
+{
+ [ $(id -u) != 0 ] && [ $(cat /proc/sys/kernel/perf_event_paranoid) -gt $1 ]
+}
+
+check_no_args()
+{
+ echo -n "Checking json output: no args "
+ perf stat -j true 2>&1 | $PYTHON $pythonchecker --no-args
+ echo "[Success]"
+}
+
+check_system_wide()
+{
+ echo -n "Checking json output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j -a true 2>&1 | $PYTHON $pythonchecker --system-wide
+ echo "[Success]"
+}
+
+check_system_wide_no_aggr()
+{
+ echo -n "Checking json output: system wide "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ echo -n "Checking json output: system wide no aggregation "
+ perf stat -j -A -a --no-merge true 2>&1 | $PYTHON $pythonchecker --system-wide-no-aggr
+ echo "[Success]"
+}
+
+check_interval()
+{
+ echo -n "Checking json output: interval "
+ perf stat -j -I 1000 true 2>&1 | $PYTHON $pythonchecker --interval
+ echo "[Success]"
+}
+
+
+check_event()
+{
+ echo -n "Checking json output: event "
+ perf stat -j -e cpu-clock true 2>&1 | $PYTHON $pythonchecker --event
+ echo "[Success]"
+}
+
+check_per_core()
+{
+ echo -n "Checking json output: per core "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-core -a true 2>&1 | $PYTHON $pythonchecker --per-core
+ echo "[Success]"
+}
+
+check_per_thread()
+{
+ echo -n "Checking json output: per thread "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-thread -a true 2>&1 | $PYTHON $pythonchecker --per-thread
+ echo "[Success]"
+}
+
+check_per_die()
+{
+ echo -n "Checking json output: per die "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-die -a true 2>&1 | $PYTHON $pythonchecker --per-die
+ echo "[Success]"
+}
+
+check_per_node()
+{
+ echo -n "Checking json output: per node "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-node -a true 2>&1 | $PYTHON $pythonchecker --per-node
+ echo "[Success]"
+}
+
+check_per_socket()
+{
+ echo -n "Checking json output: per socket "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoia and not root"
+ return
+ fi
+ perf stat -j --per-socket -a true 2>&1 | $PYTHON $pythonchecker --per-socket
+ echo "[Success]"
+}
+
+# The perf stat options for per-socket, per-core, per-die
+# and -A (no_aggr mode) use the info fetched from this
+# directory: "/sys/devices/system/cpu/cpu*/topology". For
+# example, the socket value is fetched from the "physical_package_id"
+# file in the topology directory.
+# Reference: cpu__get_topology_int in util/cpumap.c
+# If the platform doesn't expose topology information, the values
+# will be set to -1. For example, in the case of the pSeries platform
+# on powerpc, the value of "physical_package_id" is restricted
+# and set to -1. The check here validates the socket-id read from
+# the topology file before proceeding further.
+
+FILE_LOC="/sys/devices/system/cpu/cpu*/topology/"
+FILE_NAME="physical_package_id"
+
+check_for_topology()
+{
+ if ! ParanoidAndNotRoot 0
+ then
+ socket_file=`ls $FILE_LOC/$FILE_NAME | head -n 1`
+ [ -z $socket_file ] && return 0
+ socket_id=`cat $socket_file`
+ [ $socket_id == -1 ] && skip_test=1
+ return 0
+ fi
+}
+
+check_for_topology
+check_no_args
+check_system_wide
+check_interval
+check_event
+check_per_thread
+check_per_node
+if [ $skip_test -ne 1 ]
+then
+ check_system_wide_no_aggr
+ check_per_core
+ check_per_die
+ check_per_socket
+else
+ echo "[Skip] Skipping tests for system_wide_no_aggr, per_core, per_die and per_socket since socket id exposed via topology is invalid"
+fi
+exit 0
diff --git a/tools/perf/tests/shell/stat+shadow_stat.sh b/tools/perf/tests/shell/stat+shadow_stat.sh
new file mode 100755
index 000000000..e6e35fc6c
--- /dev/null
+++ b/tools/perf/tests/shell/stat+shadow_stat.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+# perf stat metrics (shadow stat) test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# skip if system-wide mode is forbidden
+perf stat -a true > /dev/null 2>&1 || exit 2
+
+# skip if on hybrid platform
+perf stat -a -e cycles sleep 1 2>&1 | grep -e cpu_core && exit 2
+
+test_global_aggr()
+{
+ perf stat -a --no-big-num -e cycles,instructions sleep 1 2>&1 | \
+ grep -e cycles -e instructions | \
+ while read num evt hash ipc rest
+ do
+ # skip not counted events
+ if [ "$num" = "<not" ]; then
+ continue
+ fi
+
+ # save cycles count
+ if [ "$evt" = "cycles" ]; then
+ cyc=$num
+ continue
+ fi
+
+ # skip if no cycles
+ if [ -z "$cyc" ]; then
+ continue
+ fi
+
+ # use printf for rounding and a leading zero
+ res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
+ if [ "$ipc" != "$res" ]; then
+ echo "IPC is different: $res != $ipc ($num / $cyc)"
+ exit 1
+ fi
+ done
+}
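+# Worked example of the check above (hypothetical numbers): with
+# cyc=2000000 cycles and num=1500000 instructions, the computed value is
+# printf "%.2f" of 1500000/2000000 = 0.75, which must match the IPC
+# column printed by perf stat.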
+
+test_no_aggr()
+{
+ perf stat -a -A --no-big-num -e cycles,instructions sleep 1 2>&1 | \
+ grep ^CPU | \
+ while read cpu num evt hash ipc rest
+ do
+ # skip not counted events
+ if [ "$num" = "<not" ]; then
+ continue
+ fi
+
+ # save cycles count
+ if [ "$evt" = "cycles" ]; then
+ results="$results $cpu:$num"
+ continue
+ fi
+
+ cyc=${results##* $cpu:}
+ cyc=${cyc%% *}
+
+ # skip if no cycles
+ if [ -z "$cyc" ]; then
+ continue
+ fi
+
+ # use printf for rounding and a leading zero
+ res=`printf "%.2f" $(echo "scale=6; $num / $cyc" | bc -q)`
+ if [ "$ipc" != "$res" ]; then
+ echo "IPC is different for $cpu: $res != $ipc ($num / $cyc)"
+ exit 1
+ fi
+ done
+}
+
+test_global_aggr
+test_no_aggr
+
+exit 0
diff --git a/tools/perf/tests/shell/stat.sh b/tools/perf/tests/shell/stat.sh
new file mode 100755
index 000000000..26a51b48a
--- /dev/null
+++ b/tools/perf/tests/shell/stat.sh
@@ -0,0 +1,99 @@
+#!/bin/sh
+# perf stat tests
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+err=0
+test_default_stat() {
+ echo "Basic stat command test"
+ if ! perf stat true 2>&1 | egrep -q "Performance counter stats for 'true':"
+ then
+ echo "Basic stat command test [Failed]"
+ err=1
+ return
+ fi
+ echo "Basic stat command test [Success]"
+}
+
+test_stat_record_report() {
+ echo "stat record and report test"
+ if ! perf stat record -o - true | perf stat report -i - 2>&1 | \
+ egrep -q "Performance counter stats for 'pipe':"
+ then
+ echo "stat record and report test [Failed]"
+ err=1
+ return
+ fi
+ echo "stat record and report test [Success]"
+}
+
+test_stat_repeat_weak_groups() {
+ echo "stat repeat weak groups test"
+ if ! perf stat -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}' \
+ true 2>&1 | grep -q 'seconds time elapsed'
+ then
+ echo "stat repeat weak groups test [Skipped event parsing failed]"
+ return
+ fi
+ if ! perf stat -r2 -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}:W' \
+ true > /dev/null 2>&1
+ then
+ echo "stat repeat weak groups test [Failed]"
+ err=1
+ return
+ fi
+ echo "stat repeat weak groups test [Success]"
+}
+
+test_topdown_groups() {
+ # Topdown events must be grouped with the slots event first. Test that
+ # parse-events reorders this.
+ echo "Topdown event group test"
+ if ! perf stat -e '{slots,topdown-retiring}' true > /dev/null 2>&1
+ then
+ echo "Topdown event group test [Skipped event parsing failed]"
+ return
+ fi
+ if perf stat -e '{slots,topdown-retiring}' true 2>&1 | egrep -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed events not supported]"
+ err=1
+ return
+ fi
+ if perf stat -e '{topdown-retiring,slots}' true 2>&1 | egrep -q "<not supported>"
+ then
+ echo "Topdown event group test [Failed slots not reordered first]"
+ err=1
+ return
+ fi
+ echo "Topdown event group test [Success]"
+}
+
+test_topdown_weak_groups() {
+ # Weak groups break if the perf_event_open of multiple grouped events
+ # fails. Breaking a topdown group causes the events to fail. Test a very large
+ # grouping to see that the topdown events aren't broken out.
+ echo "Topdown weak groups test"
+ ok_grouping="{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring},branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,cache-misses,cache-references"
+ if ! perf stat --no-merge -e "$ok_grouping" true > /dev/null 2>&1
+ then
+ echo "Topdown weak groups test [Skipped event parsing failed]"
+ return
+ fi
+ group_needs_break="{slots,topdown-bad-spec,topdown-be-bound,topdown-fe-bound,topdown-retiring,branch-instructions,branch-misses,bus-cycles,cache-misses,cache-references,cpu-cycles,instructions,mem-loads,mem-stores,ref-cycles,cache-misses,cache-references}:W"
+ if perf stat --no-merge -e "$group_needs_break" true 2>&1 | egrep -q "<not supported>"
+ then
+ echo "Topdown weak groups test [Failed events not supported]"
+ err=1
+ return
+ fi
+ echo "Topdown weak groups test [Success]"
+}
+
+test_default_stat
+test_stat_record_report
+test_stat_repeat_weak_groups
+test_topdown_groups
+test_topdown_weak_groups
+exit $err
diff --git a/tools/perf/tests/shell/stat_all_metricgroups.sh b/tools/perf/tests/shell/stat_all_metricgroups.sh
new file mode 100755
index 000000000..cb35e4888
--- /dev/null
+++ b/tools/perf/tests/shell/stat_all_metricgroups.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# perf all metricgroups test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+for m in $(perf list --raw-dump metricgroups); do
+ echo "Testing $m"
+ perf stat -M "$m" -a true
+done
+
+exit 0
diff --git a/tools/perf/tests/shell/stat_all_metrics.sh b/tools/perf/tests/shell/stat_all_metrics.sh
new file mode 100755
index 000000000..22e9cb294
--- /dev/null
+++ b/tools/perf/tests/shell/stat_all_metrics.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# perf all metrics test
+# SPDX-License-Identifier: GPL-2.0
+
+err=0
+for m in $(perf list --raw-dump metrics); do
+ echo "Testing $m"
+ result=$(perf stat -M "$m" true 2>&1)
+ if [[ "$result" =~ "${m:0:50}" ]] || [[ "$result" =~ "<not supported>" ]]
+ then
+ continue
+ fi
+ # Failed so try system wide.
+ result=$(perf stat -M "$m" -a sleep 0.01 2>&1)
+ if [[ "$result" =~ "${m:0:50}" ]]
+ then
+ continue
+ fi
+ # Failed again, possibly the workload was too small so retry with something
+ # longer.
+ result=$(perf stat -M "$m" perf bench internals synthesize 2>&1)
+ if [[ "$result" =~ "${m:0:50}" ]]
+ then
+ continue
+ fi
+ echo "Metric '$m' not printed in:"
+ echo "$result"
+ if [[ "$err" != "1" ]]
+ then
+ err=2
+ if [[ "$result" =~ "FP_ARITH" || "$result" =~ "AMX" ]]
+ then
+ echo "Skip, not fail, for FP issues"
+ elif [[ "$result" =~ "PMM" ]]
+ then
+ echo "Skip, not fail, for Optane memory issues"
+ else
+ err=1
+ fi
+ fi
+done
+
+exit "$err"
diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh
new file mode 100755
index 000000000..c77955419
--- /dev/null
+++ b/tools/perf/tests/shell/stat_all_pmu.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+# perf all PMU test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# Test all PMU events; however exclude parametrized ones (name contains '?')
+for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g'); do
+ echo "Testing $p"
+ result=$(perf stat -e "$p" true 2>&1)
+ if ! echo "$result" | grep -q "$p" && ! echo "$result" | grep -q "<not supported>" ; then
+ # We failed to see the event and it is supported. Possibly the workload was
+ # too small so retry with something longer.
+ result=$(perf stat -e "$p" perf bench internals synthesize 2>&1)
+ if ! echo "$result" | grep -q "$p" ; then
+ echo "Event '$p' not printed in:"
+ echo "$result"
+ exit 1
+ fi
+ fi
+done
+
+exit 0
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
new file mode 100755
index 000000000..6bf24b852
--- /dev/null
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -0,0 +1,45 @@
+#!/bin/sh
+# perf stat --bpf-counters test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# check whether $2 is within +/- 10% of $1
+compare_number()
+{
+ first_num=$1
+ second_num=$2
+
+ # upper bound is first_num * 110%
+ upper=$(expr $first_num + $first_num / 10 )
+ # lower bound is first_num * 90%
+ lower=$(expr $first_num - $first_num / 10 )
+
+ if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
+ echo "The difference between $first_num and $second_num are greater than 10%."
+ exit 1
+ fi
+}
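+# Usage sketch (hypothetical values): "compare_number 1000 1050" returns
+# normally since 1050 lies within [900, 1100], while "compare_number 1000 1200"
+# prints the error message and exits 1.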
+
+# skip if --bpf-counters is not supported
+if ! perf stat -e cycles --bpf-counters true > /dev/null 2>&1; then
+ if [ "$1" = "-v" ]; then
+ echo "Skipping: --bpf-counters not supported"
+ perf --no-pager stat -e cycles --bpf-counters true || true
+ fi
+ exit 2
+fi
+
+base_cycles=$(perf stat --no-big-num -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
+if [ "$base_cycles" == "<not" ]; then
+ echo "Skipping: cycles event not counted"
+ exit 2
+fi
+bpf_cycles=$(perf stat --no-big-num --bpf-counters -e cycles -- perf bench sched messaging -g 1 -l 100 -t 2>&1 | awk '/cycles/ {print $1}')
+if [ "$bpf_cycles" == "<not" ]; then
+ echo "Failed: cycles not counted with --bpf-counters"
+ exit 1
+fi
+
+compare_number $base_cycles $bpf_cycles
+exit 0
diff --git a/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
new file mode 100755
index 000000000..e75d0780d
--- /dev/null
+++ b/tools/perf/tests/shell/stat_bpf_counters_cgrp.sh
@@ -0,0 +1,79 @@
+#!/bin/sh
+# perf stat --bpf-counters --for-each-cgroup test
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+test_cgroups=
+if [ "$1" = "-v" ]; then
+ verbose="1"
+fi
+
+# skip if --bpf-counters --for-each-cgroup is not supported
+check_bpf_counter()
+{
+ if ! perf stat -a --bpf-counters --for-each-cgroup / true > /dev/null 2>&1; then
+ if [ "${verbose}" = "1" ]; then
+ echo "Skipping: --bpf-counters --for-each-cgroup not supported"
+ perf --no-pager stat -a --bpf-counters --for-each-cgroup / true || true
+ fi
+ exit 2
+ fi
+}
+
+# find two cgroups to measure
+find_cgroups()
+{
+ # try usual systemd slices first
+ if [ -d /sys/fs/cgroup/system.slice ] && [ -d /sys/fs/cgroup/user.slice ]; then
+ test_cgroups="system.slice,user.slice"
+ return
+ fi
+
+ # try root and self cgroups
+ find_cgroups_self_cgrp=$(grep perf_event /proc/self/cgroup | cut -d: -f3)
+ if [ -z ${find_cgroups_self_cgrp} ]; then
+ # cgroup v2 doesn't specify perf_event
+ find_cgroups_self_cgrp=$(grep ^0: /proc/self/cgroup | cut -d: -f3)
+ fi
+
+ if [ -z ${find_cgroups_self_cgrp} ]; then
+ test_cgroups="/"
+ else
+ test_cgroups="/,${find_cgroups_self_cgrp}"
+ fi
+}
+
+# As cgroup events are cpu-wide, we cannot simply compare the result.
+# Just check if it runs without failure and has non-zero results.
+check_system_wide_counted()
+{
+ check_system_wide_counted_output=$(perf stat -a --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, sleep 1 2>&1)
+ if echo ${check_system_wide_counted_output} | grep -q -F "<not "; then
+ echo "Some system-wide events are not counted"
+ if [ "${verbose}" = "1" ]; then
+ echo ${check_system_wide_counted_output}
+ fi
+ exit 1
+ fi
+}
+
+check_cpu_list_counted()
+{
+ check_cpu_list_counted_output=$(perf stat -C 0,1 --bpf-counters --for-each-cgroup ${test_cgroups} -e cpu-clock -x, taskset -c 1 sleep 1 2>&1)
+ if echo ${check_cpu_list_counted_output} | grep -q -F "<not "; then
+ echo "Some CPU events are not counted"
+ if [ "${verbose}" = "1" ]; then
+ echo ${check_cpu_list_counted_output}
+ fi
+ exit 1
+ fi
+}
+
+check_bpf_counter
+find_cgroups
+
+check_system_wide_counted
+check_cpu_list_counted
+
+exit 0
diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
new file mode 100755
index 000000000..ec108d45d
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
@@ -0,0 +1,68 @@
+#!/bin/sh
+# Check Arm64 callgraphs are complete in fp mode
+# SPDX-License-Identifier: GPL-2.0
+
+lscpu | grep -q "aarch64" || exit 2
+
+if ! [ -x "$(command -v cc)" ]; then
+ echo "failed: no compiler, install gcc"
+ exit 2
+fi
+
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+TEST_PROGRAM_SOURCE=$(mktemp /tmp/test_program.XXXXX.c)
+TEST_PROGRAM=$(mktemp /tmp/test_program.XXXXX)
+
+cleanup_files()
+{
+ rm -f $PERF_DATA
+ rm -f $TEST_PROGRAM_SOURCE
+ rm -f $TEST_PROGRAM
+}
+
+trap cleanup_files exit term int
+
+cat << EOF > $TEST_PROGRAM_SOURCE
+int a = 0;
+void leaf(void) {
+ for (;;)
+ a += a;
+}
+void parent(void) {
+ leaf();
+}
+int main(void) {
+ parent();
+ return 0;
+}
+EOF
+
+echo " + Compiling test program ($TEST_PROGRAM)..."
+
+CFLAGS="-g -O0 -fno-inline -fno-omit-frame-pointer"
+cc $CFLAGS $TEST_PROGRAM_SOURCE -o $TEST_PROGRAM || exit 1
+
+# Add a 1 second delay to skip samples that are not in the leaf() function
+perf record -o $PERF_DATA --call-graph fp -e cycles//u -D 1000 --user-callchains -- $TEST_PROGRAM 2> /dev/null &
+PID=$!
+
+echo " + Recording (PID=$PID)..."
+sleep 2
+echo " + Stopping perf-record..."
+
+kill $PID
+wait $PID
+
+# expected perf-script output:
+#
+# program
+# 728 leaf
+# 753 parent
+# 76c main
+# ...
+
+perf script -i $PERF_DATA -F comm,ip,sym | head -n4
+perf script -i $PERF_DATA -F comm,ip,sym | head -n4 | \
+ awk '{ if ($2 != "") sym[i++] = $2 } END { if (sym[0] != "leaf" ||
+ sym[1] != "parent" ||
+ sym[2] != "main") exit 1 }'
diff --git a/tools/perf/tests/shell/test_arm_coresight.sh b/tools/perf/tests/shell/test_arm_coresight.sh
new file mode 100755
index 000000000..daad786cf
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_coresight.sh
@@ -0,0 +1,188 @@
+#!/bin/sh
+# Check Arm CoreSight trace data recording and synthesized samples
+
+# Uses 'perf record' to record trace data with Arm CoreSight sinks;
+# then verifies that branch samples and instruction samples are
+# generated by CoreSight, using the 'perf script' and 'perf report'
+# commands.
+
+# SPDX-License-Identifier: GPL-2.0
+# Leo Yan <leo.yan@linaro.org>, 2020
+
+glb_err=0
+
+skip_if_no_cs_etm_event() {
+ perf list | grep -q 'cs_etm//' && return 0
+
+ # cs_etm event doesn't exist
+ return 2
+}
+
+skip_if_no_cs_etm_event || exit 2
+
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+file=$(mktemp /tmp/temporary_file.XXXXX)
+
+cleanup_files()
+{
+ rm -f ${perfdata}
+ rm -f ${file}
+ rm -f "${perfdata}.old"
+ trap - exit term int
+ exit $glb_err
+}
+
+trap cleanup_files exit term int
+
+record_touch_file() {
+ echo "Recording trace (only user mode) with path: CPU$2 => $1"
+ rm -f $file
+ perf record -o ${perfdata} -e cs_etm/@$1/u --per-thread \
+ -- taskset -c $2 touch $file > /dev/null 2>&1
+}
+
+perf_script_branch_samples() {
+ echo "Looking at perf.data file for dumping branch samples:"
+
+ # Below is an example of the branch samples dumping:
+ # touch 6512 1 branches:u: ffffb220824c strcmp+0xc (/lib/aarch64-linux-gnu/ld-2.27.so)
+ # touch 6512 1 branches:u: ffffb22082e0 strcmp+0xa0 (/lib/aarch64-linux-gnu/ld-2.27.so)
+ # touch 6512 1 branches:u: ffffb2208320 strcmp+0xe0 (/lib/aarch64-linux-gnu/ld-2.27.so)
+ perf script -F,-time -i ${perfdata} 2>&1 | \
+ egrep " +$1 +[0-9]+ .* +branches:(.*:)? +" > /dev/null 2>&1
+}
+
+perf_report_branch_samples() {
+ echo "Looking at perf.data file for reporting branch samples:"
+
+ # Below is an example of the branch samples reporting:
+ # 73.04% 73.04% touch libc-2.27.so [.] _dl_addr
+ # 7.71% 7.71% touch libc-2.27.so [.] getenv
+ # 2.59% 2.59% touch ld-2.27.so [.] strcmp
+ perf report --stdio -i ${perfdata} 2>&1 | \
+ egrep " +[0-9]+\.[0-9]+% +[0-9]+\.[0-9]+% +$1 " > /dev/null 2>&1
+}
+
+perf_report_instruction_samples() {
+ echo "Looking at perf.data file for instruction samples:"
+
+ # Below is an example of the instruction samples reporting:
+ # 68.12% touch libc-2.27.so [.] _dl_addr
+ # 5.80% touch libc-2.27.so [.] getenv
+ # 4.35% touch ld-2.27.so [.] _dl_fixup
+ perf report --itrace=i20i --stdio -i ${perfdata} 2>&1 | \
+ egrep " +[0-9]+\.[0-9]+% +$1" > /dev/null 2>&1
+}
+
+arm_cs_report() {
+ if [ $2 != 0 ]; then
+ echo "$1: FAIL"
+ glb_err=$2
+ else
+ echo "$1: PASS"
+ fi
+}
+
+is_device_sink() {
+ # If the node of "enable_sink" is existed under the device path, this
+ # means the device is a sink device. Need to exclude 'tpiu' since it
+ # cannot support perf PMU.
+ echo "$1" | egrep -q -v "tpiu"
+
+ if [ $? -eq 0 -a -e "$1/enable_sink" ]; then
+
+ pmu_dev="/sys/bus/event_source/devices/cs_etm/sinks/$2"
+
+ # Warn if the device is not supported by PMU
+ if ! [ -f $pmu_dev ]; then
+ echo "PMU doesn't support $pmu_dev"
+ fi
+
+ return 0
+ fi
+
+ # Otherwise, it's not a sink device
+ return 1
+}
+
+arm_cs_iterate_devices() {
+ for dev in $1/connections/out\:*; do
+
+ # Skip testing if it's not a directory
+ ! [ -d $dev ] && continue;
+
+ # Resolve the symbolic link to get the full device path
+ path=`readlink -f $dev`
+
+ # Extract device name from path, e.g.
+ # path = '/sys/devices/platform/20010000.etf/tmc_etf0'
+ # `> device_name = 'tmc_etf0'
+ device_name=$(basename $path)
+
+ if is_device_sink $path $device_name; then
+
+ record_touch_file $device_name $2 &&
+ perf_script_branch_samples touch &&
+ perf_report_branch_samples touch &&
+ perf_report_instruction_samples touch
+
+ err=$?
+ arm_cs_report "CoreSight path testing (CPU$2 -> $device_name)" $err
+ fi
+
+ arm_cs_iterate_devices $dev $2
+ done
+}
+
+arm_cs_etm_traverse_path_test() {
+ # Iterate for every ETM device
+ for dev in /sys/bus/coresight/devices/etm*; do
+
+ # Find which CPU this ETM device belongs to
+ cpu=`cat $dev/cpu`
+
+ # Use depth-first search (DFS) to iterate outputs
+ arm_cs_iterate_devices $dev $cpu
+ done
+}
+
+arm_cs_etm_system_wide_test() {
+ echo "Recording trace with system wide mode"
+ perf record -o ${perfdata} -e cs_etm// -a -- ls > /dev/null 2>&1
+
+ perf_script_branch_samples perf &&
+ perf_report_branch_samples perf &&
+ perf_report_instruction_samples perf
+
+ err=$?
+ arm_cs_report "CoreSight system wide testing" $err
+}
+
+arm_cs_etm_snapshot_test() {
+ echo "Recording trace with snapshot mode"
+ perf record -o ${perfdata} -e cs_etm// -S \
+ -- dd if=/dev/zero of=/dev/null > /dev/null 2>&1 &
+ PERFPID=$!
+
+ # Wait for perf program
+ sleep 1
+
+ # Send signal to snapshot trace data
+ kill -USR2 $PERFPID
+
+ # Stop perf program
+ kill $PERFPID
+ wait $PERFPID
+
+ perf_script_branch_samples dd &&
+ perf_report_branch_samples dd &&
+ perf_report_instruction_samples dd
+
+ err=$?
+ arm_cs_report "CoreSight snapshot testing" $err
+}
+
+arm_cs_etm_traverse_path_test
+arm_cs_etm_system_wide_test
+arm_cs_etm_snapshot_test
+exit $glb_err
diff --git a/tools/perf/tests/shell/test_arm_spe.sh b/tools/perf/tests/shell/test_arm_spe.sh
new file mode 100755
index 000000000..0d47479ad
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_spe.sh
@@ -0,0 +1,113 @@
+#!/bin/sh
+# Check Arm SPE trace data recording and synthesized samples
+
+# Uses 'perf record' to record trace data of Arm SPE events;
+# then verifies that SPE event samples are generated by SPE, using
+# the 'perf script' and 'perf report' commands.
+
+# SPDX-License-Identifier: GPL-2.0
+# German Gomez <german.gomez@arm.com>, 2021
+
+skip_if_no_arm_spe_event() {
+ perf list | egrep -q 'arm_spe_[0-9]+//' && return 0
+
+ # arm_spe event doesn't exist
+ return 2
+}
+
+skip_if_no_arm_spe_event || exit 2
+
+perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+glb_err=0
+
+cleanup_files()
+{
+ rm -f ${perfdata}
+ rm -f ${perfdata}.old
+ exit $glb_err
+}
+
+trap cleanup_files exit term int
+
+arm_spe_report() {
+ if [ $2 = 0 ]; then
+ echo "$1: PASS"
+ elif [ $2 = 2 ]; then
+ echo "$1: SKIPPED"
+ else
+ echo "$1: FAIL"
+ glb_err=$2
+ fi
+}
+
+perf_script_samples() {
+ echo "Looking at perf.data file for dumping samples:"
+
+ # from arm-spe.c/arm_spe_synth_events()
+ events="(ld1-miss|ld1-access|llc-miss|lld-access|tlb-miss|tlb-access|branch-miss|remote-access|memory)"
+
+ # Below is an example of the samples dumping:
+ # dd 3048 [002] 1 l1d-access: ffffaa64999c __GI___libc_write+0x3c (/lib/aarch64-linux-gnu/libc-2.27.so)
+ # dd 3048 [002] 1 tlb-access: ffffaa64999c __GI___libc_write+0x3c (/lib/aarch64-linux-gnu/libc-2.27.so)
+ # dd 3048 [002] 1 memory: ffffaa64999c __GI___libc_write+0x3c (/lib/aarch64-linux-gnu/libc-2.27.so)
+ perf script -F,-time -i ${perfdata} 2>&1 | \
+ egrep " +$1 +[0-9]+ .* +${events}:(.*:)? +" > /dev/null 2>&1
+}
+
+perf_report_samples() {
+ echo "Looking at perf.data file for reporting samples:"
+
+ # Below is an example of the samples reporting:
+ # 73.04% 73.04% dd libc-2.27.so [.] _dl_addr
+ # 7.71% 7.71% dd libc-2.27.so [.] getenv
+ # 2.59% 2.59% dd ld-2.27.so [.] strcmp
+ perf report --stdio -i ${perfdata} 2>&1 | \
+ egrep " +[0-9]+\.[0-9]+% +[0-9]+\.[0-9]+% +$1 " > /dev/null 2>&1
+}
+
+arm_spe_snapshot_test() {
+ echo "Recording trace with snapshot mode $perfdata"
+ perf record -o ${perfdata} -e arm_spe// -S \
+ -- dd if=/dev/zero of=/dev/null > /dev/null 2>&1 &
+ PERFPID=$!
+
+ # Wait for perf program
+ sleep 1
+
+ # Send signal to snapshot trace data
+ kill -USR2 $PERFPID
+
+ # Stop perf program
+ kill $PERFPID
+ wait $PERFPID
+
+ perf_script_samples dd &&
+ perf_report_samples dd
+
+ err=$?
+ arm_spe_report "SPE snapshot testing" $err
+}
+
+arm_spe_system_wide_test() {
+ echo "Recording trace with system-wide mode $perfdata"
+
+ perf record -o - -e dummy -a -B true > /dev/null 2>&1
+ if [ $? != 0 ]; then
+ arm_spe_report "SPE system-wide testing" 2
+ return
+ fi
+
+ perf record -o ${perfdata} -e arm_spe// -a --no-bpf-event \
+ -- dd if=/dev/zero of=/dev/null count=100000 > /dev/null 2>&1
+
+ perf_script_samples dd &&
+ perf_report_samples dd
+
+ err=$?
+ arm_spe_report "SPE system-wide testing" $err
+}
+
+arm_spe_snapshot_test
+arm_spe_system_wide_test
+
+exit $glb_err
diff --git a/tools/perf/tests/shell/test_arm_spe_fork.sh b/tools/perf/tests/shell/test_arm_spe_fork.sh
new file mode 100755
index 000000000..c920d3583
--- /dev/null
+++ b/tools/perf/tests/shell/test_arm_spe_fork.sh
@@ -0,0 +1,92 @@
+#!/bin/sh
+# Check Arm SPE doesn't hang when there are forks
+
+# SPDX-License-Identifier: GPL-2.0
+# German Gomez <german.gomez@arm.com>, 2022
+
+skip_if_no_arm_spe_event() {
+ perf list | egrep -q 'arm_spe_[0-9]+//' && return 0
+ return 2
+}
+
+skip_if_no_arm_spe_event || exit 2
+
+# skip if there's no compiler
+if ! [ -x "$(command -v cc)" ]; then
+ echo "failed: no compiler, install gcc"
+ exit 2
+fi
+
+TEST_PROGRAM_SOURCE=$(mktemp /tmp/__perf_test.program.XXXXX.c)
+TEST_PROGRAM=$(mktemp /tmp/__perf_test.program.XXXXX)
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+PERF_RECORD_LOG=$(mktemp /tmp/__perf_test.log.XXXXX)
+
+cleanup_files()
+{
+ echo "Cleaning up files..."
+ rm -f ${PERF_RECORD_LOG}
+ rm -f ${PERF_DATA}
+ rm -f ${TEST_PROGRAM_SOURCE}
+ rm -f ${TEST_PROGRAM}
+}
+
+trap cleanup_files exit term int
+
+# compile test program
+cat << EOF > $TEST_PROGRAM_SOURCE
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+int workload() {
+ while (1)
+ sqrt(rand());
+ return 0;
+}
+
+int main() {
+ switch (fork()) {
+ case 0:
+ return workload();
+ case -1:
+ return 1;
+ default:
+ wait(NULL);
+ }
+ return 0;
+}
+EOF
+
+echo "Compiling test program..."
+CFLAGS="-lm"
+cc $TEST_PROGRAM_SOURCE $CFLAGS -o $TEST_PROGRAM || exit 1
+
+echo "Recording workload..."
+perf record -o ${PERF_DATA} -e arm_spe/period=65536/ -vvv -- $TEST_PROGRAM > ${PERF_RECORD_LOG} 2>&1 &
+PERFPID=$!
+
+# Check if perf hangs by checking the perf-record logs.
+sleep 1
+log0=$(wc -l $PERF_RECORD_LOG)
+echo Log lines = $log0
+sleep 1
+log1=$(wc -l $PERF_RECORD_LOG)
+echo Log lines after 1 second = $log1
+
+kill $PERFPID
+wait $PERFPID
+# test program may leave an orphan process running the workload
+killall $(basename $TEST_PROGRAM)
+
+if [ "$log0" = "$log1" ];
+then
+ echo "SPE hang test: FAIL"
+ exit 1
+else
+ echo "SPE hang test: PASS"
+fi
+
+exit 0
diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh
new file mode 100755
index 000000000..d7ff5c4b4
--- /dev/null
+++ b/tools/perf/tests/shell/test_brstack.sh
@@ -0,0 +1,118 @@
+#!/bin/sh
+# Check branch stack sampling
+
+# SPDX-License-Identifier: GPL-2.0
+# German Gomez <german.gomez@arm.com>, 2022
+
+# we need a C compiler to build the test programs
+# so bail if none is found
+if ! [ -x "$(command -v cc)" ]; then
+ echo "failed: no compiler, install gcc"
+ exit 2
+fi
+
+# skip the test if the hardware doesn't support branch stack sampling
+# or if the architecture doesn't support the filter types: any,save_type,u
+if ! perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev/null 2>&1 ; then
+ echo "skip: system doesn't support filter types: any,save_type,u"
+ exit 2
+fi
+
+TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX)
+
+cleanup() {
+ rm -rf $TMPDIR
+}
+
+trap cleanup exit term int
+
+gen_test_program() {
+ # generate test program
+ cat << EOF > $1
+#define BENCH_RUNS 999999
+int cnt;
+void bar(void) {
+} /* return */
+void foo(void) {
+ bar(); /* call */
+} /* return */
+void bench(void) {
+ void (*foo_ind)(void) = foo;
+ if ((cnt++) % 3) /* branch (cond) */
+ foo(); /* call */
+ bar(); /* call */
+ foo_ind(); /* call (ind) */
+}
+int main(void)
+{
+ int cnt = 0;
+ while (1) {
+ if ((cnt++) > BENCH_RUNS)
+ break;
+ bench(); /* call */
+ } /* branch (uncond) */
+ return 0;
+}
+EOF
+}
+
+test_user_branches() {
+ echo "Testing user branch stack sampling"
+
+ gen_test_program "$TEMPDIR/program.c"
+ cc -fno-inline -g "$TEMPDIR/program.c" -o $TMPDIR/a.out
+
+ perf record -o $TMPDIR/perf.data --branch-filter any,save_type,u -- $TMPDIR/a.out > /dev/null 2>&1
+ perf script -i $TMPDIR/perf.data --fields brstacksym | xargs -n1 > $TMPDIR/perf.script
+
+ # example of branch entries:
+ # foo+0x14/bar+0x40/P/-/-/0/CALL
+
+ set -x
+ egrep -m1 "^bench\+[^ ]*/foo\+[^ ]*/IND_CALL$" $TMPDIR/perf.script
+ egrep -m1 "^foo\+[^ ]*/bar\+[^ ]*/CALL$" $TMPDIR/perf.script
+ egrep -m1 "^bench\+[^ ]*/foo\+[^ ]*/CALL$" $TMPDIR/perf.script
+ egrep -m1 "^bench\+[^ ]*/bar\+[^ ]*/CALL$" $TMPDIR/perf.script
+ egrep -m1 "^bar\+[^ ]*/foo\+[^ ]*/RET$" $TMPDIR/perf.script
+ egrep -m1 "^foo\+[^ ]*/bench\+[^ ]*/RET$" $TMPDIR/perf.script
+ egrep -m1 "^bench\+[^ ]*/bench\+[^ ]*/COND$" $TMPDIR/perf.script
+ egrep -m1 "^main\+[^ ]*/main\+[^ ]*/UNCOND$" $TMPDIR/perf.script
+ set +x
+
+ # some branch types are still not being tested:
+ # IND COND_CALL COND_RET SYSCALL SYSRET IRQ SERROR NO_TX
+}
+
+# first argument <arg0> is the argument passed to "--branch-filter <arg0>,save_type,u"
+# second argument is the expected branch types for the given filter
+test_filter() {
+ local filter=$1
+ local expect=$2
+
+ echo "Testing branch stack filtering permutation ($filter,$expect)"
+
+ gen_test_program "$TEMPDIR/program.c"
+ cc -fno-inline -g "$TEMPDIR/program.c" -o $TMPDIR/a.out
+
+ perf record -o $TMPDIR/perf.data --branch-filter $filter,save_type,u -- $TMPDIR/a.out > /dev/null 2>&1
+ perf script -i $TMPDIR/perf.data --fields brstack | xargs -n1 > $TMPDIR/perf.script
+
+ # fail if we find any branch type that doesn't match any of the expected ones
+ # also consider UNKNOWN branch types (-)
+ if egrep -vm1 "^[^ ]*/($expect|-|( *))$" $TMPDIR/perf.script; then
+ return 1
+ fi
+}
+
+set -e
+
+test_user_branches
+
+test_filter "any_call" "CALL|IND_CALL|COND_CALL|SYSCALL|IRQ"
+test_filter "call" "CALL|SYSCALL"
+test_filter "cond" "COND"
+test_filter "any_ret" "RET|COND_RET|SYSRET|ERET"
+
+test_filter "call,cond" "CALL|SYSCALL|COND"
+test_filter "any_call,cond" "CALL|IND_CALL|COND_CALL|IRQ|SYSCALL|COND"
+test_filter "cond,any_call,any_ret" "COND|CALL|IND_CALL|COND_CALL|SYSCALL|IRQ|RET|COND_RET|SYSRET|ERET"
diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh
new file mode 100755
index 000000000..cd6eb54d2
--- /dev/null
+++ b/tools/perf/tests/shell/test_data_symbol.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+# Test data symbol
+
+# SPDX-License-Identifier: GPL-2.0
+# Leo Yan <leo.yan@linaro.org>, 2022
+
+skip_if_no_mem_event() {
+ perf mem record -e list 2>&1 | egrep -q 'available' && return 0
+ return 2
+}
+
+skip_if_no_mem_event || exit 2
+
+# skip if there's no compiler
+if ! [ -x "$(command -v cc)" ]; then
+ echo "skip: no compiler, install gcc"
+ exit 2
+fi
+
+TEST_PROGRAM=$(mktemp /tmp/__perf_test.program.XXXXX)
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+
+check_result() {
+ # The memory report format is as below:
+ # 99.92% ... [.] buf1+0x38
+ result=$(perf mem report -i ${PERF_DATA} -s symbol_daddr -q 2>&1 |
+ awk '/buf1/ { print $4 }')
+
+ # Fail the test if there is no sample for "buf1"
+ [ -z "$result" ] && return 1
+
+ while IFS= read -r line; do
+ # The "data1" and "data2" fields in structure "buf1" have
+ # offset "0x0" and "0x38", returns failure if detect any
+ # other offset value.
+ if [ "$line" != "buf1+0x0" ] && [ "$line" != "buf1+0x38" ]; then
+ return 1
+ fi
+ done <<< "$result"
+
+ return 0
+}
+
+cleanup_files()
+{
+ echo "Cleaning up files..."
+ rm -f ${PERF_DATA}
+ rm -f ${TEST_PROGRAM}
+}
+
+trap cleanup_files exit term int
+
+# compile test program
+echo "Compiling test program..."
+cat << EOF | cc -o ${TEST_PROGRAM} -x c -
+typedef struct _buf {
+ char data1;
+ char reserved[55];
+ char data2;
+} buf __attribute__((aligned(64)));
+
+static buf buf1;
+
+int main(void) {
+ for (;;) {
+ buf1.data1++;
+ buf1.data2 += buf1.data1;
+ }
+ return 0;
+}
+EOF
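+# Note on the layout above: "data1" is at offset 0x0 and "data2" at offset
+# 0x38 (1 byte for data1 + 55 reserved bytes = 56 = 0x38), which is exactly
+# the pair of symbols check_result() accepts.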
+
+echo "Recording workload..."
+
+# perf mem/c2c internally uses the IBS PMU on AMD CPUs, which doesn't support
+# user/kernel filtering and per-process monitoring, so pin the program to a
+# specific CPU and test in per-CPU mode.
+is_amd=$(egrep -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo)
+if (($is_amd >= 1)); then
+ perf mem record -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM &
+else
+ perf mem record --all-user -o ${PERF_DATA} -- $TEST_PROGRAM &
+fi
+
+PERFPID=$!
+
+sleep 1
+
+kill $PERFPID
+wait $PERFPID
+
+check_result
+exit $?
diff --git a/tools/perf/tests/shell/test_intel_pt.sh b/tools/perf/tests/shell/test_intel_pt.sh
new file mode 100755
index 000000000..f5ed7b1af
--- /dev/null
+++ b/tools/perf/tests/shell/test_intel_pt.sh
@@ -0,0 +1,663 @@
+#!/bin/sh
+# Miscellaneous Intel PT testing
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# Skip if no Intel PT
+perf list | grep -q 'intel_pt//' || exit 2
+
+shelldir=$(dirname "$0")
+. "${shelldir}"/lib/waiting.sh
+
+skip_cnt=0
+ok_cnt=0
+err_cnt=0
+
+temp_dir=$(mktemp -d /tmp/perf-test-intel-pt-sh.XXXXXXXXXX)
+
+tmpfile="${temp_dir}/tmp-perf.data"
+perfdatafile="${temp_dir}/test-perf.data"
+outfile="${temp_dir}/test-out.txt"
+errfile="${temp_dir}/test-err.txt"
+workload="${temp_dir}/workload"
+awkscript="${temp_dir}/awkscript"
+jitdump_workload="${temp_dir}/jitdump_workload"
+maxbrstack="${temp_dir}/maxbrstack.py"
+
+cleanup()
+{
+ trap - EXIT TERM INT
+ sane=$(echo "${temp_dir}" | cut -b 1-26)
+ if [ "${sane}" = "/tmp/perf-test-intel-pt-sh" ] ; then
+ echo "--- Cleaning up ---"
+ rm -f "${temp_dir}/"*
+ rmdir "${temp_dir}"
+ fi
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+# perf record for testing without decoding
+perf_record_no_decode()
+{
+ # Options to speed up recording: no post-processing, no build-id cache update,
+ # and no BPF events.
+ perf record -B -N --no-bpf-event "$@"
+}
+
+# perf record for testing should not need BPF events
+perf_record_no_bpf()
+{
+ # Options for no BPF events
+ perf record --no-bpf-event "$@"
+}
+
+have_workload=false
+cat << _end_of_file_ | /usr/bin/cc -o "${workload}" -xc - -pthread && have_workload=true
+#include <time.h>
+#include <pthread.h>
+
+void work(void) {
+ struct timespec tm = {
+ .tv_nsec = 1000000,
+ };
+ int i;
+
+ /* Run for about 30 seconds */
+ for (i = 0; i < 30000; i++)
+ nanosleep(&tm, NULL);
+}
+
+void *threadfunc(void *arg) {
+ work();
+ return NULL;
+}
+
+int main(void) {
+ pthread_t th;
+
+ pthread_create(&th, NULL, threadfunc, NULL);
+ work();
+ pthread_join(th, NULL);
+ return 0;
+}
+_end_of_file_
+
+can_cpu_wide()
+{
+ echo "Checking for CPU-wide recording on CPU $1"
+ if ! perf_record_no_decode -o "${tmpfile}" -e dummy:u -C "$1" true >/dev/null 2>&1 ; then
+ echo "No so skipping"
+ return 2
+ fi
+ echo OK
+ return 0
+}
+
+test_system_wide_side_band()
+{
+ echo "--- Test system-wide sideband ---"
+
+ # Need CPU 0 and CPU 1
+ can_cpu_wide 0 || return $?
+ can_cpu_wide 1 || return $?
+
+ # Record on CPU 0 a task running on CPU 1
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt//u -C 0 -- taskset --cpu-list 1 uname
+
+ # Should get MMAP events from CPU 1 because they can be needed to decode
+ mmap_cnt=$(perf script -i "${perfdatafile}" --no-itrace --show-mmap-events -C 1 2>/dev/null | grep -c MMAP)
+
+ if [ "${mmap_cnt}" -gt 0 ] ; then
+ echo OK
+ return 0
+ fi
+
+ echo "Failed to record MMAP events on CPU 1 when tracing CPU 0"
+ return 1
+}
+
+can_kernel()
+{
+ if [ -z "${can_kernel_trace}" ] ; then
+ can_kernel_trace=0
+ perf_record_no_decode -o "${tmpfile}" -e dummy:k true >/dev/null 2>&1 && can_kernel_trace=1
+ fi
+ if [ ${can_kernel_trace} -eq 0 ] ; then
+ echo "SKIP: no kernel tracing"
+ return 2
+ fi
+ return 0
+}
+
+test_per_thread()
+{
+ k="$1"
+ desc="$2"
+
+ echo "--- Test per-thread ${desc}recording ---"
+
+ if ! $have_workload ; then
+ echo "No workload, so skipping"
+ return 2
+ fi
+
+ if [ "${k}" = "k" ] ; then
+ can_kernel || return 2
+ fi
+
+ cat <<- "_end_of_file_" > "${awkscript}"
+ BEGIN {
+ s = "[ ]*"
+ u = s"[0-9]+"s
+ d = s"[0-9-]+"s
+ x = s"[0-9a-fA-FxX]+"s
+ mmapping = "idx"u": mmapping fd"u
+ set_output = "idx"u": set output fd"u"->"u
+ perf_event_open = "sys_perf_event_open: pid"d"cpu"d"group_fd"d"flags"x"="u
+ }
+
+ /perf record opening and mmapping events/ {
+ if (!done)
+ active = 1
+ }
+
+ /perf record done opening and mmapping events/ {
+ active = 0
+ done = 1
+ }
+
+ $0 ~ perf_event_open && active {
+ match($0, perf_event_open)
+ $0 = substr($0, RSTART, RLENGTH)
+ pid = $3
+ cpu = $5
+ fd = $11
+ print "pid " pid " cpu " cpu " fd " fd " : " $0
+ fd_array[fd] = fd
+ pid_array[fd] = pid
+ cpu_array[fd] = cpu
+ }
+
+ $0 ~ mmapping && active {
+ match($0, mmapping)
+ $0 = substr($0, RSTART, RLENGTH)
+ fd = $5
+ print "fd " fd " : " $0
+ if (fd in fd_array) {
+ mmap_array[fd] = 1
+ } else {
+ print "Unknown fd " fd
+ exit 1
+ }
+ }
+
+ $0 ~ set_output && active {
+ match($0, set_output)
+ $0 = substr($0, RSTART, RLENGTH)
+ fd = $6
+ fd_to = $8
+ print "fd " fd " fd_to " fd_to " : " $0
+ if (fd in fd_array) {
+ if (fd_to in fd_array) {
+ set_output_array[fd] = fd_to
+ } else {
+ print "Unknown fd " fd_to
+ exit 1
+ }
+ } else {
+ print "Unknown fd " fd
+ exit 1
+ }
+ }
+
+ END {
+ print "Checking " length(fd_array) " fds"
+ for (fd in fd_array) {
+ if (fd in mmap_array) {
+ pid = pid_array[fd]
+ if (pid != -1) {
+ if (pid in pids) {
+ print "More than 1 mmap for PID " pid
+ exit 1
+ }
+ pids[pid] = 1
+ }
+ cpu = cpu_array[fd]
+ if (cpu != -1) {
+ if (cpu in cpus) {
+ print "More than 1 mmap for CPU " cpu
+ exit 1
+ }
+ cpus[cpu] = 1
+ }
+ } else if (!(fd in set_output_array)) {
+ print "No mmap for fd " fd
+ exit 1
+ }
+ }
+ n = length(pids)
+ if (n != thread_cnt) {
+ print "Expected " thread_cnt " per-thread mmaps - found " n
+ exit 1
+ }
+ }
+ _end_of_file_
+
+ $workload &
+ w1=$!
+ $workload &
+ w2=$!
+ echo "Workload PIDs are $w1 and $w2"
+ wait_for_threads ${w1} 2
+ wait_for_threads ${w2} 2
+
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt//u"${k}" -vvv --per-thread -p "${w1},${w2}" 2>"${errfile}" >"${outfile}" &
+ ppid=$!
+ echo "perf PID is $ppid"
+ wait_for_perf_to_start ${ppid} "${errfile}" || return 1
+
+ kill ${w1}
+ wait_for_process_to_exit ${w1} || return 1
+ is_running ${ppid} || return 1
+
+ kill ${w2}
+ wait_for_process_to_exit ${w2} || return 1
+ wait_for_process_to_exit ${ppid} || return 1
+
+ awk -v thread_cnt=4 -f "${awkscript}" "${errfile}" || return 1
+
+ echo OK
+ return 0
+}
+
+test_jitdump()
+{
+ echo "--- Test tracing self-modifying code that uses jitdump ---"
+
+ script_path=$(realpath "$0")
+ script_dir=$(dirname "$script_path")
+ jitdump_incl_dir="${script_dir}/../../util"
+ jitdump_h="${jitdump_incl_dir}/jitdump.h"
+
+ if [ ! -e "${jitdump_h}" ] ; then
+ echo "SKIP: Include file jitdump.h not found"
+ return 2
+ fi
+
+ if [ -z "${have_jitdump_workload}" ] ; then
+ have_jitdump_workload=false
+ # Create a workload that uses self-modifying code and generates its own jitdump file
+ cat <<- "_end_of_file_" | /usr/bin/cc -o "${jitdump_workload}" -I "${jitdump_incl_dir}" -xc - -pthread && have_jitdump_workload=true
+ #define _GNU_SOURCE
+ #include <sys/mman.h>
+ #include <sys/types.h>
+ #include <stddef.h>
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <unistd.h>
+ #include <string.h>
+
+ #include "jitdump.h"
+
+ #define CHK_BYTE 0x5a
+
+ static inline uint64_t rdtsc(void)
+ {
+ unsigned int low, high;
+
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+ return low | ((uint64_t)high) << 32;
+ }
+
+ static FILE *open_jitdump(void)
+ {
+ struct jitheader header = {
+ .magic = JITHEADER_MAGIC,
+ .version = JITHEADER_VERSION,
+ .total_size = sizeof(header),
+ .pid = getpid(),
+ .timestamp = rdtsc(),
+ .flags = JITDUMP_FLAGS_ARCH_TIMESTAMP,
+ };
+ char filename[256];
+ FILE *f;
+ void *m;
+
+ snprintf(filename, sizeof(filename), "jit-%d.dump", getpid());
+ f = fopen(filename, "w+");
+ if (!f)
+ goto err;
+ /* Create an MMAP event for the jitdump file. That is how perf tool finds it. */
+ m = mmap(0, 4096, PROT_READ | PROT_EXEC, MAP_PRIVATE, fileno(f), 0);
+ if (m == MAP_FAILED)
+ goto err_close;
+ munmap(m, 4096);
+ if (fwrite(&header,sizeof(header),1,f) != 1)
+ goto err_close;
+ return f;
+
+ err_close:
+ fclose(f);
+ err:
+ return NULL;
+ }
+
+ static int write_jitdump(FILE *f, void *addr, const uint8_t *dat, size_t sz, uint64_t *idx)
+ {
+ struct jr_code_load rec = {
+ .p.id = JIT_CODE_LOAD,
+ .p.total_size = sizeof(rec) + sz,
+ .p.timestamp = rdtsc(),
+ .pid = getpid(),
+ .tid = gettid(),
+ .vma = (unsigned long)addr,
+ .code_addr = (unsigned long)addr,
+ .code_size = sz,
+ .code_index = ++*idx,
+ };
+
+ if (fwrite(&rec,sizeof(rec),1,f) != 1 ||
+ fwrite(dat, sz, 1, f) != 1)
+ return -1;
+ return 0;
+ }
+
+ static void close_jitdump(FILE *f)
+ {
+ fclose(f);
+ }
+
+ int main()
+ {
+ /* Get a memory page to store executable code */
+ void *addr = mmap(0, 4096, PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ /* Code to execute: mov CHK_BYTE, %eax ; ret */
+ uint8_t dat[] = {0xb8, CHK_BYTE, 0x00, 0x00, 0x00, 0xc3};
+ FILE *f = open_jitdump();
+ uint64_t idx = 0;
+ int ret = 1;
+
+ if (!f)
+ return 1;
+ /* Copy executable code to executable memory page */
+ memcpy(addr, dat, sizeof(dat));
+ /* Record it in the jitdump file */
+ if (write_jitdump(f, addr, dat, sizeof(dat), &idx))
+ goto out_close;
+ /* Call it */
+ ret = ((int (*)(void))addr)() - CHK_BYTE;
+ out_close:
+ close_jitdump(f);
+ return ret;
+ }
+ _end_of_file_
+ fi
+
+ if ! $have_jitdump_workload ; then
+ echo "SKIP: No jitdump workload"
+ return 2
+ fi
+
+ # Change to temp_dir so jitdump collateral files go there
+ cd "${temp_dir}"
+ perf_record_no_bpf -o "${tmpfile}" -e intel_pt//u "${jitdump_workload}"
+ perf inject -i "${tmpfile}" -o "${perfdatafile}" --jit
+ decode_br_cnt=$(perf script -i "${perfdatafile}" --itrace=b | wc -l)
+ # Note that overflow and lost errors are suppressed for the error count
+ decode_err_cnt=$(perf script -i "${perfdatafile}" --itrace=e-o-l | grep -ci error)
+ cd -
+ # Should be thousands of branches
+ if [ "${decode_br_cnt}" -lt 1000 ] ; then
+ echo "Decode failed, only ${decode_br_cnt} branches"
+ return 1
+ fi
+ # Should be no errors
+ if [ "${decode_err_cnt}" -ne 0 ] ; then
+ echo "Decode failed, ${decode_err_cnt} errors"
+ perf script -i "${perfdatafile}" --itrace=e-o-l --show-mmap-events | cat
+ return 1
+ fi
+
+ echo OK
+ return 0
+}
+
+test_packet_filter()
+{
+ echo "--- Test with MTC and TSC disabled ---"
+ # Disable MTC and TSC
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/mtc=0,tsc=0/u uname
+ # Should not get MTC packet
+ mtc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "MTC 0x")
+ if [ "${mtc_cnt}" -ne 0 ] ; then
+ echo "Failed to filter with mtc=0"
+ return 1
+ fi
+ # Should not get TSC packets
+ tsc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "TSC 0x")
+ if [ "${tsc_cnt}" -ne 0 ] ; then
+ echo "Failed to filter with tsc=0"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_disable_branch()
+{
+ echo "--- Test with branches disabled ---"
+ # Disable branch
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/branch=0/u uname
+ # Should not get branch related packets
+ tnt_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "TNT 0x")
+ tip_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "TIP 0x")
+ fup_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "FUP 0x")
+ if [ "${tnt_cnt}" -ne 0 ] || [ "${tip_cnt}" -ne 0 ] || [ "${fup_cnt}" -ne 0 ] ; then
+ echo "Failed to disable branches"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_time_cyc()
+{
+ echo "--- Test with/without CYC ---"
+ # Check if CYC is supported
+ cyc=$(cat /sys/bus/event_source/devices/intel_pt/caps/psb_cyc)
+ if [ "${cyc}" != "1" ] ; then
+ echo "SKIP: CYC is not supported"
+ return 2
+ fi
+ # Enable CYC
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/cyc/u uname
+ # should get CYC packets
+ cyc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "CYC 0x")
+ if [ "${cyc_cnt}" = "0" ] ; then
+ echo "Failed to get CYC packet"
+ return 1
+ fi
+ # Without CYC
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt//u uname
+ # Should not get CYC packets
+ cyc_cnt=$(perf script -i "${perfdatafile}" -D 2>/dev/null | grep -c "CYC 0x")
+ if [ "${cyc_cnt}" -gt 0 ] ; then
+ echo "Still get CYC packet without cyc"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_sample()
+{
+ echo "--- Test recording with sample mode ---"
+ # Check if recording with sample mode is working
+ if ! perf_record_no_decode -o "${perfdatafile}" --aux-sample=8192 -e '{intel_pt//u,branch-misses:u}' uname ; then
+ echo "perf record failed with --aux-sample"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_kernel_trace()
+{
+ echo "--- Test with kernel trace ---"
+ # Check if recording with kernel trace is working
+ can_kernel || return 2
+ if ! perf_record_no_decode -o "${perfdatafile}" -e intel_pt//k -m1,128 uname ; then
+ echo "perf record failed with intel_pt//k"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_virtual_lbr()
+{
+ echo "--- Test virtual LBR ---"
+ # Check if python script is supported
+ libpython=$(perf version --build-options | grep python | grep -cv OFF)
+ if [ "${libpython}" != "1" ] ; then
+ echo "SKIP: python scripting is not supported"
+ return 2
+ fi
+
+ # Python script to determine the maximum size of branch stacks
+ cat << "_end_of_file_" > "${maxbrstack}"
+from __future__ import print_function
+
+bmax = 0
+
+def process_event(param_dict):
+ if "brstack" in param_dict:
+ brstack = param_dict["brstack"]
+ n = len(brstack)
+ global bmax
+ if n > bmax:
+ bmax = n
+
+def trace_end():
+ print("max brstack", bmax)
+_end_of_file_
+
+ # Check if virtual lbr is working
+ perf_record_no_bpf -o "${perfdatafile}" --aux-sample -e '{intel_pt//,cycles}:u' uname
+ times_val=$(perf script -i "${perfdatafile}" --itrace=L -s "${maxbrstack}" 2>/dev/null | grep "max brstack " | cut -d " " -f 3)
+ case "${times_val}" in
+ [0-9]*) ;;
+ *) times_val=0;;
+ esac
+ if [ "${times_val}" -lt 2 ] ; then
+ echo "Failed with virtual lbr"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_power_event()
+{
+ echo "--- Test power events ---"
+ # Check if power events are supported
+ power_event=$(cat /sys/bus/event_source/devices/intel_pt/caps/power_event_trace)
+ if [ "${power_event}" != "1" ] ; then
+ echo "SKIP: power_event_trace is not supported"
+ return 2
+ fi
+ if ! perf_record_no_decode -o "${perfdatafile}" -a -e intel_pt/pwr_evt/u uname ; then
+ echo "perf record failed with pwr_evt"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_no_tnt()
+{
+ echo "--- Test with TNT packets disabled ---"
+ # Check if TNT disable is supported
+ notnt=$(cat /sys/bus/event_source/devices/intel_pt/caps/tnt_disable)
+ if [ "${notnt}" != "1" ] ; then
+ echo "SKIP: tnt_disable is not supported"
+ return 2
+ fi
+ perf_record_no_decode -o "${perfdatafile}" -e intel_pt/notnt/u uname
+ # Should be no TNT packets
+ tnt_cnt=$(perf script -i "${perfdatafile}" -D | grep -c TNT)
+ if [ "${tnt_cnt}" -ne 0 ] ; then
+ echo "TNT packets still there after notnt"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+test_event_trace()
+{
+ echo "--- Test with event_trace ---"
+ # Check if event_trace is supported
+ event_trace=$(cat /sys/bus/event_source/devices/intel_pt/caps/event_trace)
+ if [ "${event_trace}" != 1 ] ; then
+ echo "SKIP: event_trace is not supported"
+ return 2
+ fi
+ if ! perf_record_no_decode -o "${perfdatafile}" -e intel_pt/event/u uname ; then
+ echo "perf record failed with event trace"
+ return 1
+ fi
+ echo OK
+ return 0
+}
+
+count_result()
+{
+ if [ "$1" -eq 2 ] ; then
+ skip_cnt=$((skip_cnt + 1))
+ return
+ fi
+ if [ "$1" -eq 0 ] ; then
+ ok_cnt=$((ok_cnt + 1))
+ return
+ fi
+ err_cnt=$((err_cnt + 1))
+}
+
+ret=0
+test_system_wide_side_band || ret=$? ; count_result $ret ; ret=0
+test_per_thread "" "" || ret=$? ; count_result $ret ; ret=0
+test_per_thread "k" "(incl. kernel) " || ret=$? ; count_result $ret ; ret=0
+test_jitdump || ret=$? ; count_result $ret ; ret=0
+test_packet_filter || ret=$? ; count_result $ret ; ret=0
+test_disable_branch || ret=$? ; count_result $ret ; ret=0
+test_time_cyc || ret=$? ; count_result $ret ; ret=0
+test_sample || ret=$? ; count_result $ret ; ret=0
+test_kernel_trace || ret=$? ; count_result $ret ; ret=0
+test_virtual_lbr || ret=$? ; count_result $ret ; ret=0
+test_power_event || ret=$? ; count_result $ret ; ret=0
+test_no_tnt || ret=$? ; count_result $ret ; ret=0
+test_event_trace || ret=$? ; count_result $ret ; ret=0
+
+cleanup
+
+echo "--- Done ---"
+
+if [ ${err_cnt} -gt 0 ] ; then
+ exit 1
+fi
+
+if [ ${ok_cnt} -gt 0 ] ; then
+ exit 0
+fi
+
+exit 2
diff --git a/tools/perf/tests/shell/test_java_symbol.sh b/tools/perf/tests/shell/test_java_symbol.sh
new file mode 100755
index 000000000..f22122580
--- /dev/null
+++ b/tools/perf/tests/shell/test_java_symbol.sh
@@ -0,0 +1,75 @@
+#!/bin/bash
+# Test java symbol
+
+# SPDX-License-Identifier: GPL-2.0
+# Leo Yan <leo.yan@linaro.org>, 2022
+
+# skip if there's no jshell
+if ! [ -x "$(command -v jshell)" ]; then
+ echo "skip: no jshell, install JDK"
+ exit 2
+fi
+
+PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
+PERF_INJ_DATA=$(mktemp /tmp/__perf_test.perf.data.inj.XXXXX)
+
+cleanup_files()
+{
+ echo "Cleaning up files..."
+ rm -f ${PERF_DATA}
+ rm -f ${PERF_INJ_DATA}
+}
+
+trap cleanup_files exit term int
+
+if [ -e "$PWD/tools/perf/libperf-jvmti.so" ]; then
+ LIBJVMTI=$PWD/tools/perf/libperf-jvmti.so
+elif [ -e "$PWD/libperf-jvmti.so" ]; then
+ LIBJVMTI=$PWD/libperf-jvmti.so
+elif [ -e "$PREFIX/lib64/libperf-jvmti.so" ]; then
+ LIBJVMTI=$PREFIX/lib64/libperf-jvmti.so
+elif [ -e "$PREFIX/lib/libperf-jvmti.so" ]; then
+ LIBJVMTI=$PREFIX/lib/libperf-jvmti.so
+elif [ -e "/usr/lib/linux-tools-$(uname -a | awk '{ print $3 }' | sed -r 's/-generic//')/libperf-jvmti.so" ]; then
+ LIBJVMTI=/usr/lib/linux-tools-$(uname -a | awk '{ print $3 }' | sed -r 's/-generic//')/libperf-jvmti.so
+else
+ echo "Fail to find libperf-jvmti.so"
+	# JVMTI is a build option, skip the test if the library cannot be found
+ exit 2
+fi
+
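+# Record a short jshell workload (a recursive fib()) with the JVMTI agent attached,
+# so jitdump data is produced for the 'perf inject -j' step below.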
+cat <<EOF | perf record -k 1 -o $PERF_DATA jshell -s -J-agentpath:$LIBJVMTI
+int fib(int x) {
+ return x > 1 ? fib(x - 2) + fib(x - 1) : 1;
+}
+
+int q = 0;
+
+for (int i = 0; i < 10; i++)
+ q += fib(i);
+
+System.out.println(q);
+EOF
+
+if [ $? -ne 0 ]; then
+ echo "Fail to record for java program"
+ exit 1
+fi
+
+if ! perf inject -i $PERF_DATA -o $PERF_INJ_DATA -j; then
+ echo "Fail to inject samples"
+ exit 1
+fi
+
+# Below is an example of the instruction samples reporting:
+# 8.18% jshell jitted-50116-29.so [.] Interpreter
+# 0.75% Thread-1 jitted-83602-1670.so [.] jdk.internal.jimage.BasicImageReader.getString(int)
+perf report --stdio -i ${PERF_INJ_DATA} 2>&1 | \
+ egrep " +[0-9]+\.[0-9]+% .* (Interpreter|jdk\.internal).*" > /dev/null 2>&1
+
+if [ $? -ne 0 ]; then
+ echo "Fail to find java symbols"
+ exit 1
+fi
+
+exit 0
diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
new file mode 100644
index 000000000..319f36ebb
--- /dev/null
+++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+# test perf probe of function from different CU
+# SPDX-License-Identifier: GPL-2.0
+
+set -e
+
+# skip if there's no gcc
+if ! [ -x "$(command -v gcc)" ]; then
+ echo "failed: no gcc compiler"
+ exit 2
+fi
+
+temp_dir=$(mktemp -d /tmp/perf-uprobe-different-cu-sh.XXXXXXXXXX)
+
+cleanup()
+{
+ trap - EXIT TERM INT
+ if [[ "${temp_dir}" =~ ^/tmp/perf-uprobe-different-cu-sh.*$ ]]; then
+ echo "--- Cleaning up ---"
+ perf probe -x ${temp_dir}/testfile -d foo || true
+ rm -f "${temp_dir}/"*
+ rmdir "${temp_dir}"
+ fi
+}
+
+trap_cleanup()
+{
+ cleanup
+ exit 1
+}
+
+trap trap_cleanup EXIT TERM INT
+
+cat > ${temp_dir}/testfile-foo.h << EOF
+struct t
+{
+ int *p;
+ int c;
+};
+
+extern int foo (int i, struct t *t);
+EOF
+
+cat > ${temp_dir}/testfile-foo.c << EOF
+#include "testfile-foo.h"
+
+int
+foo (int i, struct t *t)
+{
+ int j, res = 0;
+ for (j = 0; j < i && j < t->c; j++)
+ res += t->p[j];
+
+ return res;
+}
+EOF
+
+cat > ${temp_dir}/testfile-main.c << EOF
+#include "testfile-foo.h"
+
+static struct t g;
+
+int
+main (int argc, char **argv)
+{
+ int i;
+ int j[argc];
+ g.c = argc;
+ g.p = j;
+ for (i = 0; i < argc; i++)
+ j[i] = (int) argv[i][0];
+ return foo (3, &g);
+}
+EOF
+
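+# Build foo with LTO so its debug info ends up in a different compile unit
+# than main(), which is the case 'perf probe' is exercised against below.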
+gcc -g -Og -flto -c ${temp_dir}/testfile-foo.c -o ${temp_dir}/testfile-foo.o
+gcc -g -Og -c ${temp_dir}/testfile-main.c -o ${temp_dir}/testfile-main.o
+gcc -g -Og -o ${temp_dir}/testfile ${temp_dir}/testfile-foo.o ${temp_dir}/testfile-main.o
+
+perf probe -x ${temp_dir}/testfile --funcs foo
+perf probe -x ${temp_dir}/testfile foo
+
+cleanup
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
new file mode 100755
index 000000000..3d60e993d
--- /dev/null
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+# Check open filename arg using perf trace + vfs_getname
+
+# Uses the 'perf test shell' library to add probe:vfs_getname to the system,
+# then runs 'perf trace' on a 'touch' of a temp file and checks that the
+# filename captured by vfs_getname shows up in the trace output: 'perf trace'
+# already uses "probe:vfs_getname", if present, to beautify the "filename"
+# argument of the "open" syscall.
+
+# SPDX-License-Identifier: GPL-2.0
+# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
+
+. $(dirname $0)/lib/probe.sh
+
+skip_if_no_perf_probe || exit 2
+skip_if_no_perf_trace || exit 2
+
+. $(dirname $0)/lib/probe_vfs_getname.sh
+
+trace_open_vfs_getname() {
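+	# Build a comma-separated list (e.g. "open,openat") of the open* sys_enter tracepoints available on this kernel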
+ evts=$(echo $(perf list syscalls:sys_enter_open* 2>/dev/null | egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
+ perf trace -e $evts touch $file 2>&1 | \
+ egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
+}
+
+
+add_probe_vfs_getname || skip_if_no_debuginfo
+err=$?
+if [ $err -ne 0 ] ; then
+ exit $err
+fi
+
+file=$(mktemp /tmp/temporary_file.XXXXX)
+
+# Do not use the user's ~/.perfconfig file, as it may change the output
+# via trace.{show_timestamp,show_prefix,etc}
+export PERF_CONFIG=/dev/null
+
+trace_open_vfs_getname
+err=$?
+rm -f ${file}
+cleanup_probe_vfs_getname
+exit $err
diff --git a/tools/perf/tests/sigtrap.c b/tools/perf/tests/sigtrap.c
new file mode 100644
index 000000000..1de7478ec
--- /dev/null
+++ b/tools/perf/tests/sigtrap.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic test for sigtrap support.
+ *
+ * Copyright (C) 2021, Google LLC.
+ */
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/string.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/ioctl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "cloexec.h"
+#include "debug.h"
+#include "event.h"
+#include "tests.h"
+#include "../perf-sys.h"
+
+#define NUM_THREADS 5
+
+static struct {
+ int tids_want_signal; /* Which threads still want a signal. */
+ int signal_count; /* Sanity check number of signals received. */
+ volatile int iterate_on; /* Variable to set breakpoint on. */
+ siginfo_t first_siginfo; /* First observed siginfo_t. */
+} ctx;
+
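+/* Value passed in attr.sig_data; expected back in siginfo's si_perf_data when the SIGTRAP fires. */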
+#define TEST_SIG_DATA (~(unsigned long)(&ctx.iterate_on))
+
+static struct perf_event_attr make_event_attr(void)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_BREAKPOINT,
+ .size = sizeof(attr),
+ .sample_period = 1,
+ .disabled = 1,
+ .bp_addr = (unsigned long)&ctx.iterate_on,
+ .bp_type = HW_BREAKPOINT_RW,
+ .bp_len = HW_BREAKPOINT_LEN_1,
+ .inherit = 1, /* Children inherit events ... */
+ .inherit_thread = 1, /* ... but only cloned with CLONE_THREAD. */
+ .remove_on_exec = 1, /* Required by sigtrap. */
+ .sigtrap = 1, /* Request synchronous SIGTRAP on event. */
+ .sig_data = TEST_SIG_DATA,
+ .exclude_kernel = 1, /* To allow */
+ .exclude_hv = 1, /* running as !root */
+ };
+ return attr;
+}
+
+#ifdef HAVE_BPF_SKEL
+#include <bpf/btf.h>
+
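+/*
+ * With vmlinux BTF available, check whether perf_event_attr has a "sigtrap"
+ * member instead of probing via perf_event_open().
+ */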
+static bool attr_has_sigtrap(void)
+{
+ bool ret = false;
+ struct btf *btf;
+ const struct btf_type *t;
+ const struct btf_member *m;
+ const char *name;
+ int i, id;
+
+ btf = btf__load_vmlinux_btf();
+ if (btf == NULL) {
+ /* should be an old kernel */
+ return false;
+ }
+
+ id = btf__find_by_name_kind(btf, "perf_event_attr", BTF_KIND_STRUCT);
+ if (id < 0)
+ goto out;
+
+ t = btf__type_by_id(btf, id);
+ for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
+ name = btf__name_by_offset(btf, m->name_off);
+ if (!strcmp(name, "sigtrap")) {
+ ret = true;
+ break;
+ }
+ }
+out:
+ btf__free(btf);
+ return ret;
+}
+#else /* !HAVE_BPF_SKEL */
+static bool attr_has_sigtrap(void)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_DUMMY,
+ .size = sizeof(attr),
+ .remove_on_exec = 1, /* Required by sigtrap. */
+ .sigtrap = 1, /* Request synchronous SIGTRAP on event. */
+ };
+ int fd;
+ bool ret = false;
+
+ fd = sys_perf_event_open(&attr, 0, -1, -1, perf_event_open_cloexec_flag());
+ if (fd >= 0) {
+ ret = true;
+ close(fd);
+ }
+
+ return ret;
+}
+#endif /* HAVE_BPF_SKEL */
+
+static void
+sigtrap_handler(int signum __maybe_unused, siginfo_t *info, void *ucontext __maybe_unused)
+{
+ if (!__atomic_fetch_add(&ctx.signal_count, 1, __ATOMIC_RELAXED))
+ ctx.first_siginfo = *info;
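+	/* Each test thread adds its tid once per expected trap; subtracting it here should bring the counter back to zero. */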
+ __atomic_fetch_sub(&ctx.tids_want_signal, syscall(SYS_gettid), __ATOMIC_RELAXED);
+}
+
+static void *test_thread(void *arg)
+{
+ pthread_barrier_t *barrier = (pthread_barrier_t *)arg;
+ pid_t tid = syscall(SYS_gettid);
+ int i;
+
+ pthread_barrier_wait(barrier);
+
+ __atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
+ for (i = 0; i < ctx.iterate_on - 1; i++)
+ __atomic_fetch_add(&ctx.tids_want_signal, tid, __ATOMIC_RELAXED);
+
+ return NULL;
+}
+
+static int run_test_threads(pthread_t *threads, pthread_barrier_t *barrier)
+{
+ int i;
+
+ pthread_barrier_wait(barrier);
+ for (i = 0; i < NUM_THREADS; i++)
+ TEST_ASSERT_EQUAL("pthread_join() failed", pthread_join(threads[i], NULL), 0);
+
+ return TEST_OK;
+}
+
+static int run_stress_test(int fd, pthread_t *threads, pthread_barrier_t *barrier)
+{
+ int ret;
+
+ ctx.iterate_on = 3000;
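+	/* Every access to ctx.iterate_on fires the breakpoint; the asserts below expect NUM_THREADS * iterate_on traps in total. */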
+
+ TEST_ASSERT_EQUAL("misfired signal?", ctx.signal_count, 0);
+ TEST_ASSERT_EQUAL("enable failed", ioctl(fd, PERF_EVENT_IOC_ENABLE, 0), 0);
+ ret = run_test_threads(threads, barrier);
+ TEST_ASSERT_EQUAL("disable failed", ioctl(fd, PERF_EVENT_IOC_DISABLE, 0), 0);
+
+ TEST_ASSERT_EQUAL("unexpected sigtraps", ctx.signal_count, NUM_THREADS * ctx.iterate_on);
+ TEST_ASSERT_EQUAL("missing signals or incorrectly delivered", ctx.tids_want_signal, 0);
+ TEST_ASSERT_VAL("unexpected si_addr", ctx.first_siginfo.si_addr == &ctx.iterate_on);
+#if 0 /* FIXME: enable when libc's signal.h has si_perf_{type,data} */
+ TEST_ASSERT_EQUAL("unexpected si_perf_type", ctx.first_siginfo.si_perf_type,
+ PERF_TYPE_BREAKPOINT);
+ TEST_ASSERT_EQUAL("unexpected si_perf_data", ctx.first_siginfo.si_perf_data,
+ TEST_SIG_DATA);
+#endif
+
+ return ret;
+}
+
+static int test__sigtrap(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct perf_event_attr attr = make_event_attr();
+ struct sigaction action = {};
+ struct sigaction oldact;
+ pthread_t threads[NUM_THREADS];
+ pthread_barrier_t barrier;
+ char sbuf[STRERR_BUFSIZE];
+ int i, fd, ret = TEST_FAIL;
+
+ if (!BP_SIGNAL_IS_SUPPORTED) {
+ pr_debug("Test not supported on this architecture");
+ return TEST_SKIP;
+ }
+
+ pthread_barrier_init(&barrier, NULL, NUM_THREADS + 1);
+
+ action.sa_flags = SA_SIGINFO | SA_NODEFER;
+ action.sa_sigaction = sigtrap_handler;
+ sigemptyset(&action.sa_mask);
+ if (sigaction(SIGTRAP, &action, &oldact)) {
+ pr_debug("FAILED sigaction(): %s\n", str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out;
+ }
+
+ fd = sys_perf_event_open(&attr, 0, -1, -1, perf_event_open_cloexec_flag());
+ if (fd < 0) {
+ if (attr_has_sigtrap()) {
+ pr_debug("FAILED sys_perf_event_open(): %s\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ } else {
+ pr_debug("perf_event_attr doesn't have sigtrap\n");
+ ret = TEST_SKIP;
+ }
+ goto out_restore_sigaction;
+ }
+
+ for (i = 0; i < NUM_THREADS; i++) {
+ if (pthread_create(&threads[i], NULL, test_thread, &barrier)) {
+ pr_debug("FAILED pthread_create(): %s\n", str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_close_perf_event;
+ }
+ }
+
+ ret = run_stress_test(fd, threads, &barrier);
+
+out_close_perf_event:
+ close(fd);
+out_restore_sigaction:
+ sigaction(SIGTRAP, &oldact, NULL);
+out:
+ pthread_barrier_destroy(&barrier);
+ return ret;
+}
+
+DEFINE_SUITE("Sigtrap", sigtrap);
diff --git a/tools/perf/tests/stat.c b/tools/perf/tests/stat.c
new file mode 100644
index 000000000..500974040
--- /dev/null
+++ b/tools/perf/tests/stat.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include "event.h"
+#include "tests.h"
+#include "stat.h"
+#include "counts.h"
+#include "debug.h"
+#include "util/synthetic-events.h"
+
+static bool has_term(struct perf_record_stat_config *config,
+ u64 tag, u64 val)
+{
+ unsigned i;
+
+ for (i = 0; i < config->nr; i++) {
+ if ((config->data[i].tag == tag) &&
+ (config->data[i].val == val))
+ return true;
+ }
+
+ return false;
+}
+
+static int process_stat_config_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_record_stat_config *config = &event->stat_config;
+ struct perf_stat_config stat_config;
+
+#define HAS(term, val) \
+ has_term(config, PERF_STAT_CONFIG_TERM__##term, val)
+
+ TEST_ASSERT_VAL("wrong nr", config->nr == PERF_STAT_CONFIG_TERM__MAX);
+ TEST_ASSERT_VAL("wrong aggr_mode", HAS(AGGR_MODE, AGGR_CORE));
+ TEST_ASSERT_VAL("wrong scale", HAS(SCALE, 1));
+ TEST_ASSERT_VAL("wrong interval", HAS(INTERVAL, 1));
+
+#undef HAS
+
+ perf_event__read_stat_config(&stat_config, config);
+
+ TEST_ASSERT_VAL("wrong aggr_mode", stat_config.aggr_mode == AGGR_CORE);
+ TEST_ASSERT_VAL("wrong scale", stat_config.scale == 1);
+ TEST_ASSERT_VAL("wrong interval", stat_config.interval == 1);
+ return 0;
+}
+
+static int test__synthesize_stat_config(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ struct perf_stat_config stat_config = {
+ .aggr_mode = AGGR_CORE,
+ .scale = 1,
+ .interval = 1,
+ };
+
+ TEST_ASSERT_VAL("failed to synthesize stat_config",
+ !perf_event__synthesize_stat_config(NULL, &stat_config, process_stat_config_event, NULL));
+
+ return 0;
+}
+
+static int process_stat_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_record_stat *st = &event->stat;
+
+ TEST_ASSERT_VAL("wrong cpu", st->cpu == 1);
+ TEST_ASSERT_VAL("wrong thread", st->thread == 2);
+ TEST_ASSERT_VAL("wrong id", st->id == 3);
+ TEST_ASSERT_VAL("wrong val", st->val == 100);
+ TEST_ASSERT_VAL("wrong run", st->ena == 200);
+ TEST_ASSERT_VAL("wrong ena", st->run == 300);
+ return 0;
+}
+
+static int test__synthesize_stat(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct perf_counts_values count;
+
+ count.val = 100;
+ count.ena = 200;
+ count.run = 300;
+
+ TEST_ASSERT_VAL("failed to synthesize stat_config",
+ !perf_event__synthesize_stat(NULL, (struct perf_cpu){.cpu = 1}, 2, 3,
+ &count, process_stat_event, NULL));
+
+ return 0;
+}
+
+static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_record_stat_round *stat_round = &event->stat_round;
+
+ TEST_ASSERT_VAL("wrong time", stat_round->time == 0xdeadbeef);
+ TEST_ASSERT_VAL("wrong type", stat_round->type == PERF_STAT_ROUND_TYPE__INTERVAL);
+ return 0;
+}
+
+static int test__synthesize_stat_round(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ TEST_ASSERT_VAL("failed to synthesize stat_config",
+ !perf_event__synthesize_stat_round(NULL, 0xdeadbeef, PERF_STAT_ROUND_TYPE__INTERVAL,
+ process_stat_round_event, NULL));
+
+ return 0;
+}
+
+DEFINE_SUITE("Synthesize stat config", synthesize_stat_config);
+DEFINE_SUITE("Synthesize stat", synthesize_stat);
+DEFINE_SUITE("Synthesize stat round", synthesize_stat_round);
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
new file mode 100644
index 000000000..9cd6fec37
--- /dev/null
+++ b/tools/perf/tests/sw-clock.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <linux/string.h>
+
+#include "tests.h"
+#include "util/debug.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
+#include "util/cpumap.h"
+#include "util/mmap.h"
+#include "util/thread_map.h"
+#include <perf/evlist.h>
+#include <perf/mmap.h>
+
+#define NR_LOOPS 10000000
+
+/*
+ * This test opens software clock events (cpu-clock, task-clock) and
+ * then checks that their frequency -> period conversion carries no
+ * artifact of the period being forcefully set to 1.
+ */
+static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
+{
+ int i, err = -1;
+ volatile int tmp = 0;
+ u64 total_periods = 0;
+ int nr_samples = 0;
+ char sbuf[STRERR_BUFSIZE];
+ union perf_event *event;
+ struct evsel *evsel;
+ struct evlist *evlist;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = clock_id,
+ .sample_type = PERF_SAMPLE_PERIOD,
+ .exclude_kernel = 1,
+ .disabled = 1,
+ .freq = 1,
+ };
+ struct perf_cpu_map *cpus = NULL;
+ struct perf_thread_map *threads = NULL;
+ struct mmap *md;
+
+ attr.sample_freq = 500;
+
+ evlist = evlist__new();
+ if (evlist == NULL) {
+ pr_debug("evlist__new\n");
+ return -1;
+ }
+
+ evsel = evsel__new(&attr);
+ if (evsel == NULL) {
+ pr_debug("evsel__new\n");
+ goto out_delete_evlist;
+ }
+ evlist__add(evlist, evsel);
+
+ cpus = perf_cpu_map__dummy_new();
+ threads = thread_map__new_by_tid(getpid());
+ if (!cpus || !threads) {
+ err = -ENOMEM;
+ pr_debug("Not enough memory to create thread/cpu maps\n");
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__set_maps(&evlist->core, cpus, threads);
+
+ if (evlist__open(evlist)) {
+ const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";
+
+ err = -errno;
+ pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
+ str_error_r(errno, sbuf, sizeof(sbuf)),
+ knob, (u64)attr.sample_freq);
+ goto out_delete_evlist;
+ }
+
+ err = evlist__mmap(evlist, 128);
+ if (err < 0) {
+ pr_debug("failed to mmap event: %d (%s)\n", errno,
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ evlist__enable(evlist);
+
+ /* collect samples */
+ for (i = 0; i < NR_LOOPS; i++)
+ tmp++;
+
+ evlist__disable(evlist);
+
+ md = &evlist->mmap[0];
+ if (perf_mmap__read_init(&md->core) < 0)
+ goto out_init;
+
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
+ struct perf_sample sample;
+
+ if (event->header.type != PERF_RECORD_SAMPLE)
+ goto next_event;
+
+ err = evlist__parse_sample(evlist, event, &sample);
+ if (err < 0) {
+ pr_debug("Error during parse sample\n");
+ goto out_delete_evlist;
+ }
+
+ total_periods += sample.period;
+ nr_samples++;
+next_event:
+ perf_mmap__consume(&md->core);
+ }
+ perf_mmap__read_done(&md->core);
+
+out_init:
+ if ((u64) nr_samples == total_periods) {
+ pr_debug("All (%d) samples have period value of 1!\n",
+ nr_samples);
+ err = -1;
+ }
+
+out_delete_evlist:
+ perf_cpu_map__put(cpus);
+ perf_thread_map__put(threads);
+ evlist__delete(evlist);
+ return err;
+}
+
+static int test__sw_clock_freq(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int ret;
+
+ ret = __test__sw_clock_freq(PERF_COUNT_SW_CPU_CLOCK);
+ if (!ret)
+ ret = __test__sw_clock_freq(PERF_COUNT_SW_TASK_CLOCK);
+
+ return ret;
+}
+
+DEFINE_SUITE("Software clock events period values", sw_clock_freq);
diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c
new file mode 100644
index 000000000..87f565c7f
--- /dev/null
+++ b/tools/perf/tests/switch-tracking.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/time.h>
+#include <sys/prctl.h>
+#include <errno.h>
+#include <limits.h>
+#include <time.h>
+#include <stdlib.h>
+#include <linux/zalloc.h>
+#include <linux/err.h>
+#include <perf/cpumap.h>
+#include <perf/evlist.h>
+#include <perf/mmap.h>
+
+#include "debug.h"
+#include "parse-events.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "thread_map.h"
+#include "record.h"
+#include "tests.h"
+#include "util/mmap.h"
+#include "pmu.h"
+
+static int spin_sleep(void)
+{
+ struct timeval start, now, diff, maxtime;
+ struct timespec ts;
+ int err, i;
+
+ maxtime.tv_sec = 0;
+ maxtime.tv_usec = 50000;
+
+ err = gettimeofday(&start, NULL);
+ if (err)
+ return err;
+
+ /* Spin for 50ms */
+ while (1) {
+ for (i = 0; i < 1000; i++)
+ barrier();
+
+ err = gettimeofday(&now, NULL);
+ if (err)
+ return err;
+
+ timersub(&now, &start, &diff);
+ if (timercmp(&diff, &maxtime, > /* For checkpatch */))
+ break;
+ }
+
+ ts.tv_nsec = 50 * 1000 * 1000;
+ ts.tv_sec = 0;
+
+ /* Sleep for 50ms */
+ err = nanosleep(&ts, NULL);
+ if (err == EINTR)
+ err = 0;
+
+ return err;
+}
+
+struct switch_tracking {
+ struct evsel *switch_evsel;
+ struct evsel *cycles_evsel;
+ pid_t *tids;
+ int nr_tids;
+ int comm_seen[4];
+ int cycles_before_comm_1;
+ int cycles_between_comm_2_and_comm_3;
+ int cycles_after_comm_4;
+};
+
+static int check_comm(struct switch_tracking *switch_tracking,
+ union perf_event *event, const char *comm, int nr)
+{
+ if (event->header.type == PERF_RECORD_COMM &&
+ (pid_t)event->comm.pid == getpid() &&
+ (pid_t)event->comm.tid == getpid() &&
+ strcmp(event->comm.comm, comm) == 0) {
+ if (switch_tracking->comm_seen[nr]) {
+ pr_debug("Duplicate comm event\n");
+ return -1;
+ }
+ switch_tracking->comm_seen[nr] = 1;
+ pr_debug3("comm event: %s nr: %d\n", event->comm.comm, nr);
+ return 1;
+ }
+ return 0;
+}
+
+static int check_cpu(struct switch_tracking *switch_tracking, int cpu)
+{
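+	/* Grow the per-cpu tid array on demand so tids[cpu] is always valid. */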
+ int i, nr = cpu + 1;
+
+ if (cpu < 0)
+ return -1;
+
+ if (!switch_tracking->tids) {
+ switch_tracking->tids = calloc(nr, sizeof(pid_t));
+ if (!switch_tracking->tids)
+ return -1;
+ for (i = 0; i < nr; i++)
+ switch_tracking->tids[i] = -1;
+ switch_tracking->nr_tids = nr;
+ return 0;
+ }
+
+ if (cpu >= switch_tracking->nr_tids) {
+ void *addr;
+
+ addr = realloc(switch_tracking->tids, nr * sizeof(pid_t));
+ if (!addr)
+ return -1;
+ switch_tracking->tids = addr;
+ for (i = switch_tracking->nr_tids; i < nr; i++)
+ switch_tracking->tids[i] = -1;
+ switch_tracking->nr_tids = nr;
+ return 0;
+ }
+
+ return 0;
+}
+
+static int process_sample_event(struct evlist *evlist,
+ union perf_event *event,
+ struct switch_tracking *switch_tracking)
+{
+ struct perf_sample sample;
+ struct evsel *evsel;
+ pid_t next_tid, prev_tid;
+ int cpu, err;
+
+ if (evlist__parse_sample(evlist, event, &sample)) {
+ pr_debug("evlist__parse_sample failed\n");
+ return -1;
+ }
+
+ evsel = evlist__id2evsel(evlist, sample.id);
+ if (evsel == switch_tracking->switch_evsel) {
+ next_tid = evsel__intval(evsel, &sample, "next_pid");
+ prev_tid = evsel__intval(evsel, &sample, "prev_pid");
+ cpu = sample.cpu;
+ pr_debug3("sched_switch: cpu: %d prev_tid %d next_tid %d\n",
+ cpu, prev_tid, next_tid);
+ err = check_cpu(switch_tracking, cpu);
+ if (err)
+ return err;
+ /*
+ * Check for no missing sched_switch events i.e. that the
+ * evsel->core.system_wide flag has worked.
+ */
+ if (switch_tracking->tids[cpu] != -1 &&
+ switch_tracking->tids[cpu] != prev_tid) {
+ pr_debug("Missing sched_switch events\n");
+ return -1;
+ }
+ switch_tracking->tids[cpu] = next_tid;
+ }
+
+ if (evsel == switch_tracking->cycles_evsel) {
+ pr_debug3("cycles event\n");
+ if (!switch_tracking->comm_seen[0])
+ switch_tracking->cycles_before_comm_1 = 1;
+ if (switch_tracking->comm_seen[1] &&
+ !switch_tracking->comm_seen[2])
+ switch_tracking->cycles_between_comm_2_and_comm_3 = 1;
+ if (switch_tracking->comm_seen[3])
+ switch_tracking->cycles_after_comm_4 = 1;
+ }
+
+ return 0;
+}
+
+static int process_event(struct evlist *evlist, union perf_event *event,
+ struct switch_tracking *switch_tracking)
+{
+ if (event->header.type == PERF_RECORD_SAMPLE)
+ return process_sample_event(evlist, event, switch_tracking);
+
+ if (event->header.type == PERF_RECORD_COMM) {
+ int err, done = 0;
+
+ err = check_comm(switch_tracking, event, "Test COMM 1", 0);
+ if (err < 0)
+ return -1;
+ done += err;
+ err = check_comm(switch_tracking, event, "Test COMM 2", 1);
+ if (err < 0)
+ return -1;
+ done += err;
+ err = check_comm(switch_tracking, event, "Test COMM 3", 2);
+ if (err < 0)
+ return -1;
+ done += err;
+ err = check_comm(switch_tracking, event, "Test COMM 4", 3);
+ if (err < 0)
+ return -1;
+ done += err;
+ if (done != 1) {
+ pr_debug("Unexpected comm event\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+struct event_node {
+ struct list_head list;
+ union perf_event *event;
+ u64 event_time;
+};
+
+static int add_event(struct evlist *evlist, struct list_head *events,
+ union perf_event *event)
+{
+ struct perf_sample sample;
+ struct event_node *node;
+
+ node = malloc(sizeof(struct event_node));
+ if (!node) {
+ pr_debug("malloc failed\n");
+ return -1;
+ }
+ node->event = event;
+ list_add(&node->list, events);
+
+ if (evlist__parse_sample(evlist, event, &sample)) {
+ pr_debug("evlist__parse_sample failed\n");
+ return -1;
+ }
+
+ if (!sample.time) {
+ pr_debug("event with no time\n");
+ return -1;
+ }
+
+ node->event_time = sample.time;
+
+ return 0;
+}
+
+static void free_event_nodes(struct list_head *events)
+{
+ struct event_node *node;
+
+ while (!list_empty(events)) {
+ node = list_entry(events->next, struct event_node, list);
+ list_del_init(&node->list);
+ free(node);
+ }
+}
+
+static int compar(const void *a, const void *b)
+{
+ const struct event_node *nodea = a;
+ const struct event_node *nodeb = b;
+ s64 cmp = nodea->event_time - nodeb->event_time;
+
+ return cmp;
+}
+
+static int process_events(struct evlist *evlist,
+ struct switch_tracking *switch_tracking)
+{
+ union perf_event *event;
+ unsigned pos, cnt = 0;
+ LIST_HEAD(events);
+ struct event_node *events_array, *node;
+ struct mmap *md;
+ int i, ret;
+
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ md = &evlist->mmap[i];
+ if (perf_mmap__read_init(&md->core) < 0)
+ continue;
+
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
+ cnt += 1;
+ ret = add_event(evlist, &events, event);
+ perf_mmap__consume(&md->core);
+ if (ret < 0)
+ goto out_free_nodes;
+ }
+ perf_mmap__read_done(&md->core);
+ }
+
+ events_array = calloc(cnt, sizeof(struct event_node));
+ if (!events_array) {
+ pr_debug("calloc failed\n");
+ ret = -1;
+ goto out_free_nodes;
+ }
+
+ pos = 0;
+ list_for_each_entry(node, &events, list)
+ events_array[pos++] = *node;
+
+ qsort(events_array, cnt, sizeof(struct event_node), compar);
+
+ for (pos = 0; pos < cnt; pos++) {
+ ret = process_event(evlist, events_array[pos].event,
+ switch_tracking);
+ if (ret < 0)
+ goto out_free;
+ }
+
+ ret = 0;
+out_free:
+ pr_debug("%u events recorded\n", cnt);
+ free(events_array);
+out_free_nodes:
+ free_event_nodes(&events);
+ return ret;
+}
+
+/**
+ * test__switch_tracking - test using sched_switch and tracking events.
+ *
+ * This function implements a test that checks that sched_switch events and
+ * tracking events can be recorded for a workload (current process) using the
+ * evsel->core.system_wide and evsel->tracking flags (respectively) with other events
+ * sometimes enabled or disabled.
+ */
+static int test__switch_tracking(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ const char *sched_switch = "sched:sched_switch";
+ const char *cycles = "cycles:u";
+ struct switch_tracking switch_tracking = { .tids = NULL, };
+ struct record_opts opts = {
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .freq = 4000,
+ .target = {
+ .uses_mmap = true,
+ },
+ };
+ struct perf_thread_map *threads = NULL;
+ struct perf_cpu_map *cpus = NULL;
+ struct evlist *evlist = NULL;
+ struct evsel *evsel, *cpu_clocks_evsel, *cycles_evsel;
+ struct evsel *switch_evsel, *tracking_evsel;
+ const char *comm;
+ int err = -1;
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ if (!threads) {
+ pr_debug("thread_map__new failed!\n");
+ goto out_err;
+ }
+
+ cpus = perf_cpu_map__new(NULL);
+ if (!cpus) {
+ pr_debug("perf_cpu_map__new failed!\n");
+ goto out_err;
+ }
+
+ evlist = evlist__new();
+ if (!evlist) {
+ pr_debug("evlist__new failed!\n");
+ goto out_err;
+ }
+
+ perf_evlist__set_maps(&evlist->core, cpus, threads);
+
+ /* First event */
+ err = parse_event(evlist, "cpu-clock:u");
+ if (err) {
+ pr_debug("Failed to parse event dummy:u\n");
+ goto out_err;
+ }
+
+ cpu_clocks_evsel = evlist__last(evlist);
+
+ /* Second event */
+ if (perf_pmu__has_hybrid()) {
+ cycles = "cpu_core/cycles/u";
+ err = parse_event(evlist, cycles);
+ if (err) {
+ cycles = "cpu_atom/cycles/u";
+ pr_debug("Trying %s\n", cycles);
+ err = parse_event(evlist, cycles);
+ }
+ } else {
+ err = parse_event(evlist, cycles);
+ }
+ if (err) {
+ pr_debug("Failed to parse event %s\n", cycles);
+ goto out_err;
+ }
+
+ cycles_evsel = evlist__last(evlist);
+
+ /* Third event */
+ if (!evlist__can_select_event(evlist, sched_switch)) {
+ pr_debug("No sched_switch\n");
+ err = 0;
+ goto out;
+ }
+
+ switch_evsel = evlist__add_sched_switch(evlist, true);
+ if (IS_ERR(switch_evsel)) {
+ err = PTR_ERR(switch_evsel);
+ pr_debug("Failed to create event %s\n", sched_switch);
+ goto out_err;
+ }
+
+ switch_evsel->immediate = true;
+
+ /* Test moving an event to the front */
+ if (cycles_evsel == evlist__first(evlist)) {
+ pr_debug("cycles event already at front");
+ goto out_err;
+ }
+ evlist__to_front(evlist, cycles_evsel);
+ if (cycles_evsel != evlist__first(evlist)) {
+ pr_debug("Failed to move cycles event to front");
+ goto out_err;
+ }
+
+ evsel__set_sample_bit(cycles_evsel, CPU);
+ evsel__set_sample_bit(cycles_evsel, TIME);
+
+ /* Fourth event */
+ err = parse_event(evlist, "dummy:u");
+ if (err) {
+ pr_debug("Failed to parse event dummy:u\n");
+ goto out_err;
+ }
+
+ tracking_evsel = evlist__last(evlist);
+
+ evlist__set_tracking_event(evlist, tracking_evsel);
+
+ tracking_evsel->core.attr.freq = 0;
+ tracking_evsel->core.attr.sample_period = 1;
+
+ evsel__set_sample_bit(tracking_evsel, TIME);
+
+ /* Config events */
+ evlist__config(evlist, &opts, NULL);
+
+ /* Check moved event is still at the front */
+ if (cycles_evsel != evlist__first(evlist)) {
+ pr_debug("Front event no longer at front");
+ goto out_err;
+ }
+
+ /* Check tracking event is tracking */
+ if (!tracking_evsel->core.attr.mmap || !tracking_evsel->core.attr.comm) {
+ pr_debug("Tracking event not tracking\n");
+ goto out_err;
+ }
+
+ /* Check non-tracking events are not tracking */
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel != tracking_evsel) {
+ if (evsel->core.attr.mmap || evsel->core.attr.comm) {
+ pr_debug("Non-tracking event is tracking\n");
+ goto out_err;
+ }
+ }
+ }
+
+ if (evlist__open(evlist) < 0) {
+ pr_debug("Not supported\n");
+ err = 0;
+ goto out;
+ }
+
+ err = evlist__mmap(evlist, UINT_MAX);
+ if (err) {
+ pr_debug("evlist__mmap failed!\n");
+ goto out_err;
+ }
+
+ evlist__enable(evlist);
+
+ err = evsel__disable(cpu_clocks_evsel);
+ if (err) {
+ pr_debug("perf_evlist__disable_event failed!\n");
+ goto out_err;
+ }
+
+ err = spin_sleep();
+ if (err) {
+ pr_debug("spin_sleep failed!\n");
+ goto out_err;
+ }
+
+ comm = "Test COMM 1";
+ err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
+ if (err) {
+ pr_debug("PR_SET_NAME failed!\n");
+ goto out_err;
+ }
+
+ err = evsel__disable(cycles_evsel);
+ if (err) {
+ pr_debug("perf_evlist__disable_event failed!\n");
+ goto out_err;
+ }
+
+ comm = "Test COMM 2";
+ err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
+ if (err) {
+ pr_debug("PR_SET_NAME failed!\n");
+ goto out_err;
+ }
+
+ err = spin_sleep();
+ if (err) {
+ pr_debug("spin_sleep failed!\n");
+ goto out_err;
+ }
+
+ comm = "Test COMM 3";
+ err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
+ if (err) {
+ pr_debug("PR_SET_NAME failed!\n");
+ goto out_err;
+ }
+
+ err = evsel__enable(cycles_evsel);
+ if (err) {
+ pr_debug("perf_evlist__disable_event failed!\n");
+ goto out_err;
+ }
+
+ comm = "Test COMM 4";
+ err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
+ if (err) {
+ pr_debug("PR_SET_NAME failed!\n");
+ goto out_err;
+ }
+
+ err = spin_sleep();
+ if (err) {
+ pr_debug("spin_sleep failed!\n");
+ goto out_err;
+ }
+
+ evlist__disable(evlist);
+
+ switch_tracking.switch_evsel = switch_evsel;
+ switch_tracking.cycles_evsel = cycles_evsel;
+
+ err = process_events(evlist, &switch_tracking);
+
+ zfree(&switch_tracking.tids);
+
+ if (err)
+ goto out_err;
+
+ /* Check all 4 comm events were seen i.e. that evsel->tracking works */
+ if (!switch_tracking.comm_seen[0] || !switch_tracking.comm_seen[1] ||
+ !switch_tracking.comm_seen[2] || !switch_tracking.comm_seen[3]) {
+ pr_debug("Missing comm events\n");
+ goto out_err;
+ }
+
+ /* Check cycles event got enabled */
+ if (!switch_tracking.cycles_before_comm_1) {
+ pr_debug("Missing cycles events\n");
+ goto out_err;
+ }
+
+ /* Check cycles event got disabled */
+ if (switch_tracking.cycles_between_comm_2_and_comm_3) {
+ pr_debug("cycles events even though event was disabled\n");
+ goto out_err;
+ }
+
+ /* Check cycles event got enabled again */
+ if (!switch_tracking.cycles_after_comm_4) {
+ pr_debug("Missing cycles events\n");
+ goto out_err;
+ }
+out:
+ if (evlist) {
+ evlist__disable(evlist);
+ evlist__delete(evlist);
+ }
+ perf_cpu_map__put(cpus);
+ perf_thread_map__put(threads);
+
+ return err;
+
+out_err:
+ err = -1;
+ goto out;
+}
+
+DEFINE_SUITE("Track with sched_switch", switch_tracking);
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
new file mode 100644
index 000000000..25f075fa9
--- /dev/null
+++ b/tools/perf/tests/task-exit.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "debug.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "target.h"
+#include "thread_map.h"
+#include "tests.h"
+#include "util/mmap.h"
+
+#include <errno.h>
+#include <signal.h>
+#include <linux/string.h>
+#include <perf/cpumap.h>
+#include <perf/evlist.h>
+#include <perf/mmap.h>
+
+static int exited;
+static int nr_exit;
+
+static void sig_handler(int sig __maybe_unused)
+{
+ exited = 1;
+}
+
+/*
+ * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
+ * we asked for that by setting its exec_error to this handler.
+ */
+static void workload_exec_failed_signal(int signo __maybe_unused,
+ siginfo_t *info __maybe_unused,
+ void *ucontext __maybe_unused)
+{
+ exited = 1;
+ nr_exit = -1;
+}
+
+/*
+ * This test starts a workload that does nothing, then checks that the
+ * number of exit events reported by the kernel is exactly 1, i.e. that
+ * the kernel returns the correct number of events.
+ */
+static int test__task_exit(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ int err = -1;
+ union perf_event *event;
+ struct evsel *evsel;
+ struct evlist *evlist;
+ struct target target = {
+ .uid = UINT_MAX,
+ .uses_mmap = true,
+ };
+ const char *argv[] = { "true", NULL };
+ char sbuf[STRERR_BUFSIZE];
+ struct perf_cpu_map *cpus;
+ struct perf_thread_map *threads;
+ struct mmap *md;
+ int retry_count = 0;
+
+ signal(SIGCHLD, sig_handler);
+
+ evlist = evlist__new_default();
+ if (evlist == NULL) {
+ pr_debug("evlist__new_default\n");
+ return -1;
+ }
+
+ /*
+ * Create maps of threads and cpus to monitor. In this case
+ * we start with all threads and cpus (-1, -1) but then in
+ * evlist__prepare_workload we'll fill in the only thread
+ * we're monitoring, the one forked there.
+ */
+ cpus = perf_cpu_map__dummy_new();
+ threads = thread_map__new_by_tid(-1);
+ if (!cpus || !threads) {
+ err = -ENOMEM;
+ pr_debug("Not enough memory to create thread/cpu maps\n");
+ goto out_delete_evlist;
+ }
+
+ perf_evlist__set_maps(&evlist->core, cpus, threads);
+
+ err = evlist__prepare_workload(evlist, &target, argv, false, workload_exec_failed_signal);
+ if (err < 0) {
+ pr_debug("Couldn't run the workload!\n");
+ goto out_delete_evlist;
+ }
+
+ evsel = evlist__first(evlist);
+ evsel->core.attr.task = 1;
+#ifdef __s390x__
+ evsel->core.attr.sample_freq = 1000000;
+#else
+ evsel->core.attr.sample_freq = 1;
+#endif
+ evsel->core.attr.inherit = 0;
+ evsel->core.attr.watermark = 0;
+ evsel->core.attr.wakeup_events = 1;
+ evsel->core.attr.exclude_kernel = 1;
+
+ err = evlist__open(evlist);
+ if (err < 0) {
+ pr_debug("Couldn't open the evlist: %s\n",
+ str_error_r(-err, sbuf, sizeof(sbuf)));
+ goto out_delete_evlist;
+ }
+
+ if (evlist__mmap(evlist, 128) < 0) {
+ pr_debug("failed to mmap events: %d (%s)\n", errno,
+ str_error_r(errno, sbuf, sizeof(sbuf)));
+ err = -1;
+ goto out_delete_evlist;
+ }
+
+ evlist__start_workload(evlist);
+
+retry:
+ md = &evlist->mmap[0];
+ if (perf_mmap__read_init(&md->core) < 0)
+ goto out_init;
+
+ while ((event = perf_mmap__read_event(&md->core)) != NULL) {
+ if (event->header.type == PERF_RECORD_EXIT)
+ nr_exit++;
+
+ perf_mmap__consume(&md->core);
+ }
+ perf_mmap__read_done(&md->core);
+
+out_init:
+ if (!exited || !nr_exit) {
+ evlist__poll(evlist, -1);
+
+ if (retry_count++ > 1000) {
+ pr_debug("Failed after retrying 1000 times\n");
+ err = -1;
+ goto out_delete_evlist;
+ }
+
+ goto retry;
+ }
+
+ if (nr_exit != 1) {
+ pr_debug("received %d EXIT records\n", nr_exit);
+ err = -1;
+ }
+
+out_delete_evlist:
+ perf_cpu_map__put(cpus);
+ perf_thread_map__put(threads);
+ evlist__delete(evlist);
+ return err;
+}
+
+DEFINE_SUITE("Number of exit events of a simple workload", task_exit);
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
new file mode 100644
index 000000000..5bbb8f6a4
--- /dev/null
+++ b/tools/perf/tests/tests.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef TESTS_H
+#define TESTS_H
+
+#include <stdbool.h>
+
+#define TEST_ASSERT_VAL(text, cond) \
+do { \
+ if (!(cond)) { \
+ pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
+ return -1; \
+ } \
+} while (0)
+
+#define TEST_ASSERT_EQUAL(text, val, expected) \
+do { \
+ if (val != expected) { \
+ pr_debug("FAILED %s:%d %s (%d != %d)\n", \
+ __FILE__, __LINE__, text, val, expected); \
+ return -1; \
+ } \
+} while (0)
+
+enum {
+ TEST_OK = 0,
+ TEST_FAIL = -1,
+ TEST_SKIP = -2,
+};
+
+struct test_suite;
+
+typedef int (*test_fnptr)(struct test_suite *, int);
+
+struct test_case {
+ const char *name;
+ const char *desc;
+ const char *skip_reason;
+ test_fnptr run_case;
+};
+
+struct test_suite {
+ const char *desc;
+ struct test_case *test_cases;
+ void *priv;
+};
+
+#define DECLARE_SUITE(name) \
+ extern struct test_suite suite__##name;
+
+#define TEST_CASE(description, _name) \
+ { \
+ .name = #_name, \
+ .desc = description, \
+ .run_case = test__##_name, \
+ }
+
+#define TEST_CASE_REASON(description, _name, _reason) \
+ { \
+ .name = #_name, \
+ .desc = description, \
+ .run_case = test__##_name, \
+ .skip_reason = _reason, \
+ }
+
+#define DEFINE_SUITE(description, _name) \
+ struct test_case tests__##_name[] = { \
+ TEST_CASE(description, _name), \
+ { .name = NULL, } \
+ }; \
+ struct test_suite suite__##_name = { \
+ .desc = description, \
+ .test_cases = tests__##_name, \
+ }
+
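+/*
+ * DEFINE_SUITE() wraps a single test__<name>() function into a one-case
+ * suite; the DECLARE_SUITE() entries below make each suite visible to the
+ * central test driver.
+ */
+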
+/* Tests */
+DECLARE_SUITE(vmlinux_matches_kallsyms);
+DECLARE_SUITE(openat_syscall_event);
+DECLARE_SUITE(openat_syscall_event_on_all_cpus);
+DECLARE_SUITE(basic_mmap);
+DECLARE_SUITE(PERF_RECORD);
+DECLARE_SUITE(perf_evsel__roundtrip_name_test);
+DECLARE_SUITE(perf_evsel__tp_sched_test);
+DECLARE_SUITE(syscall_openat_tp_fields);
+DECLARE_SUITE(pmu);
+DECLARE_SUITE(pmu_events);
+DECLARE_SUITE(attr);
+DECLARE_SUITE(dso_data);
+DECLARE_SUITE(dso_data_cache);
+DECLARE_SUITE(dso_data_reopen);
+DECLARE_SUITE(parse_events);
+DECLARE_SUITE(hists_link);
+DECLARE_SUITE(python_use);
+DECLARE_SUITE(bp_signal);
+DECLARE_SUITE(bp_signal_overflow);
+DECLARE_SUITE(bp_accounting);
+DECLARE_SUITE(wp);
+DECLARE_SUITE(task_exit);
+DECLARE_SUITE(mem);
+DECLARE_SUITE(sw_clock_freq);
+DECLARE_SUITE(code_reading);
+DECLARE_SUITE(sample_parsing);
+DECLARE_SUITE(keep_tracking);
+DECLARE_SUITE(parse_no_sample_id_all);
+DECLARE_SUITE(dwarf_unwind);
+DECLARE_SUITE(expr);
+DECLARE_SUITE(hists_filter);
+DECLARE_SUITE(mmap_thread_lookup);
+DECLARE_SUITE(thread_maps_share);
+DECLARE_SUITE(hists_output);
+DECLARE_SUITE(hists_cumulate);
+DECLARE_SUITE(switch_tracking);
+DECLARE_SUITE(fdarray__filter);
+DECLARE_SUITE(fdarray__add);
+DECLARE_SUITE(kmod_path__parse);
+DECLARE_SUITE(thread_map);
+DECLARE_SUITE(llvm);
+DECLARE_SUITE(bpf);
+DECLARE_SUITE(session_topology);
+DECLARE_SUITE(thread_map_synthesize);
+DECLARE_SUITE(thread_map_remove);
+DECLARE_SUITE(cpu_map_synthesize);
+DECLARE_SUITE(synthesize_stat_config);
+DECLARE_SUITE(synthesize_stat);
+DECLARE_SUITE(synthesize_stat_round);
+DECLARE_SUITE(event_update);
+DECLARE_SUITE(event_times);
+DECLARE_SUITE(backward_ring_buffer);
+DECLARE_SUITE(cpu_map_print);
+DECLARE_SUITE(cpu_map_merge);
+DECLARE_SUITE(sdt_event);
+DECLARE_SUITE(is_printable_array);
+DECLARE_SUITE(bitmap_print);
+DECLARE_SUITE(perf_hooks);
+DECLARE_SUITE(clang);
+DECLARE_SUITE(unit_number__scnprint);
+DECLARE_SUITE(mem2node);
+DECLARE_SUITE(maps__merge_in);
+DECLARE_SUITE(time_utils);
+DECLARE_SUITE(jit_write_elf);
+DECLARE_SUITE(api_io);
+DECLARE_SUITE(demangle_java);
+DECLARE_SUITE(demangle_ocaml);
+DECLARE_SUITE(pfm);
+DECLARE_SUITE(parse_metric);
+DECLARE_SUITE(pe_file_parsing);
+DECLARE_SUITE(expand_cgroup_events);
+DECLARE_SUITE(perf_time_to_tsc);
+DECLARE_SUITE(dlfilter);
+DECLARE_SUITE(sigtrap);
+
+/*
+ * PowerPC and S390 do not support creation of instruction breakpoints using the
+ * perf_event interface.
+ *
+ * ARM requires explicit rounding down of the instruction pointer in Thumb mode,
+ * and then requires the single-step to be handled explicitly in the overflow
+ * handler to avoid stepping into the SIGIO handler and getting stuck on the
+ * breakpointed instruction.
+ *
+ * Since arm64 has the same single-step handling issue as arm, that case also
+ * gets stuck on the breakpointed instruction.
+ *
+ * Just disable the test for these architectures until these issues are
+ * resolved.
+ */
+#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__) || defined(__aarch64__)
+#define BP_SIGNAL_IS_SUPPORTED 0
+#else
+#define BP_SIGNAL_IS_SUPPORTED 1
+#endif
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+struct thread;
+struct perf_sample;
+int test__arch_unwind_sample(struct perf_sample *sample,
+ struct thread *thread);
+#endif
+
+#if defined(__arm__)
+DECLARE_SUITE(vectors_page);
+#endif
+
+#endif /* TESTS_H */
diff --git a/tools/perf/tests/thread-map.c b/tools/perf/tests/thread-map.c
new file mode 100644
index 000000000..e413c1387
--- /dev/null
+++ b/tools/perf/tests/thread-map.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/prctl.h>
+#include "tests.h"
+#include "thread_map.h"
+#include "debug.h"
+#include "event.h"
+#include "util/synthetic-events.h"
+#include <linux/zalloc.h>
+#include <perf/event.h>
+
+struct perf_sample;
+struct perf_tool;
+struct machine;
+
+#define NAME (const char *) "perf"
+#define NAMEUL (unsigned long) NAME
+
+static int test__thread_map(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct perf_thread_map *map;
+
+ TEST_ASSERT_VAL("failed to set process name",
+ !prctl(PR_SET_NAME, NAMEUL, 0, 0, 0));
+
+ /* test map on current pid */
+ map = thread_map__new_by_pid(getpid());
+ TEST_ASSERT_VAL("failed to alloc map", map);
+
+ thread_map__read_comms(map);
+
+ TEST_ASSERT_VAL("wrong nr", map->nr == 1);
+ TEST_ASSERT_VAL("wrong pid",
+ perf_thread_map__pid(map, 0) == getpid());
+ TEST_ASSERT_VAL("wrong comm",
+ perf_thread_map__comm(map, 0) &&
+ !strcmp(perf_thread_map__comm(map, 0), NAME));
+ TEST_ASSERT_VAL("wrong refcnt",
+ refcount_read(&map->refcnt) == 1);
+ perf_thread_map__put(map);
+
+ /* test dummy pid */
+ map = perf_thread_map__new_dummy();
+ TEST_ASSERT_VAL("failed to alloc map", map);
+
+ thread_map__read_comms(map);
+
+ TEST_ASSERT_VAL("wrong nr", map->nr == 1);
+ TEST_ASSERT_VAL("wrong pid", perf_thread_map__pid(map, 0) == -1);
+ TEST_ASSERT_VAL("wrong comm",
+ perf_thread_map__comm(map, 0) &&
+ !strcmp(perf_thread_map__comm(map, 0), "dummy"));
+ TEST_ASSERT_VAL("wrong refcnt",
+ refcount_read(&map->refcnt) == 1);
+ perf_thread_map__put(map);
+ return 0;
+}
+
+static int process_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_record_thread_map *map = &event->thread_map;
+ struct perf_thread_map *threads;
+
+ TEST_ASSERT_VAL("wrong nr", map->nr == 1);
+ TEST_ASSERT_VAL("wrong pid", map->entries[0].pid == (u64) getpid());
+ TEST_ASSERT_VAL("wrong comm", !strcmp(map->entries[0].comm, NAME));
+
+ threads = thread_map__new_event(&event->thread_map);
+ TEST_ASSERT_VAL("failed to alloc map", threads);
+
+ TEST_ASSERT_VAL("wrong nr", threads->nr == 1);
+ TEST_ASSERT_VAL("wrong pid",
+ perf_thread_map__pid(threads, 0) == getpid());
+ TEST_ASSERT_VAL("wrong comm",
+ perf_thread_map__comm(threads, 0) &&
+ !strcmp(perf_thread_map__comm(threads, 0), NAME));
+ TEST_ASSERT_VAL("wrong refcnt",
+ refcount_read(&threads->refcnt) == 1);
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+static int test__thread_map_synthesize(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct perf_thread_map *threads;
+
+ TEST_ASSERT_VAL("failed to set process name",
+ !prctl(PR_SET_NAME, NAMEUL, 0, 0, 0));
+
+ /* test map on current pid */
+ threads = thread_map__new_by_pid(getpid());
+ TEST_ASSERT_VAL("failed to alloc map", threads);
+
+ thread_map__read_comms(threads);
+
+ TEST_ASSERT_VAL("failed to synthesize map",
+ !perf_event__synthesize_thread_map2(NULL, threads, process_event, NULL));
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+static int test__thread_map_remove(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct perf_thread_map *threads;
+ char *str;
+
+ TEST_ASSERT_VAL("failed to allocate map string",
+ asprintf(&str, "%d,%d", getpid(), getppid()) >= 0);
+
+ threads = thread_map__new_str(str, NULL, 0, false);
+ free(str);
+
+ TEST_ASSERT_VAL("failed to allocate thread_map",
+ threads);
+
+ if (verbose > 0)
+ thread_map__fprintf(threads, stderr);
+
+ TEST_ASSERT_VAL("failed to remove thread",
+ !thread_map__remove(threads, 0));
+
+ TEST_ASSERT_VAL("thread_map count != 1", threads->nr == 1);
+
+ if (verbose > 0)
+ thread_map__fprintf(threads, stderr);
+
+ TEST_ASSERT_VAL("failed to remove thread",
+ !thread_map__remove(threads, 0));
+
+ TEST_ASSERT_VAL("thread_map count != 0", threads->nr == 0);
+
+ if (verbose > 0)
+ thread_map__fprintf(threads, stderr);
+
+ TEST_ASSERT_VAL("failed to not remove thread",
+ thread_map__remove(threads, 0));
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+DEFINE_SUITE("Thread map", thread_map);
+DEFINE_SUITE("Synthesize thread map", thread_map_synthesize);
+DEFINE_SUITE("Remove thread map", thread_map_remove);
diff --git a/tools/perf/tests/thread-maps-share.c b/tools/perf/tests/thread-maps-share.c
new file mode 100644
index 000000000..84edd82c5
--- /dev/null
+++ b/tools/perf/tests/thread-maps-share.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "tests.h"
+#include "machine.h"
+#include "thread.h"
+#include "debug.h"
+
+static int test__thread_maps_share(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ struct machines machines;
+ struct machine *machine;
+
+ /* thread group */
+ struct thread *leader;
+ struct thread *t1, *t2, *t3;
+ struct maps *maps;
+
+ /* other process */
+ struct thread *other, *other_leader;
+ struct maps *other_maps;
+
+ /*
+	 * This test creates 2 process abstractions (struct thread)
+	 * with several threads and checks that they properly share and
+	 * maintain their maps info (struct maps).
+ *
+ * thread group (pid: 0, tids: 0, 1, 2, 3)
+ * other group (pid: 4, tids: 4, 5)
+ */
+
+ machines__init(&machines);
+ machine = &machines.host;
+
+ /* create process with 4 threads */
+ leader = machine__findnew_thread(machine, 0, 0);
+ t1 = machine__findnew_thread(machine, 0, 1);
+ t2 = machine__findnew_thread(machine, 0, 2);
+ t3 = machine__findnew_thread(machine, 0, 3);
+
+ /* and create 1 separated process, without thread leader */
+ other = machine__findnew_thread(machine, 4, 5);
+
+ TEST_ASSERT_VAL("failed to create threads",
+ leader && t1 && t2 && t3 && other);
+
+ maps = leader->maps;
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&maps->refcnt), 4);
+
+ /* test the maps pointer is shared */
+ TEST_ASSERT_VAL("maps don't match", maps == t1->maps);
+ TEST_ASSERT_VAL("maps don't match", maps == t2->maps);
+ TEST_ASSERT_VAL("maps don't match", maps == t3->maps);
+
+ /*
+ * Verify the other leader was created by previous call.
+ * It should have shared maps with no change in
+ * refcnt.
+ */
+ other_leader = machine__find_thread(machine, 4, 4);
+ TEST_ASSERT_VAL("failed to find other leader", other_leader);
+
+ /*
+ * Ok, now that all the rbtree related operations were done,
+ * lets remove all of them from there so that we can do the
+ * refcounting tests.
+ */
+ machine__remove_thread(machine, leader);
+ machine__remove_thread(machine, t1);
+ machine__remove_thread(machine, t2);
+ machine__remove_thread(machine, t3);
+ machine__remove_thread(machine, other);
+ machine__remove_thread(machine, other_leader);
+
+ other_maps = other->maps;
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&other_maps->refcnt), 2);
+
+ TEST_ASSERT_VAL("maps don't match", other_maps == other_leader->maps);
+
+ /* release thread group */
+ thread__put(leader);
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&maps->refcnt), 3);
+
+ thread__put(t1);
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&maps->refcnt), 2);
+
+ thread__put(t2);
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&maps->refcnt), 1);
+
+ thread__put(t3);
+
+ /* release other group */
+ thread__put(other_leader);
+ TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(&other_maps->refcnt), 1);
+
+ thread__put(other);
+
+ machines__exit(&machines);
+ return 0;
+}
+
+DEFINE_SUITE("Share thread maps", thread_maps_share);
diff --git a/tools/perf/tests/time-utils-test.c b/tools/perf/tests/time-utils-test.c
new file mode 100644
index 000000000..38df10373
--- /dev/null
+++ b/tools/perf/tests/time-utils-test.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <linux/time64.h>
+#include <inttypes.h>
+#include <string.h>
+#include "time-utils.h"
+#include "evlist.h"
+#include "session.h"
+#include "debug.h"
+#include "tests.h"
+
+static bool test__parse_nsec_time(const char *str, u64 expected)
+{
+ u64 ptime;
+ int err;
+
+ pr_debug("\nparse_nsec_time(\"%s\")\n", str);
+
+ err = parse_nsec_time(str, &ptime);
+ if (err) {
+ pr_debug("error %d\n", err);
+ return false;
+ }
+
+ if (ptime != expected) {
+ pr_debug("Failed. ptime %" PRIu64 " expected %" PRIu64 "\n",
+ ptime, expected);
+ return false;
+ }
+
+ pr_debug("%" PRIu64 "\n", ptime);
+
+ return true;
+}
+
+static bool test__perf_time__parse_str(const char *ostr, u64 start, u64 end)
+{
+ struct perf_time_interval ptime;
+ int err;
+
+ pr_debug("\nperf_time__parse_str(\"%s\")\n", ostr);
+
+ err = perf_time__parse_str(&ptime, ostr);
+ if (err) {
+ pr_debug("Error %d\n", err);
+ return false;
+ }
+
+ if (ptime.start != start || ptime.end != end) {
+ pr_debug("Failed. Expected %" PRIu64 " to %" PRIu64 "\n",
+ start, end);
+ return false;
+ }
+
+ return true;
+}
+
+#define TEST_MAX 64
+
+struct test_data {
+ const char *str;
+ u64 first;
+ u64 last;
+ struct perf_time_interval ptime[TEST_MAX];
+ int num;
+ u64 skip[TEST_MAX];
+ u64 noskip[TEST_MAX];
+};
+
+static bool test__perf_time__parse_for_ranges(struct test_data *d)
+{
+ struct evlist evlist = {
+ .first_sample_time = d->first,
+ .last_sample_time = d->last,
+ };
+ struct perf_session session = { .evlist = &evlist };
+ struct perf_time_interval *ptime = NULL;
+ int range_size, range_num;
+ bool pass = false;
+ int i, err;
+
+ pr_debug("\nperf_time__parse_for_ranges(\"%s\")\n", d->str);
+
+ if (strchr(d->str, '%'))
+ pr_debug("first_sample_time %" PRIu64 " last_sample_time %" PRIu64 "\n",
+ d->first, d->last);
+
+ err = perf_time__parse_for_ranges(d->str, &session, &ptime, &range_size,
+ &range_num);
+ if (err) {
+ pr_debug("error %d\n", err);
+ goto out;
+ }
+
+ if (range_size < d->num || range_num != d->num) {
+ pr_debug("bad size: range_size %d range_num %d expected num %d\n",
+ range_size, range_num, d->num);
+ goto out;
+ }
+
+ for (i = 0; i < d->num; i++) {
+ if (ptime[i].start != d->ptime[i].start ||
+ ptime[i].end != d->ptime[i].end) {
+ pr_debug("bad range %d expected %" PRIu64 " to %" PRIu64 "\n",
+ i, d->ptime[i].start, d->ptime[i].end);
+ goto out;
+ }
+ }
+
+ if (perf_time__ranges_skip_sample(ptime, d->num, 0)) {
+ pr_debug("failed to keep 0\n");
+ goto out;
+ }
+
+ for (i = 0; i < TEST_MAX; i++) {
+ if (d->skip[i] &&
+ !perf_time__ranges_skip_sample(ptime, d->num, d->skip[i])) {
+ pr_debug("failed to skip %" PRIu64 "\n", d->skip[i]);
+ goto out;
+ }
+ if (d->noskip[i] &&
+ perf_time__ranges_skip_sample(ptime, d->num, d->noskip[i])) {
+ pr_debug("failed to keep %" PRIu64 "\n", d->noskip[i]);
+ goto out;
+ }
+ }
+
+ pass = true;
+out:
+ free(ptime);
+ return pass;
+}
+
+static int test__time_utils(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
+{
+ bool pass = true;
+
+ pass &= test__parse_nsec_time("0", 0);
+ pass &= test__parse_nsec_time("1", 1000000000ULL);
+ pass &= test__parse_nsec_time("0.000000001", 1);
+ pass &= test__parse_nsec_time("1.000000001", 1000000001ULL);
+ pass &= test__parse_nsec_time("123456.123456", 123456123456000ULL);
+ pass &= test__parse_nsec_time("1234567.123456789", 1234567123456789ULL);
+ pass &= test__parse_nsec_time("18446744073.709551615",
+ 0xFFFFFFFFFFFFFFFFULL);
+
+ pass &= test__perf_time__parse_str("1234567.123456789,1234567.123456789",
+ 1234567123456789ULL, 1234567123456789ULL);
+ pass &= test__perf_time__parse_str("1234567.123456789,1234567.123456790",
+ 1234567123456789ULL, 1234567123456790ULL);
+ pass &= test__perf_time__parse_str("1234567.123456789,",
+ 1234567123456789ULL, 0);
+ pass &= test__perf_time__parse_str(",1234567.123456789",
+ 0, 1234567123456789ULL);
+ pass &= test__perf_time__parse_str("0,1234567.123456789",
+ 0, 1234567123456789ULL);
+
+ {
+ u64 b = 1234567123456789ULL;
+ struct test_data d = {
+ .str = "1234567.123456789,1234567.123456790",
+ .ptime = { {b, b + 1}, },
+ .num = 1,
+ .skip = { b - 1, b + 2, },
+ .noskip = { b, b + 1, },
+ };
+
+ pass &= test__perf_time__parse_for_ranges(&d);
+ }
+
+ {
+ u64 b = 1234567123456789ULL;
+ u64 c = 7654321987654321ULL;
+ u64 e = 8000000000000000ULL;
+ struct test_data d = {
+ .str = "1234567.123456789,1234567.123456790 "
+ "7654321.987654321,7654321.987654444 "
+ "8000000,8000000.000000005",
+ .ptime = { {b, b + 1}, {c, c + 123}, {e, e + 5}, },
+ .num = 3,
+ .skip = { b - 1, b + 2, c - 1, c + 124, e - 1, e + 6 },
+ .noskip = { b, b + 1, c, c + 123, e, e + 5 },
+ };
+
+ pass &= test__perf_time__parse_for_ranges(&d);
+ }
+
+ {
+ u64 b = 7654321ULL * NSEC_PER_SEC;
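+		/* "10%/1" selects the 1st 10% slice of the [first, last] sample time range. */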
+ struct test_data d = {
+ .str = "10%/1",
+ .first = b,
+ .last = b + 100,
+ .ptime = { {b, b + 9}, },
+ .num = 1,
+ .skip = { b - 1, b + 10, },
+ .noskip = { b, b + 9, },
+ };
+
+ pass &= test__perf_time__parse_for_ranges(&d);
+ }
+
+ {
+ u64 b = 7654321ULL * NSEC_PER_SEC;
+ struct test_data d = {
+ .str = "10%/2",
+ .first = b,
+ .last = b + 100,
+ .ptime = { {b + 10, b + 19}, },
+ .num = 1,
+ .skip = { b + 9, b + 20, },
+ .noskip = { b + 10, b + 19, },
+ };
+
+ pass &= test__perf_time__parse_for_ranges(&d);
+ }
+
+ {
+ u64 b = 11223344ULL * NSEC_PER_SEC;
+ struct test_data d = {
+ .str = "10%/1,10%/2",
+ .first = b,
+ .last = b + 100,
+ .ptime = { {b, b + 9}, {b + 10, b + 19}, },
+ .num = 2,
+ .skip = { b - 1, b + 20, },
+ .noskip = { b, b + 8, b + 9, b + 10, b + 11, b + 12, b + 19, },
+ };
+
+ pass &= test__perf_time__parse_for_ranges(&d);
+ }
+
+ {
+ u64 b = 11223344ULL * NSEC_PER_SEC;
+ struct test_data d = {
+ .str = "10%/1,10%/3,10%/10",
+ .first = b,
+ .last = b + 100,
+ .ptime = { {b, b + 9}, {b + 20, b + 29}, { b + 90, b + 100}, },
+ .num = 3,
+ .skip = { b - 1, b + 10, b + 19, b + 30, b + 89, b + 101 },
+ .noskip = { b, b + 9, b + 20, b + 29, b + 90, b + 100},
+ };
+
+ pass &= test__perf_time__parse_for_ranges(&d);
+ }
+
+ pr_debug("\n");
+
+ return pass ? 0 : TEST_FAIL;
+}
+
+DEFINE_SUITE("time utils", time_utils);
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
new file mode 100644
index 000000000..c4630cfc8
--- /dev/null
+++ b/tools/perf/tests/topology.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <perf/cpumap.h>
+#include "cpumap.h"
+#include "tests.h"
+#include "session.h"
+#include "evlist.h"
+#include "debug.h"
+#include "pmu.h"
+#include <linux/err.h>
+
+#define TEMPL "/tmp/perf-test-XXXXXX"
+#define DATA_SIZE 10
+
+static int get_temp(char *path)
+{
+ int fd;
+
+ strcpy(path, TEMPL);
+
+ fd = mkstemp(path);
+ if (fd < 0) {
+ perror("mkstemp failed");
+ return -1;
+ }
+
+ close(fd);
+ return 0;
+}
+
+static int session_write_header(char *path)
+{
+ struct perf_session *session;
+ struct perf_data data = {
+ .path = path,
+ .mode = PERF_DATA_MODE_WRITE,
+ };
+
+ session = perf_session__new(&data, NULL);
+ TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
+
+ if (!perf_pmu__has_hybrid()) {
+ session->evlist = evlist__new_default();
+ TEST_ASSERT_VAL("can't get evlist", session->evlist);
+ } else {
+ struct parse_events_error err;
+
+ session->evlist = evlist__new();
+ TEST_ASSERT_VAL("can't get evlist", session->evlist);
+ parse_events_error__init(&err);
+ parse_events(session->evlist, "cpu_core/cycles/", &err);
+ parse_events_error__exit(&err);
+ }
+
+ perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
+ perf_header__set_feat(&session->header, HEADER_NRCPUS);
+ perf_header__set_feat(&session->header, HEADER_ARCH);
+
+ session->header.data_size += DATA_SIZE;
+
+ TEST_ASSERT_VAL("failed to write header",
+ !perf_session__write_header(session, session->evlist, data.file.fd, true));
+
+ evlist__delete(session->evlist);
+ perf_session__delete(session);
+
+ return 0;
+}
+
+static int check_cpu_topology(char *path, struct perf_cpu_map *map)
+{
+ struct perf_session *session;
+ struct perf_data data = {
+ .path = path,
+ .mode = PERF_DATA_MODE_READ,
+ };
+ int i;
+ struct aggr_cpu_id id;
+
+ session = perf_session__new(&data, NULL);
+ TEST_ASSERT_VAL("can't get session", !IS_ERR(session));
+ cpu__setup_cpunode_map();
+
+ /* On platforms with large numbers of CPUs process_cpu_topology()
+ * might issue an error while reading the perf.data file section
+ * HEADER_CPU_TOPOLOGY and the cpu_topology_map pointed to by member
+ * cpu is a NULL pointer.
+ * Example: On s390
+ * CPU 0 is on core_id 0 and physical_package_id 6
+ * CPU 1 is on core_id 1 and physical_package_id 3
+ *
+ * Core_id and physical_package_id are platform and architecture
+ * dependent and might have higher numbers than the CPU id.
+ * This actually depends on the configuration.
+ *
+ * In this case process_cpu_topology() prints error message:
+ * "socket_id number is too big. You may need to upgrade the
+ * perf tool."
+ *
+ * This is the reason why this test might be skipped. aarch64 and
+ * s390 always write this part of the header, even when the above
+ * condition is true (see do_core_id_test in header.c). So always
+ * run this test on those platforms.
+ */
+ if (!session->header.env.cpu
+ && strncmp(session->header.env.arch, "s390", 4)
+ && strncmp(session->header.env.arch, "aarch64", 7)) {
+ perf_session__delete(session);
+ return TEST_SKIP;
+ }
+
+ /*
+ * In powerpc pSeries platform, not all the topology information
+ * are exposed via sysfs. Due to restriction, detail like
+ * physical_package_id will be set to -1. Hence skip this
+ * test if physical_package_id returns -1 for cpu from perf_cpu_map.
+ */
+ if (!strncmp(session->header.env.arch, "ppc64le", 7)) {
+ if (cpu__get_socket_id(perf_cpu_map__cpu(map, 0)) == -1)
+ return TEST_SKIP;
+ }
+
+ TEST_ASSERT_VAL("Session header CPU map not set", session->header.env.cpu);
+
+ for (i = 0; i < session->header.env.nr_cpus_avail; i++) {
+ struct perf_cpu cpu = { .cpu = i };
+
+ if (!perf_cpu_map__has(map, cpu))
+ continue;
+ pr_debug("CPU %d, core %d, socket %d\n", i,
+ session->header.env.cpu[i].core_id,
+ session->header.env.cpu[i].socket_id);
+ }
+
+ // Test that CPU ID contains socket, die, core and CPU
+ for (i = 0; i < perf_cpu_map__nr(map); i++) {
+ id = aggr_cpu_id__cpu(perf_cpu_map__cpu(map, i), NULL);
+ TEST_ASSERT_VAL("Cpu map - CPU ID doesn't match",
+ perf_cpu_map__cpu(map, i).cpu == id.cpu.cpu);
+
+ TEST_ASSERT_VAL("Cpu map - Core ID doesn't match",
+ session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core);
+ TEST_ASSERT_VAL("Cpu map - Socket ID doesn't match",
+ session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+ id.socket);
+
+ TEST_ASSERT_VAL("Cpu map - Die ID doesn't match",
+ session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
+ TEST_ASSERT_VAL("Cpu map - Node ID is set", id.node == -1);
+ TEST_ASSERT_VAL("Cpu map - Thread IDX is set", id.thread_idx == -1);
+ }
+
+ // Test that core ID contains socket, die and core
+ for (i = 0; i < perf_cpu_map__nr(map); i++) {
+ id = aggr_cpu_id__core(perf_cpu_map__cpu(map, i), NULL);
+ TEST_ASSERT_VAL("Core map - Core ID doesn't match",
+ session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].core_id == id.core);
+
+ TEST_ASSERT_VAL("Core map - Socket ID doesn't match",
+ session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+ id.socket);
+
+ TEST_ASSERT_VAL("Core map - Die ID doesn't match",
+ session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
+ TEST_ASSERT_VAL("Core map - Node ID is set", id.node == -1);
+ TEST_ASSERT_VAL("Core map - Thread IDX is set", id.thread_idx == -1);
+ }
+
+ // Test that die ID contains socket and die
+ for (i = 0; i < perf_cpu_map__nr(map); i++) {
+ id = aggr_cpu_id__die(perf_cpu_map__cpu(map, i), NULL);
+ TEST_ASSERT_VAL("Die map - Socket ID doesn't match",
+ session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+ id.socket);
+
+ TEST_ASSERT_VAL("Die map - Die ID doesn't match",
+ session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].die_id == id.die);
+
+ TEST_ASSERT_VAL("Die map - Node ID is set", id.node == -1);
+ TEST_ASSERT_VAL("Die map - Core is set", id.core == -1);
+ TEST_ASSERT_VAL("Die map - CPU is set", id.cpu.cpu == -1);
+ TEST_ASSERT_VAL("Die map - Thread IDX is set", id.thread_idx == -1);
+ }
+
+ // Test that socket ID contains only socket
+ for (i = 0; i < perf_cpu_map__nr(map); i++) {
+ id = aggr_cpu_id__socket(perf_cpu_map__cpu(map, i), NULL);
+ TEST_ASSERT_VAL("Socket map - Socket ID doesn't match",
+ session->header.env.cpu[perf_cpu_map__cpu(map, i).cpu].socket_id ==
+ id.socket);
+
+ TEST_ASSERT_VAL("Socket map - Node ID is set", id.node == -1);
+ TEST_ASSERT_VAL("Socket map - Die ID is set", id.die == -1);
+ TEST_ASSERT_VAL("Socket map - Core is set", id.core == -1);
+ TEST_ASSERT_VAL("Socket map - CPU is set", id.cpu.cpu == -1);
+ TEST_ASSERT_VAL("Socket map - Thread IDX is set", id.thread_idx == -1);
+ }
+
+ // Test that node ID contains only node
+ for (i = 0; i < perf_cpu_map__nr(map); i++) {
+ id = aggr_cpu_id__node(perf_cpu_map__cpu(map, i), NULL);
+ TEST_ASSERT_VAL("Node map - Node ID doesn't match",
+ cpu__get_node(perf_cpu_map__cpu(map, i)) == id.node);
+ TEST_ASSERT_VAL("Node map - Socket is set", id.socket == -1);
+ TEST_ASSERT_VAL("Node map - Die ID is set", id.die == -1);
+ TEST_ASSERT_VAL("Node map - Core is set", id.core == -1);
+ TEST_ASSERT_VAL("Node map - CPU is set", id.cpu.cpu == -1);
+ TEST_ASSERT_VAL("Node map - Thread IDX is set", id.thread_idx == -1);
+ }
+ perf_session__delete(session);
+
+ return 0;
+}
+
+static int test__session_topology(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ char path[PATH_MAX];
+ struct perf_cpu_map *map;
+ int ret = TEST_FAIL;
+
+ TEST_ASSERT_VAL("can't get templ file", !get_temp(path));
+
+ pr_debug("templ file: %s\n", path);
+
+ if (session_write_header(path))
+ goto free_path;
+
+ map = perf_cpu_map__new(NULL);
+ if (map == NULL) {
+ pr_debug("failed to get system cpumap\n");
+ goto free_path;
+ }
+
+ ret = check_cpu_topology(path, map);
+ perf_cpu_map__put(map);
+
+free_path:
+ unlink(path);
+ return ret;
+}
+
+DEFINE_SUITE("Session topology", session_topology);
diff --git a/tools/perf/tests/unit_number__scnprintf.c b/tools/perf/tests/unit_number__scnprintf.c
new file mode 100644
index 000000000..88bcada1c
--- /dev/null
+++ b/tools/perf/tests/unit_number__scnprintf.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <string.h>
+#include "tests.h"
+#include "units.h"
+#include "debug.h"
+
+static int test__unit_number__scnprint(struct test_suite *t __maybe_unused, int subtest __maybe_unused)
+{
+ struct {
+ u64 n;
+ const char *str;
+ } test[] = {
+ { 1, "1B" },
+ { 10*1024, "10K" },
+ { 20*1024*1024, "20M" },
+ { 30*1024*1024*1024ULL, "30G" },
+ { 0, "0B" },
+ { 0, NULL },
+ };
+ unsigned i = 0;
+
+ while (test[i].str) {
+ char buf[100];
+
+ unit_number__scnprintf(buf, sizeof(buf), test[i].n);
+
+ pr_debug("n %" PRIu64 ", str '%s', buf '%s'\n",
+ test[i].n, test[i].str, buf);
+
+ if (strcmp(test[i].str, buf))
+ return TEST_FAIL;
+
+ i++;
+ }
+
+ return TEST_OK;
+}
+
+DEFINE_SUITE("unit_number__scnprintf", unit_number__scnprint);
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
new file mode 100644
index 000000000..8ab035b55
--- /dev/null
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <linux/rbtree.h>
+#include <inttypes.h>
+#include <string.h>
+#include <ctype.h>
+#include <stdlib.h>
+#include "dso.h"
+#include "map.h"
+#include "symbol.h"
+#include <internal/lib.h> // page_size
+#include "tests.h"
+#include "debug.h"
+#include "machine.h"
+
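+/*
+ * UM() converts a kallsyms map-relative symbol address back to the absolute
+ * kernel address, so it can be compared with the unmapped vmlinux addresses.
+ */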
+#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))
+
+static bool is_ignored_symbol(const char *name, char type)
+{
+ /* Symbol names that exactly match the following are ignored.*/
+ static const char * const ignored_symbols[] = {
+ /*
+ * Symbols which vary between passes. Passes 1 and 2 must have
+ * identical symbol lists. The kallsyms_* symbols below are
+ * only added after pass 1, they would be included in pass 2
+ * when --all-symbols is specified so exclude them to get a
+ * stable symbol list.
+ */
+ "kallsyms_addresses",
+ "kallsyms_offsets",
+ "kallsyms_relative_base",
+ "kallsyms_num_syms",
+ "kallsyms_names",
+ "kallsyms_markers",
+ "kallsyms_token_table",
+ "kallsyms_token_index",
+ /* Exclude linker generated symbols which vary between passes */
+ "_SDA_BASE_", /* ppc */
+ "_SDA2_BASE_", /* ppc */
+ NULL
+ };
+
+ /* Symbol names that begin with the following are ignored.*/
+ static const char * const ignored_prefixes[] = {
+ "$", /* local symbols for ARM, MIPS, etc. */
+ ".L", /* local labels, .LBB,.Ltmpxxx,.L__unnamed_xx,.LASANPC, etc. */
+ "__crc_", /* modversions */
+ "__efistub_", /* arm64 EFI stub namespace */
+ "__kvm_nvhe_$", /* arm64 local symbols in non-VHE KVM namespace */
+ "__kvm_nvhe_.L", /* arm64 local symbols in non-VHE KVM namespace */
+ "__AArch64ADRPThunk_", /* arm64 lld */
+ "__ARMV5PILongThunk_", /* arm lld */
+ "__ARMV7PILongThunk_",
+ "__ThumbV7PILongThunk_",
+ "__LA25Thunk_", /* mips lld */
+ "__microLA25Thunk_",
+ NULL
+ };
+
+ /* Symbol names that end with the following are ignored.*/
+ static const char * const ignored_suffixes[] = {
+ "_from_arm", /* arm */
+ "_from_thumb", /* arm */
+ "_veneer", /* arm */
+ NULL
+ };
+
+ /* Symbol names that contain the following are ignored.*/
+ static const char * const ignored_matches[] = {
+ ".long_branch.", /* ppc stub */
+ ".plt_branch.", /* ppc stub */
+ NULL
+ };
+
+ const char * const *p;
+
+ for (p = ignored_symbols; *p; p++)
+ if (!strcmp(name, *p))
+ return true;
+
+ for (p = ignored_prefixes; *p; p++)
+ if (!strncmp(name, *p, strlen(*p)))
+ return true;
+
+ for (p = ignored_suffixes; *p; p++) {
+ int l = strlen(name) - strlen(*p);
+
+ if (l >= 0 && !strcmp(name + l, *p))
+ return true;
+ }
+
+ for (p = ignored_matches; *p; p++) {
+ if (strstr(name, *p))
+ return true;
+ }
+
+ if (type == 'U' || type == 'u')
+ return true;
+ /* exclude debugging symbols */
+ if (type == 'N' || type == 'n')
+ return true;
+
+ if (toupper(type) == 'A') {
+ /* Keep these useful absolute symbols */
+ if (strcmp(name, "__kernel_syscall_via_break") &&
+ strcmp(name, "__kernel_syscall_via_epc") &&
+ strcmp(name, "__kernel_sigtramp") &&
+ strcmp(name, "__gp"))
+ return true;
+ }
+
+ return false;
+}
+
+static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ int err = TEST_FAIL;
+ struct rb_node *nd;
+ struct symbol *sym;
+ struct map *kallsyms_map, *vmlinux_map, *map;
+ struct machine kallsyms, vmlinux;
+ struct maps *maps;
+ u64 mem_start, mem_end;
+ bool header_printed;
+
+ /*
+ * Step 1:
+ *
+ * Init the machines that will hold kernel, modules obtained from
+ * both vmlinux + .ko files and from /proc/kallsyms split by modules.
+ */
+ machine__init(&kallsyms, "", HOST_KERNEL_ID);
+ machine__init(&vmlinux, "", HOST_KERNEL_ID);
+
+ maps = machine__kernel_maps(&vmlinux);
+
+ /*
+ * Step 2:
+ *
+ * Create the kernel maps for kallsyms and the DSO where we will then
+ * load /proc/kallsyms. Also create the modules maps from /proc/modules
+ * and find the .ko files that match them in /lib/modules/`uname -r`/.
+ */
+ if (machine__create_kernel_maps(&kallsyms) < 0) {
+ pr_debug("machine__create_kernel_maps failed");
+ err = TEST_SKIP;
+ goto out;
+ }
+
+ /*
+ * Step 3:
+ *
+ * Load and split /proc/kallsyms into multiple maps, one per module.
+ * Do not use kcore, as this test was designed before kcore support
+ * and has parts that only make sense if using the non-kcore code.
+ * XXX: extend it to stress the kcore code as well, hint: the list
+ * of modules extracted from /proc/kcore, in its current form, can't
+ * be compared against the list of modules found in the "vmlinux"
+ * code and the one obtained from /proc/modules in the "kallsyms" code.
+ */
+ if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) {
+ pr_debug("machine__load_kallsyms failed");
+ err = TEST_SKIP;
+ goto out;
+ }
+
+ /*
+ * Step 4:
+ *
+ * kallsyms will be internally sorted by name, on demand, so that we can
+ * find the reference relocation symbol, i.e. the symbol we will use
+ * to see if the running kernel was relocated by checking if it has the
+ * same value in the vmlinux file we load.
+ */
+ kallsyms_map = machine__kernel_map(&kallsyms);
+
+ /*
+ * Step 5:
+ *
+ * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
+ */
+ if (machine__create_kernel_maps(&vmlinux) < 0) {
+ pr_info("machine__create_kernel_maps failed");
+ goto out;
+ }
+
+ vmlinux_map = machine__kernel_map(&vmlinux);
+
+ /*
+ * Step 6:
+ *
+ * Locate a vmlinux file in the vmlinux path that has a buildid that
+ * matches the one of the running kernel.
+ *
+ * While doing that look if we find the ref reloc symbol, if we find it
+ * we'll have its ref_reloc_symbol.unrelocated_addr and then
+ * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
+ * to fixup the symbols.
+ */
+ if (machine__load_vmlinux_path(&vmlinux) <= 0) {
+ pr_info("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
+ err = TEST_SKIP;
+ goto out;
+ }
+
+ err = 0;
+ /*
+ * Step 7:
+ *
+ * Now look at the symbols in the vmlinux DSO and check if we find all of them
+ * in the kallsyms dso. For the ones that are in both, check its names and
+ * end addresses too.
+ */
+ map__for_each_symbol(vmlinux_map, sym, nd) {
+ struct symbol *pair, *first_pair;
+
+ sym = rb_entry(nd, struct symbol, rb_node);
+
+ if (sym->start == sym->end)
+ continue;
+
+ mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start);
+ mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end);
+
+ first_pair = machine__find_kernel_symbol(&kallsyms, mem_start, NULL);
+ pair = first_pair;
+
+ if (pair && UM(pair->start) == mem_start) {
+next_pair:
+ if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
+ /*
+ * kallsyms doesn't have the symbol end, so we
+ * set it to the next symbol's start - 1. In
+ * some cases this is up to a page off;
+ * trace_kmalloc, when this code was being
+ * developed, was one such example, 2106 bytes
+ * off the real size. More than that and we
+ * _really_ have a problem.
+ */
+ s64 skew = mem_end - UM(pair->end);
+ if (llabs(skew) >= page_size)
+ pr_debug("WARN: %#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
+ mem_start, sym->name, mem_end,
+ UM(pair->end));
+
+ /*
+ * Do not count this as a failure, because we
+ * could really find a case where it's not
+ * possible to get proper function end from
+ * kallsyms.
+ */
+ continue;
+ } else {
+ pair = machine__find_kernel_symbol_by_name(&kallsyms, sym->name, NULL);
+ if (pair) {
+ if (UM(pair->start) == mem_start)
+ goto next_pair;
+
+ pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
+ mem_start, sym->name, pair->name);
+ } else {
+ pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
+ mem_start, sym->name, first_pair->name);
+ }
+
+ continue;
+ }
+ } else if (mem_start == kallsyms.vmlinux_map->end) {
+ /*
+ * Ignore aliases to _etext, i.e. to the end of the kernel text area,
+ * such as __indirect_thunk_end.
+ */
+ continue;
+ } else if (is_ignored_symbol(sym->name, sym->type)) {
+ /*
+ * Ignore hidden symbols, see scripts/kallsyms.c for the details
+ */
+ continue;
+ } else {
+ pr_debug("ERR : %#" PRIx64 ": %s not on kallsyms\n",
+ mem_start, sym->name);
+ }
+
+ err = -1;
+ }
+
+ if (verbose <= 0)
+ goto out;
+
+ header_printed = false;
+
+ maps__for_each_entry(maps, map) {
+ struct map *pair;
+ /*
+ * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
+ * the kernel will have the path for the vmlinux file being used, so
+ * use the short name, less descriptive but the same ("[kernel]") in
+ * both cases.
+ */
+ pair = maps__find_by_name(kallsyms.kmaps, (map->dso->kernel ?
+ map->dso->short_name :
+ map->dso->name));
+ if (pair) {
+ pair->priv = 1;
+ } else {
+ if (!header_printed) {
+ pr_info("WARN: Maps only in vmlinux:\n");
+ header_printed = true;
+ }
+ map__fprintf(map, stderr);
+ }
+ }
+
+ header_printed = false;
+
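+ /*
+ * Report vmlinux maps whose kallsyms counterpart (found by address) has a
+ * different name, marking the kallsyms map via ->priv as accounted for.
+ */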
+ maps__for_each_entry(maps, map) {
+ struct map *pair;
+
+ mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
+ mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end);
+
+ pair = maps__find(kallsyms.kmaps, mem_start);
+ if (pair == NULL || pair->priv)
+ continue;
+
+ if (pair->start == mem_start) {
+ if (!header_printed) {
+ pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n");
+ header_printed = true;
+ }
+
+ pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
+ map->start, map->end, map->pgoff, map->dso->name);
+ if (mem_end != pair->end)
+ pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
+ pair->start, pair->end, pair->pgoff);
+ pr_info(" %s\n", pair->dso->name);
+ pair->priv = 1;
+ }
+ }
+
+ header_printed = false;
+
+ maps = machine__kernel_maps(&kallsyms);
+
+ maps__for_each_entry(maps, map) {
+ if (!map->priv) {
+ if (!header_printed) {
+ pr_info("WARN: Maps only in kallsyms:\n");
+ header_printed = true;
+ }
+ map__fprintf(map, stderr);
+ }
+ }
+out:
+ machine__exit(&kallsyms);
+ machine__exit(&vmlinux);
+ return err;
+}
+
+DEFINE_SUITE("vmlinux symtab matches kallsyms", vmlinux_matches_kallsyms);
diff --git a/tools/perf/tests/wp.c b/tools/perf/tests/wp.c
new file mode 100644
index 000000000..56455da30
--- /dev/null
+++ b/tools/perf/tests/wp.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <linux/compiler.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/kernel.h>
+#include "tests.h"
+#include "debug.h"
+#include "event.h"
+#include "cloexec.h"
+#include "../perf-sys.h"
+
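+/* Read the event count from the watchpoint fd and assert it equals val. */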
+#define WP_TEST_ASSERT_VAL(fd, text, val) \
+do { \
+ long long count; \
+ wp_read(fd, &count, sizeof(long long)); \
+ TEST_ASSERT_VAL(text, count == val); \
+} while (0)
+
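+/*
+ * Memory the hardware breakpoints below are armed on; volatile keeps the
+ * compiler from optimizing away the reads and writes meant to trigger them.
+ */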
+volatile u64 data1;
+volatile u8 data2[3];
+
+#ifndef __s390x__
+static int wp_read(int fd, long long *count, int size)
+{
+ int ret = read(fd, count, size);
+
+ if (ret != size) {
+ pr_debug("failed to read: %d\n", ret);
+ return -1;
+ }
+ return 0;
+}
+
+static void get__perf_event_attr(struct perf_event_attr *attr, int wp_type,
+ void *wp_addr, unsigned long wp_len)
+{
+ memset(attr, 0, sizeof(struct perf_event_attr));
+ attr->type = PERF_TYPE_BREAKPOINT;
+ attr->size = sizeof(struct perf_event_attr);
+ attr->config = 0;
+ attr->bp_type = wp_type;
+ attr->bp_addr = (unsigned long)wp_addr;
+ attr->bp_len = wp_len;
+ attr->sample_period = 1;
+ attr->sample_type = PERF_SAMPLE_IP;
+ attr->exclude_kernel = 1;
+ attr->exclude_hv = 1;
+}
+
+static int __event(int wp_type, void *wp_addr, unsigned long wp_len)
+{
+ int fd;
+ struct perf_event_attr attr;
+
+ get__perf_event_attr(&attr, wp_type, wp_addr, wp_len);
+ fd = sys_perf_event_open(&attr, 0, -1, -1,
+ perf_event_open_cloexec_flag());
+ if (fd < 0)
+ pr_debug("failed opening event %x\n", attr.bp_type);
+
+ return fd;
+}
+#endif
+
+static int test__wp_ro(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+#if defined(__s390x__) || defined(__x86_64__) || defined(__i386__)
+ return TEST_SKIP;
+#else
+ int fd;
+ unsigned long tmp, tmp1 = rand();
+
+ fd = __event(HW_BREAKPOINT_R, (void *)&data1, sizeof(data1));
+ if (fd < 0)
+ return -1;
+
+ tmp = data1;
+ WP_TEST_ASSERT_VAL(fd, "RO watchpoint", 1);
+
+ data1 = tmp1 + tmp;
+ WP_TEST_ASSERT_VAL(fd, "RO watchpoint", 1);
+
+ close(fd);
+ return 0;
+#endif
+}
+
+static int test__wp_wo(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+#if defined(__s390x__)
+ return TEST_SKIP;
+#else
+ int fd;
+ unsigned long tmp, tmp1 = rand();
+
+ fd = __event(HW_BREAKPOINT_W, (void *)&data1, sizeof(data1));
+ if (fd < 0)
+ return -1;
+
+ tmp = data1;
+ WP_TEST_ASSERT_VAL(fd, "WO watchpoint", 0);
+
+ data1 = tmp1 + tmp;
+ WP_TEST_ASSERT_VAL(fd, "WO watchpoint", 1);
+
+ close(fd);
+ return 0;
+#endif
+}
+
+static int test__wp_rw(struct test_suite *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+#if defined(__s390x__)
+ return TEST_SKIP;
+#else
+ int fd;
+ unsigned long tmp, tmp1 = rand();
+
+ fd = __event(HW_BREAKPOINT_R | HW_BREAKPOINT_W, (void *)&data1,
+ sizeof(data1));
+ if (fd < 0)
+ return -1;
+
+ tmp = data1;
+ WP_TEST_ASSERT_VAL(fd, "RW watchpoint", 1);
+
+ data1 = tmp1 + tmp;
+ WP_TEST_ASSERT_VAL(fd, "RW watchpoint", 2);
+
+ close(fd);
+ return 0;
+#endif
+}
+
+static int test__wp_modify(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+#if defined(__s390x__)
+ return TEST_SKIP;
+#else
+ int fd, ret;
+ unsigned long tmp = rand();
+ struct perf_event_attr new_attr;
+
+ fd = __event(HW_BREAKPOINT_W, (void *)&data1, sizeof(data1));
+ if (fd < 0)
+ return -1;
+
+ data1 = tmp;
+ WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 1);
+
+ /* Modify watchpoint with disabled = 1 */
+ get__perf_event_attr(&new_attr, HW_BREAKPOINT_W, (void *)&data2[0],
+ sizeof(u8) * 2);
+ new_attr.disabled = 1;
+ ret = ioctl(fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &new_attr);
+ if (ret < 0) {
+ if (errno == ENOTTY) {
+ test->test_cases[subtest].skip_reason = "missing kernel support";
+ ret = TEST_SKIP;
+ }
+
+ pr_debug("ioctl(PERF_EVENT_IOC_MODIFY_ATTRIBUTES) failed\n");
+ close(fd);
+ return ret;
+ }
+
+ data2[1] = tmp; /* Not Counted */
+ WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 1);
+
+ /* Enable the event */
+ ret = ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
+ if (ret < 0) {
+ pr_debug("Failed to enable event\n");
+ close(fd);
+ return ret;
+ }
+
+ data2[1] = tmp; /* Counted */
+ WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 2);
+
+ data2[2] = tmp; /* Not Counted */
+ WP_TEST_ASSERT_VAL(fd, "Modify watchpoint", 2);
+
+ close(fd);
+ return 0;
+#endif
+}
+
+static struct test_case wp_tests[] = {
+ TEST_CASE_REASON("Read Only Watchpoint", wp_ro, "missing hardware support"),
+ TEST_CASE_REASON("Write Only Watchpoint", wp_wo, "missing hardware support"),
+ TEST_CASE_REASON("Read / Write Watchpoint", wp_rw, "missing hardware support"),
+ TEST_CASE_REASON("Modify Watchpoint", wp_modify, "missing hardware support"),
+ { .name = NULL, }
+};
+
+struct test_suite suite__wp = {
+ .desc = "Watchpoint",
+ .test_cases = wp_tests,
+};