Diffstat (limited to 'tools/perf/tests')
-rw-r--r--  tools/perf/tests/Build                                    |   2
-rw-r--r--  tools/perf/tests/builtin-test-list.c                      | 207
-rw-r--r--  tools/perf/tests/builtin-test-list.h                      |  12
-rw-r--r--  tools/perf/tests/builtin-test.c                           | 378
-rw-r--r--  tools/perf/tests/expand-cgroup.c                          |   3
-rw-r--r--  tools/perf/tests/make                                     |   4
-rw-r--r--  tools/perf/tests/maps.c                                   |   3
-rw-r--r--  tools/perf/tests/parse-events.c                           |   9
-rw-r--r--  tools/perf/tests/pmu-events.c                             |  22
-rw-r--r--  tools/perf/tests/shell/base_probe/settings.sh             |  48
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_adding_kernel.sh  | 278
-rwxr-xr-x  tools/perf/tests/shell/common/check_all_lines_matched.pl |  39
-rwxr-xr-x  tools/perf/tests/shell/common/check_all_patterns_found.pl |  34
-rwxr-xr-x  tools/perf/tests/shell/common/check_no_patterns_found.pl |  34
-rw-r--r--  tools/perf/tests/shell/common/init.sh                     | 117
-rw-r--r--  tools/perf/tests/shell/common/patterns.sh                 | 268
-rw-r--r--  tools/perf/tests/shell/common/settings.sh                 |  79
-rw-r--r--  tools/perf/tests/shell/lib/perf_has_symbol.sh             |   2
-rw-r--r--  tools/perf/tests/shell/lib/perf_json_output_lint.py       |   4
-rw-r--r--  tools/perf/tests/shell/lib/perf_metric_validation.py      | 231
-rw-r--r--  tools/perf/tests/shell/lib/stat_output.sh                 |  12
-rwxr-xr-x  tools/perf/tests/shell/perftool-testsuite_probe.sh        |  23
-rwxr-xr-x  tools/perf/tests/shell/stat+csv_output.sh                 |   2
-rwxr-xr-x  tools/perf/tests/shell/stat+json_output.sh                |  13
-rwxr-xr-x  tools/perf/tests/shell/stat+std_output.sh                 |   4
-rwxr-xr-x  tools/perf/tests/shell/stat_bpf_counters.sh               |  12
-rwxr-xr-x  tools/perf/tests/shell/stat_metrics_values.sh             |   4
-rwxr-xr-x  tools/perf/tests/shell/test_arm_callgraph_fp.sh           |   6
-rw-r--r--  tools/perf/tests/symbols.c                                |  68
-rw-r--r--  tools/perf/tests/tests-scripts.c                          | 257
-rw-r--r--  tools/perf/tests/tests-scripts.h                          |   9
-rw-r--r--  tools/perf/tests/tests.h                                  |  16
-rw-r--r--  tools/perf/tests/thread-maps-share.c                      |   8
-rw-r--r--  tools/perf/tests/vmlinux-kallsyms.c                       |  10
34 files changed, 1670 insertions(+), 548 deletions(-)
diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build
index 53ba9c3e20..c7f9d96760 100644
--- a/tools/perf/tests/Build
+++ b/tools/perf/tests/Build
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
perf-y += builtin-test.o
-perf-y += builtin-test-list.o
+perf-y += tests-scripts.o
perf-y += parse-events.o
perf-y += dso-data.o
perf-y += attr.o
diff --git a/tools/perf/tests/builtin-test-list.c b/tools/perf/tests/builtin-test-list.c
deleted file mode 100644
index a65b9e547d..0000000000
--- a/tools/perf/tests/builtin-test-list.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <dirent.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <linux/ctype.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/zalloc.h>
-#include <string.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <subcmd/exec-cmd.h>
-#include <subcmd/parse-options.h>
-#include <sys/wait.h>
-#include <sys/stat.h>
-#include "builtin.h"
-#include "builtin-test-list.h"
-#include "color.h"
-#include "debug.h"
-#include "hist.h"
-#include "intlist.h"
-#include "string2.h"
-#include "symbol.h"
-#include "tests.h"
-#include "util/rlimit.h"
-
-
-/*
- * As this is a singleton built once for the run of the process, there is
- * no value in trying to free it and just let it stay around until process
- * exits when it's cleaned up.
- */
-static size_t files_num = 0;
-static struct script_file *files = NULL;
-static int files_max_width = 0;
-
-static const char *shell_tests__dir(char *path, size_t size)
-{
- const char *devel_dirs[] = { "./tools/perf/tests", "./tests", };
- char *exec_path;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(devel_dirs); ++i) {
- struct stat st;
-
- if (!lstat(devel_dirs[i], &st)) {
- scnprintf(path, size, "%s/shell", devel_dirs[i]);
- if (!lstat(devel_dirs[i], &st))
- return path;
- }
- }
-
- /* Then installed path. */
- exec_path = get_argv_exec_path();
- scnprintf(path, size, "%s/tests/shell", exec_path);
- free(exec_path);
- return path;
-}
-
-static const char *shell_test__description(char *description, size_t size,
- const char *path, const char *name)
-{
- FILE *fp;
- char filename[PATH_MAX];
- int ch;
-
- path__join(filename, sizeof(filename), path, name);
- fp = fopen(filename, "r");
- if (!fp)
- return NULL;
-
- /* Skip first line - should be #!/bin/sh Shebang */
- do {
- ch = fgetc(fp);
- } while (ch != EOF && ch != '\n');
-
- description = fgets(description, size, fp);
- fclose(fp);
-
- /* Assume first char on line is omment everything after that desc */
- return description ? strim(description + 1) : NULL;
-}
-
-/* Is this full file path a shell script */
-static bool is_shell_script(const char *path)
-{
- const char *ext;
-
- ext = strrchr(path, '.');
- if (!ext)
- return false;
- if (!strcmp(ext, ".sh")) { /* Has .sh extension */
- if (access(path, R_OK | X_OK) == 0) /* Is executable */
- return true;
- }
- return false;
-}
-
-/* Is this file in this dir a shell script (for test purposes) */
-static bool is_test_script(const char *path, const char *name)
-{
- char filename[PATH_MAX];
-
- path__join(filename, sizeof(filename), path, name);
- if (!is_shell_script(filename)) return false;
- return true;
-}
-
-/* Duplicate a string and fall over and die if we run out of memory */
-static char *strdup_check(const char *str)
-{
- char *newstr;
-
- newstr = strdup(str);
- if (!newstr) {
- pr_err("Out of memory while duplicating test script string\n");
- abort();
- }
- return newstr;
-}
-
-static void append_script(const char *dir, const char *file, const char *desc)
-{
- struct script_file *files_tmp;
- size_t files_num_tmp;
- int width;
-
- files_num_tmp = files_num + 1;
- if (files_num_tmp >= SIZE_MAX) {
- pr_err("Too many script files\n");
- abort();
- }
- /* Realloc is good enough, though we could realloc by chunks, not that
- * anyone will ever measure performance here */
- files_tmp = realloc(files,
- (files_num_tmp + 1) * sizeof(struct script_file));
- if (files_tmp == NULL) {
- pr_err("Out of memory while building test list\n");
- abort();
- }
- /* Add file to end and NULL terminate the struct array */
- files = files_tmp;
- files_num = files_num_tmp;
- files[files_num - 1].dir = strdup_check(dir);
- files[files_num - 1].file = strdup_check(file);
- files[files_num - 1].desc = strdup_check(desc);
- files[files_num].dir = NULL;
- files[files_num].file = NULL;
- files[files_num].desc = NULL;
-
- width = strlen(desc); /* Track max width of desc */
- if (width > files_max_width)
- files_max_width = width;
-}
-
-static void append_scripts_in_dir(const char *path)
-{
- struct dirent **entlist;
- struct dirent *ent;
- int n_dirs, i;
- char filename[PATH_MAX];
-
- /* List files, sorted by alpha */
- n_dirs = scandir(path, &entlist, NULL, alphasort);
- if (n_dirs == -1)
- return;
- for (i = 0; i < n_dirs && (ent = entlist[i]); i++) {
- if (ent->d_name[0] == '.')
- continue; /* Skip hidden files */
- if (is_test_script(path, ent->d_name)) { /* It's a test */
- char bf[256];
- const char *desc = shell_test__description
- (bf, sizeof(bf), path, ent->d_name);
-
- if (desc) /* It has a desc line - valid script */
- append_script(path, ent->d_name, desc);
- } else if (is_directory(path, ent)) { /* Scan the subdir */
- path__join(filename, sizeof(filename),
- path, ent->d_name);
- append_scripts_in_dir(filename);
- }
- }
- for (i = 0; i < n_dirs; i++) /* Clean up */
- zfree(&entlist[i]);
- free(entlist);
-}
-
-const struct script_file *list_script_files(void)
-{
- char path_dir[PATH_MAX];
- const char *path;
-
- if (files)
- return files; /* Singleton - we already know our list */
-
- path = shell_tests__dir(path_dir, sizeof(path_dir)); /* Walk dir */
- append_scripts_in_dir(path);
-
- return files;
-}
-
-int list_script_max_width(void)
-{
- list_script_files(); /* Ensure we have scanned all scripts */
- return files_max_width;
-}
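
The removed lister above recursed through the shell test directory with scandir()/alphasort(), kept only executable *.sh files, and took the comment line right after the shebang as the test description; that logic now lives in tests-scripts.c (added further down in this series). A rough shell re-creation of the same walk, useful for eyeballing which scripts would be picked up (the tests/shell path and the sed expression are assumptions drawn from the removed code, not part of the patch):

  # Executable *.sh files; description = line 2 with its first character
  # (the '#' comment marker) and any following whitespace stripped.
  find tools/perf/tests/shell -name '*.sh' | sort | while read -r script; do
      [ -x "$script" ] || continue
      desc=$(sed -n '2s/^.[[:space:]]*//p' "$script")
      [ -n "$desc" ] && printf '%s: %s\n' "$script" "$desc"
  done
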
diff --git a/tools/perf/tests/builtin-test-list.h b/tools/perf/tests/builtin-test-list.h
deleted file mode 100644
index eb81f3aa66..0000000000
--- a/tools/perf/tests/builtin-test-list.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-struct script_file {
- char *dir;
- char *file;
- char *desc;
-};
-
-/* List available script tests to run - singleton - never freed */
-const struct script_file *list_script_files(void);
-/* Get maximum width of description string */
-int list_script_max_width(void);
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 4a5973f9bb..d13ee7683d 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -6,6 +6,7 @@
*/
#include <fcntl.h>
#include <errno.h>
+#include <poll.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
@@ -21,17 +22,25 @@
#include "debug.h"
#include "color.h"
#include <subcmd/parse-options.h>
+#include <subcmd/run-command.h>
#include "string2.h"
#include "symbol.h"
#include "util/rlimit.h"
+#include "util/strbuf.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <subcmd/exec-cmd.h>
#include <linux/zalloc.h>
-#include "builtin-test-list.h"
+#include "tests-scripts.h"
+/*
+ * Command line option to not fork the test running in the same process and
+ * making them easier to debug.
+ */
static bool dont_fork;
+/* Fork the tests in parallel and then wait for their completion. */
+static bool parallel;
const char *dso_to_test;
const char *test_objdump_path = "objdump";
@@ -130,6 +139,7 @@ static struct test_suite *generic_tests[] = {
static struct test_suite **tests[] = {
generic_tests,
arch_tests,
+ NULL, /* shell tests created at runtime. */
};
static struct test_workload *workloads[] = {
@@ -208,76 +218,36 @@ static bool perf_test__matches(const char *desc, int curr, int argc, const char
return false;
}
-static int run_test(struct test_suite *test, int subtest)
-{
- int status, err = -1, child = dont_fork ? 0 : fork();
- char sbuf[STRERR_BUFSIZE];
-
- if (child < 0) {
- pr_err("failed to fork test: %s\n",
- str_error_r(errno, sbuf, sizeof(sbuf)));
- return -1;
- }
-
- if (!child) {
- if (!dont_fork) {
- pr_debug("test child forked, pid %d\n", getpid());
-
- if (verbose <= 0) {
- int nullfd = open("/dev/null", O_WRONLY);
-
- if (nullfd >= 0) {
- close(STDERR_FILENO);
- close(STDOUT_FILENO);
-
- dup2(nullfd, STDOUT_FILENO);
- dup2(STDOUT_FILENO, STDERR_FILENO);
- close(nullfd);
- }
- } else {
- signal(SIGSEGV, sighandler_dump_stack);
- signal(SIGFPE, sighandler_dump_stack);
- }
- }
-
- err = test_function(test, subtest)(test, subtest);
- if (!dont_fork)
- exit(err);
- }
-
- if (!dont_fork) {
- wait(&status);
+struct child_test {
+ struct child_process process;
+ struct test_suite *test;
+ int test_num;
+ int subtest;
+};
- if (WIFEXITED(status)) {
- err = (signed char)WEXITSTATUS(status);
- pr_debug("test child finished with %d\n", err);
- } else if (WIFSIGNALED(status)) {
- err = -1;
- pr_debug("test child interrupted\n");
- }
- }
+static int run_test_child(struct child_process *process)
+{
+ struct child_test *child = container_of(process, struct child_test, process);
+ int err;
- return err;
+ pr_debug("--- start ---\n");
+ pr_debug("test child forked, pid %d\n", getpid());
+ err = test_function(child->test, child->subtest)(child->test, child->subtest);
+ pr_debug("---- end(%d) ----\n", err);
+ fflush(NULL);
+ return -err;
}
-#define for_each_test(j, k, t) \
- for (j = 0, k = 0; j < ARRAY_SIZE(tests); j++, k = 0) \
- while ((t = tests[j][k++]) != NULL)
-
-static int test_and_print(struct test_suite *t, int subtest)
+static int print_test_result(struct test_suite *t, int i, int subtest, int result, int width)
{
- int err;
-
- pr_debug("\n--- start ---\n");
- err = run_test(t, subtest);
- pr_debug("---- end ----\n");
+ if (has_subtests(t)) {
+ int subw = width > 2 ? width - 2 : width;
- if (!has_subtests(t))
- pr_debug("%s:", t->desc);
- else
- pr_debug("%s subtest %d:", t->desc, subtest + 1);
+ pr_info("%3d.%1d: %-*s:", i + 1, subtest + 1, subw, test_description(t, subtest));
+ } else
+ pr_info("%3d: %-*s:", i + 1, width, test_description(t, subtest));
- switch (err) {
+ switch (result) {
case TEST_OK:
pr_info(" Ok\n");
break;
@@ -296,99 +266,186 @@ static int test_and_print(struct test_suite *t, int subtest)
break;
}
- return err;
+ return 0;
}
-struct shell_test {
- const char *dir;
- const char *file;
-};
-
-static int shell_test__run(struct test_suite *test, int subdir __maybe_unused)
+static int finish_test(struct child_test *child_test, int width)
{
- int err;
- char script[PATH_MAX];
- struct shell_test *st = test->priv;
+ struct test_suite *t = child_test->test;
+ int i = child_test->test_num;
+ int subi = child_test->subtest;
+ int out = child_test->process.out;
+ int err = child_test->process.err;
+ bool out_done = out <= 0;
+ bool err_done = err <= 0;
+ struct strbuf out_output = STRBUF_INIT;
+ struct strbuf err_output = STRBUF_INIT;
+ int ret;
- path__join(script, sizeof(script) - 3, st->dir, st->file);
+ /*
+ * For test suites with subtests, display the suite name ahead of the
+ * sub test names.
+ */
+ if (has_subtests(t) && subi == 0)
+ pr_info("%3d: %-*s:\n", i + 1, width, test_description(t, -1));
+
+ /*
+ * Busy loop reading from the child's stdout and stderr that are set to
+ * be non-blocking until EOF.
+ */
+ if (!out_done)
+ fcntl(out, F_SETFL, O_NONBLOCK);
+ if (!err_done)
+ fcntl(err, F_SETFL, O_NONBLOCK);
+ if (verbose > 1) {
+ if (has_subtests(t))
+ pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi));
+ else
+ pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
+ }
+ while (!out_done || !err_done) {
+ struct pollfd pfds[2] = {
+ { .fd = out,
+ .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
+ },
+ { .fd = err,
+ .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
+ },
+ };
+ char buf[512];
+ ssize_t len;
- if (verbose > 0)
- strncat(script, " -v", sizeof(script) - strlen(script) - 1);
+ /* Poll to avoid excessive spinning, timeout set for 1000ms. */
+ poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/1000);
+ if (!out_done && pfds[0].revents) {
+ errno = 0;
+ len = read(out, buf, sizeof(buf) - 1);
- err = system(script);
- if (!err)
- return TEST_OK;
+ if (len <= 0) {
+ out_done = errno != EAGAIN;
+ } else {
+ buf[len] = '\0';
+ if (verbose > 1)
+ fprintf(stdout, "%s", buf);
+ else
+ strbuf_addstr(&out_output, buf);
+ }
+ }
+ if (!err_done && pfds[1].revents) {
+ errno = 0;
+ len = read(err, buf, sizeof(buf) - 1);
- return WEXITSTATUS(err) == 2 ? TEST_SKIP : TEST_FAIL;
+ if (len <= 0) {
+ err_done = errno != EAGAIN;
+ } else {
+ buf[len] = '\0';
+ if (verbose > 1)
+ fprintf(stdout, "%s", buf);
+ else
+ strbuf_addstr(&err_output, buf);
+ }
+ }
+ }
+ /* Clean up child process. */
+ ret = finish_command(&child_test->process);
+ if (verbose == 1 && ret == TEST_FAIL) {
+ /* Add header for test that was skipped above. */
+ if (has_subtests(t))
+ pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi));
+ else
+ pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
+ fprintf(stdout, "%s", out_output.buf);
+ fprintf(stderr, "%s", err_output.buf);
+ }
+ strbuf_release(&out_output);
+ strbuf_release(&err_output);
+ print_test_result(t, i, subi, ret, width);
+ if (out > 0)
+ close(out);
+ if (err > 0)
+ close(err);
+ return 0;
}
-static int run_shell_tests(int argc, const char *argv[], int i, int width,
- struct intlist *skiplist)
+static int start_test(struct test_suite *test, int i, int subi, struct child_test **child,
+ int width)
{
- struct shell_test st;
- const struct script_file *files, *file;
+ int err;
- files = list_script_files();
- if (!files)
+ *child = NULL;
+ if (dont_fork) {
+ pr_debug("--- start ---\n");
+ err = test_function(test, subi)(test, subi);
+ pr_debug("---- end ----\n");
+ print_test_result(test, i, subi, err, width);
return 0;
- for (file = files; file->dir; file++) {
- int curr = i++;
- struct test_case test_cases[] = {
- {
- .desc = file->desc,
- .run_case = shell_test__run,
- },
- { .name = NULL, }
- };
- struct test_suite test_suite = {
- .desc = test_cases[0].desc,
- .test_cases = test_cases,
- .priv = &st,
- };
- st.dir = file->dir;
-
- if (test_suite.desc == NULL ||
- !perf_test__matches(test_suite.desc, curr, argc, argv))
- continue;
-
- st.file = file->file;
- pr_info("%3d: %-*s:", i, width, test_suite.desc);
-
- if (intlist__find(skiplist, i)) {
- color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
- continue;
- }
+ }
- test_and_print(&test_suite, 0);
+ *child = zalloc(sizeof(**child));
+ if (!*child)
+ return -ENOMEM;
+
+ (*child)->test = test;
+ (*child)->test_num = i;
+ (*child)->subtest = subi;
+ (*child)->process.pid = -1;
+ (*child)->process.no_stdin = 1;
+ if (verbose <= 0) {
+ (*child)->process.no_stdout = 1;
+ (*child)->process.no_stderr = 1;
+ } else {
+ (*child)->process.out = -1;
+ (*child)->process.err = -1;
}
- return 0;
+ (*child)->process.no_exec_cmd = run_test_child;
+ err = start_command(&(*child)->process);
+ if (err || parallel)
+ return err;
+ return finish_test(*child, width);
}
+#define for_each_test(j, k, t) \
+ for (j = 0, k = 0; j < ARRAY_SIZE(tests); j++, k = 0) \
+ while ((t = tests[j][k++]) != NULL)
+
static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
{
struct test_suite *t;
unsigned int j, k;
int i = 0;
- int width = list_script_max_width();
+ int width = 0;
+ size_t num_tests = 0;
+ struct child_test **child_tests;
+ int child_test_num = 0;
for_each_test(j, k, t) {
int len = strlen(test_description(t, -1));
if (width < len)
width = len;
+
+ if (has_subtests(t)) {
+ for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
+ len = strlen(test_description(t, subi));
+ if (width < len)
+ width = len;
+ num_tests++;
+ }
+ } else {
+ num_tests++;
+ }
}
+ child_tests = calloc(num_tests, sizeof(*child_tests));
+ if (!child_tests)
+ return -ENOMEM;
for_each_test(j, k, t) {
int curr = i++;
- int subi;
if (!perf_test__matches(test_description(t, -1), curr, argc, argv)) {
bool skip = true;
- int subn;
-
- subn = num_subtests(t);
- for (subi = 0; subi < subn; subi++) {
+ for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
if (perf_test__matches(test_description(t, subi),
curr, argc, argv))
skip = false;
@@ -398,74 +455,45 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
continue;
}
- pr_info("%3d: %-*s:", i, width, test_description(t, -1));
-
if (intlist__find(skiplist, i)) {
+ pr_info("%3d: %-*s:", curr + 1, width, test_description(t, -1));
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
continue;
}
if (!has_subtests(t)) {
- test_and_print(t, -1);
- } else {
- int subn = num_subtests(t);
- /*
- * minus 2 to align with normal testcases.
- * For subtest we print additional '.x' in number.
- * for example:
- *
- * 35: Test LLVM searching and compiling :
- * 35.1: Basic BPF llvm compiling test : Ok
- */
- int subw = width > 2 ? width - 2 : width;
-
- if (subn <= 0) {
- color_fprintf(stderr, PERF_COLOR_YELLOW,
- " Skip (not compiled in)\n");
- continue;
- }
- pr_info("\n");
-
- for (subi = 0; subi < subn; subi++) {
- int len = strlen(test_description(t, subi));
+ int err = start_test(t, curr, -1, &child_tests[child_test_num++], width);
- if (subw < len)
- subw = len;
+ if (err) {
+ /* TODO: if parallel waitpid the already forked children. */
+ free(child_tests);
+ return err;
}
+ } else {
+ for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
+ int err;
- for (subi = 0; subi < subn; subi++) {
if (!perf_test__matches(test_description(t, subi),
curr, argc, argv))
continue;
- pr_info("%3d.%1d: %-*s:", i, subi + 1, subw,
- test_description(t, subi));
- test_and_print(t, subi);
+ err = start_test(t, curr, subi, &child_tests[child_test_num++],
+ width);
+ if (err)
+ return err;
}
}
}
+ for (i = 0; i < child_test_num; i++) {
+ if (parallel) {
+ int ret = finish_test(child_tests[i], width);
- return run_shell_tests(argc, argv, i, width, skiplist);
-}
-
-static int perf_test__list_shell(int argc, const char **argv, int i)
-{
- const struct script_file *files, *file;
-
- files = list_script_files();
- if (!files)
- return 0;
- for (file = files; file->dir; file++) {
- int curr = i++;
- struct test_suite t = {
- .desc = file->desc
- };
-
- if (!perf_test__matches(t.desc, curr, argc, argv))
- continue;
-
- pr_info("%3d: %s\n", i, t.desc);
+ if (ret)
+ return ret;
+ }
+ free(child_tests[i]);
}
+ free(child_tests);
return 0;
}
@@ -492,9 +520,6 @@ static int perf_test__list(int argc, const char **argv)
test_description(t, subi));
}
}
-
- perf_test__list_shell(argc, argv, i);
-
return 0;
}
@@ -536,6 +561,8 @@ int cmd_test(int argc, const char **argv)
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('F', "dont-fork", &dont_fork,
"Do not fork for testcase"),
+ OPT_BOOLEAN('p', "parallel", &parallel,
+ "Run the tests altogether in parallel"),
OPT_STRING('w', "workload", &workload, "work", "workload to run for testing"),
OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
OPT_STRING(0, "objdump", &test_objdump_path, "path",
@@ -554,6 +581,7 @@ int cmd_test(int argc, const char **argv)
/* Unbuffered output */
setvbuf(stdout, NULL, _IONBF, 0);
+ tests[2] = create_script_test_suites();
argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
if (argc >= 1 && !strcmp(argv[0], "list"))
return perf_test__list(argc - 1, argv + 1);
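
In short, builtin-test.c now starts every (sub)test through subcmd's start_command()/finish_command() with a run_test_child() callback; when run verbosely it captures the child's stdout/stderr through pipes and, at -v, replays them only for failing tests, while -vv streams them live. A sketch of how the reworked runner is driven from the command line, based only on the options visible above (the test-name filters are just examples):

  # Default: each selected test is forked and reaped one at a time.
  perf test
  # New in this patch: fork all selected tests up front, reap them as they finish.
  perf test -p
  # Debug a suite in-process, without forking (e.g. under gdb):
  perf test -F "Symbols"
  # With a single -v the captured output is printed only for failing tests;
  # with -vv it is streamed while the child runs.
  perf test -vv "maps"
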
diff --git a/tools/perf/tests/expand-cgroup.c b/tools/perf/tests/expand-cgroup.c
index 9c1a1f18db..31966ff856 100644
--- a/tools/perf/tests/expand-cgroup.c
+++ b/tools/perf/tests/expand-cgroup.c
@@ -127,8 +127,7 @@ static int expand_group_events(void)
parse_events_error__init(&err);
ret = parse_events(evlist, event_str, &err);
if (ret < 0) {
- pr_debug("failed to parse event '%s', err %d, str '%s'\n",
- event_str, ret, err.str);
+ pr_debug("failed to parse event '%s', err %d\n", event_str, ret);
parse_events_error__print(&err, event_str);
goto out;
}
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 8a4da7eb63..a1f8adf853 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -83,6 +83,7 @@ make_no_libelf := NO_LIBELF=1
make_no_libunwind := NO_LIBUNWIND=1
make_no_libdw_dwarf_unwind := NO_LIBDW_DWARF_UNWIND=1
make_no_backtrace := NO_BACKTRACE=1
+make_no_libcapstone := NO_CAPSTONE=1
make_no_libnuma := NO_LIBNUMA=1
make_no_libaudit := NO_LIBAUDIT=1
make_no_libbionic := NO_LIBBIONIC=1
@@ -122,7 +123,7 @@ make_minimal += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
make_minimal += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
make_minimal += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
make_minimal += NO_LIBCRYPTO=1 NO_SDT=1 NO_JVMTI=1 NO_LIBZSTD=1
-make_minimal += NO_LIBCAP=1 NO_SYSCALL_TABLE=1
+make_minimal += NO_LIBCAP=1 NO_SYSCALL_TABLE=1 NO_CAPSTONE=1
# $(run) contains all available tests
run := make_pure
@@ -152,6 +153,7 @@ run += make_no_libelf
run += make_no_libunwind
run += make_no_libdw_dwarf_unwind
run += make_no_backtrace
+run += make_no_libcapstone
run += make_no_libnuma
run += make_no_libaudit
run += make_no_libbionic
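
The new NO_CAPSTONE permutation (and its addition to make_minimal) can be exercised like the existing ones. Assuming the harness exposes each $(run) entry as a make target, which is how the other permutations are normally invoked, a sketch would be:

  cd tools/perf
  make -f tests/make make_no_libcapstone
  make -f tests/make make_minimal
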
diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c
index bb3fbfe5a7..b15417a0d6 100644
--- a/tools/perf/tests/maps.c
+++ b/tools/perf/tests/maps.c
@@ -156,6 +156,9 @@ static int test__maps__merge_in(struct test_suite *t __maybe_unused, int subtest
TEST_ASSERT_VAL("merge check failed", !ret);
maps__zput(maps);
+ map__zput(map_kcore1);
+ map__zput(map_kcore2);
+ map__zput(map_kcore3);
return TEST_OK;
}
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index fbdf710d5e..feb5727584 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -2506,11 +2506,10 @@ static int test_event(const struct evlist_test *e)
parse_events_error__init(&err);
ret = parse_events(evlist, e->name, &err);
if (ret) {
- pr_debug("failed to parse event '%s', err %d, str '%s'\n",
- e->name, ret, err.str);
+ pr_debug("failed to parse event '%s', err %d\n", e->name, ret);
parse_events_error__print(&err, e->name);
ret = TEST_FAIL;
- if (err.str && strstr(err.str, "can't access trace events"))
+ if (parse_events_error__contains(&err, "can't access trace events"))
ret = TEST_SKIP;
} else {
ret = e->check(evlist);
@@ -2535,8 +2534,8 @@ static int test_event_fake_pmu(const char *str)
ret = __parse_events(evlist, str, /*pmu_filter=*/NULL, &err,
&perf_pmu__fake, /*warn_if_reordered=*/true);
if (ret) {
- pr_debug("failed to parse event '%s', err %d, str '%s'\n",
- str, ret, err.str);
+ pr_debug("failed to parse event '%s', err %d\n",
+ str, ret);
parse_events_error__print(&err, str);
}
diff --git a/tools/perf/tests/pmu-events.c b/tools/perf/tests/pmu-events.c
index a56d329057..47a7c32775 100644
--- a/tools/perf/tests/pmu-events.c
+++ b/tools/perf/tests/pmu-events.c
@@ -70,7 +70,7 @@ static const struct perf_pmu_test_event segment_reg_loads_any = {
.event = {
.pmu = "default_core",
.name = "segment_reg_loads.any",
- .event = "event=0x6,period=200000,umask=0x80",
+ .event = "event=6,period=200000,umask=0x80",
.desc = "Number of segment register loads",
.topic = "other",
},
@@ -82,7 +82,7 @@ static const struct perf_pmu_test_event dispatch_blocked_any = {
.event = {
.pmu = "default_core",
.name = "dispatch_blocked.any",
- .event = "event=0x9,period=200000,umask=0x20",
+ .event = "event=9,period=200000,umask=0x20",
.desc = "Memory cluster signals to block micro-op dispatch for any reason",
.topic = "other",
},
@@ -94,11 +94,11 @@ static const struct perf_pmu_test_event eist_trans = {
.event = {
.pmu = "default_core",
.name = "eist_trans",
- .event = "event=0x3a,period=200000,umask=0x0",
+ .event = "event=0x3a,period=200000",
.desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
.topic = "other",
},
- .alias_str = "event=0x3a,period=0x30d40,umask=0",
+ .alias_str = "event=0x3a,period=0x30d40",
.alias_long_desc = "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
};
@@ -128,7 +128,7 @@ static const struct perf_pmu_test_event *core_events[] = {
static const struct perf_pmu_test_event uncore_hisi_ddrc_flux_wcmd = {
.event = {
.name = "uncore_hisi_ddrc.flux_wcmd",
- .event = "event=0x2",
+ .event = "event=2",
.desc = "DDRC write commands",
.topic = "uncore",
.long_desc = "DDRC write commands",
@@ -156,13 +156,13 @@ static const struct perf_pmu_test_event unc_cbo_xsnp_response_miss_eviction = {
static const struct perf_pmu_test_event uncore_hyphen = {
.event = {
.name = "event-hyphen",
- .event = "event=0xe0,umask=0x00",
+ .event = "event=0xe0",
.desc = "UNC_CBO_HYPHEN",
.topic = "uncore",
.long_desc = "UNC_CBO_HYPHEN",
.pmu = "uncore_cbox",
},
- .alias_str = "event=0xe0,umask=0",
+ .alias_str = "event=0xe0",
.alias_long_desc = "UNC_CBO_HYPHEN",
.matching_pmu = "uncore_cbox_0",
};
@@ -170,13 +170,13 @@ static const struct perf_pmu_test_event uncore_hyphen = {
static const struct perf_pmu_test_event uncore_two_hyph = {
.event = {
.name = "event-two-hyph",
- .event = "event=0xc0,umask=0x00",
+ .event = "event=0xc0",
.desc = "UNC_CBO_TWO_HYPH",
.topic = "uncore",
.long_desc = "UNC_CBO_TWO_HYPH",
.pmu = "uncore_cbox",
},
- .alias_str = "event=0xc0,umask=0",
+ .alias_str = "event=0xc0",
.alias_long_desc = "UNC_CBO_TWO_HYPH",
.matching_pmu = "uncore_cbox_0",
};
@@ -184,7 +184,7 @@ static const struct perf_pmu_test_event uncore_two_hyph = {
static const struct perf_pmu_test_event uncore_hisi_l3c_rd_hit_cpipe = {
.event = {
.name = "uncore_hisi_l3c.rd_hit_cpipe",
- .event = "event=0x7",
+ .event = "event=7",
.desc = "Total read hits",
.topic = "uncore",
.long_desc = "Total read hits",
@@ -265,7 +265,7 @@ static const struct perf_pmu_test_event sys_ccn_pmu_read_cycles = {
static const struct perf_pmu_test_event sys_cmn_pmu_hnf_cache_miss = {
.event = {
.name = "sys_cmn_pmu.hnf_cache_miss",
- .event = "eventid=0x1,type=0x5",
+ .event = "eventid=1,type=5",
.desc = "Counts total cache misses in first lookup result (high priority)",
.topic = "uncore",
.pmu = "uncore_sys_cmn_pmu",
diff --git a/tools/perf/tests/shell/base_probe/settings.sh b/tools/perf/tests/shell/base_probe/settings.sh
new file mode 100644
index 0000000000..123621c7f9
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/settings.sh
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# settings.sh of perf_probe test
+# Author: Michael Petlan <mpetlan@redhat.com>
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+#
+
+export TEST_NAME="perf_probe"
+
+export MY_ARCH=`arch`
+
+if [ -n "$PERFSUITE_RUN_DIR" ]; then
+ # when $PERFSUITE_RUN_DIR is set to something, all the logs and temp files will be placed there
+ # --> the $PERFSUITE_RUN_DIR/perf_something/examples and $PERFSUITE_RUN_DIR/perf_something/logs
+ # dirs will be used for that
+ export PERFSUITE_RUN_DIR=`readlink -f $PERFSUITE_RUN_DIR`
+ export CURRENT_TEST_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME"
+ export MAKE_TARGET_DIR="$CURRENT_TEST_DIR/examples"
+ test -d "$MAKE_TARGET_DIR" || mkdir -p "$MAKE_TARGET_DIR"
+ export LOGS_DIR="$PERFSUITE_RUN_DIR/$TEST_NAME/logs"
+ test -d "$LOGS_DIR" || mkdir -p "$LOGS_DIR"
+else
+ # when $PERFSUITE_RUN_DIR is not set, logs will be placed here
+ export CURRENT_TEST_DIR="."
+ export LOGS_DIR="."
+fi
+
+check_kprobes_available()
+{
+ test -e /sys/kernel/debug/tracing/kprobe_events
+}
+
+check_uprobes_available()
+{
+ test -e /sys/kernel/debug/tracing/uprobe_events
+}
+
+clear_all_probes()
+{
+ echo 0 > /sys/kernel/debug/tracing/events/enable
+ check_kprobes_available && echo > /sys/kernel/debug/tracing/kprobe_events
+ check_uprobes_available && echo > /sys/kernel/debug/tracing/uprobe_events
+}
+
+check_sdt_support()
+{
+ $CMD_PERF list sdt | grep sdt > /dev/null 2> /dev/null
+}
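
A minimal sketch of how a base_probe testcase is meant to consume these helpers, mirroring test_adding_kernel.sh below: source the common init plus the local settings, skip cleanly when kprobes are unavailable, and keep the probe list clean around the work (the probed function and log file name are only examples):

  . ../common/init.sh
  . ./settings.sh

  if ! check_kprobes_available; then
      print_overall_skipped
      exit 0
  fi

  clear_all_probes
  $CMD_PERF probe --add inode_permission 2> "$LOGS_DIR/example_add.err"
  clear_all_probes
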
diff --git a/tools/perf/tests/shell/base_probe/test_adding_kernel.sh b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
new file mode 100755
index 0000000000..a5d707efad
--- /dev/null
+++ b/tools/perf/tests/shell/base_probe/test_adding_kernel.sh
@@ -0,0 +1,278 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# test_adding_kernel of perf_probe test
+# Author: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This test tests adding of probes, their correct listing
+# and removing.
+#
+
+# include working environment
+. ../common/init.sh
+. ./settings.sh
+
+# shellcheck disable=SC2034 # the variable is later used after the working environment is included
+THIS_TEST_NAME=`basename $0 .sh`
+TEST_RESULT=0
+
+TEST_PROBE=${TEST_PROBE:-"inode_permission"}
+
+check_kprobes_available
+if [ $? -ne 0 ]; then
+ print_overall_skipped
+ exit 0
+fi
+
+
+### basic probe adding
+
+for opt in "" "-a" "--add"; do
+ clear_all_probes
+ $CMD_PERF probe $opt $TEST_PROBE 2> $LOGS_DIR/adding_kernel_add$opt.err
+ PERF_EXIT_CODE=$?
+
+ ../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_add$opt.err
+ CHECK_EXIT_CODE=$?
+
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding probe $TEST_PROBE :: $opt"
+ (( TEST_RESULT += $? ))
+done
+
+
+### listing added probe :: perf list
+
+# any added probes should appear in perf-list output
+$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "probe:${TEST_PROBE}(?:_\d+)?\s+\[Tracepoint event\]" "Metric Groups:" < $LOGS_DIR/adding_kernel_list.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf list"
+(( TEST_RESULT += $? ))
+
+
+### listing added probe :: perf probe -l
+
+# '-l' should list all the added probes as well
+$CMD_PERF probe -l > $LOGS_DIR/adding_kernel_list-l.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "\s*probe:${TEST_PROBE}(?:_\d+)?\s+\(on ${TEST_PROBE}(?:[:\+]$RE_NUMBER_HEX)?@.+\)" < $LOGS_DIR/adding_kernel_list-l.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing added probe :: perf probe -l"
+(( TEST_RESULT += $? ))
+
+
+### using added probe
+
+$CMD_PERF stat -e probe:$TEST_PROBE\* -o $LOGS_DIR/adding_kernel_using_probe.log -- cat /proc/uptime > /dev/null
+PERF_EXIT_CODE=$?
+
+REGEX_STAT_HEADER="\s*Performance counter stats for \'cat /proc/uptime\':"
+REGEX_STAT_VALUES="\s*\d+\s+probe:$TEST_PROBE"
+# the value should be greater than 1
+REGEX_STAT_VALUE_NONZERO="\s*[1-9][0-9]*\s+probe:$TEST_PROBE"
+REGEX_STAT_TIME="\s*$RE_NUMBER\s+seconds (?:time elapsed|user|sys)"
+../common/check_all_lines_matched.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUES" "$REGEX_STAT_TIME" "$RE_LINE_COMMENT" "$RE_LINE_EMPTY" < $LOGS_DIR/adding_kernel_using_probe.log
+CHECK_EXIT_CODE=$?
+../common/check_all_patterns_found.pl "$REGEX_STAT_HEADER" "$REGEX_STAT_VALUE_NONZERO" "$REGEX_STAT_TIME" < $LOGS_DIR/adding_kernel_using_probe.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using added probe"
+(( TEST_RESULT += $? ))
+
+
+### removing added probe
+
+# '-d' should remove the probe
+$CMD_PERF probe -d $TEST_PROBE\* 2> $LOGS_DIR/adding_kernel_removing.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" < $LOGS_DIR/adding_kernel_removing.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "deleting added probe"
+(( TEST_RESULT += $? ))
+
+
+### listing removed probe
+
+# removed probes should NOT appear in perf-list output
+$CMD_PERF list probe:\* > $LOGS_DIR/adding_kernel_list_removed.log
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "$RE_LINE_EMPTY" "List of pre-defined events" "Metric Groups:" < $LOGS_DIR/adding_kernel_list_removed.log
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "listing removed probe (should NOT be listed)"
+(( TEST_RESULT += $? ))
+
+
+### dry run
+
+# the '-n' switch should run it in dry mode
+$CMD_PERF probe -n --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_dryrun.err
+PERF_EXIT_CODE=$?
+
+# check for the output (should be the same as usual)
+../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_dryrun.err
+CHECK_EXIT_CODE=$?
+
+# check that no probe was added in real
+! ( $CMD_PERF probe -l | grep "probe:$TEST_PROBE" )
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "dry run :: adding probe"
+(( TEST_RESULT += $? ))
+
+
+### force-adding probes
+
+# when using '--force' a probe should be added even if it is already there
+$CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_01.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_01.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: first probe adding"
+(( TEST_RESULT += $? ))
+
+# adding existing probe without '--force' should fail
+! $CMD_PERF probe --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_02.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Error: event \"$TEST_PROBE\" already exists." "Error: Failed to add events." < $LOGS_DIR/adding_kernel_forceadd_02.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (without force)"
+(( TEST_RESULT += $? ))
+
+# adding existing probe with '--force' should pass
+NO_OF_PROBES=`$CMD_PERF probe -l | wc -l`
+$CMD_PERF probe --force --add $TEST_PROBE 2> $LOGS_DIR/adding_kernel_forceadd_03.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new events?:" "probe:${TEST_PROBE}_${NO_OF_PROBES}" "on $TEST_PROBE" < $LOGS_DIR/adding_kernel_forceadd_03.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "force-adding probes :: second probe adding (with force)"
+(( TEST_RESULT += $? ))
+
+
+### using doubled probe
+
+# since they are the same, they should produce the same results
+$CMD_PERF stat -e probe:$TEST_PROBE -e probe:${TEST_PROBE}_${NO_OF_PROBES} -x';' -o $LOGS_DIR/adding_kernel_using_two.log -- bash -c 'cat /proc/cpuinfo > /dev/null'
+PERF_EXIT_CODE=$?
+
+REGEX_LINE="$RE_NUMBER;+probe:${TEST_PROBE}_?(?:$NO_OF_PROBES)?;$RE_NUMBER;$RE_NUMBER"
+../common/check_all_lines_matched.pl "$REGEX_LINE" "$RE_LINE_EMPTY" "$RE_LINE_COMMENT" < $LOGS_DIR/adding_kernel_using_two.log
+CHECK_EXIT_CODE=$?
+
+VALUE_1=`grep "$TEST_PROBE;" $LOGS_DIR/adding_kernel_using_two.log | awk -F';' '{print $1}'`
+VALUE_2=`grep "${TEST_PROBE}_${NO_OF_PROBES};" $LOGS_DIR/adding_kernel_using_two.log | awk -F';' '{print $1}'`
+
+test $VALUE_1 -eq $VALUE_2
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "using doubled probe"
+
+
+### removing multiple probes
+
+# using wildcards should remove all matching probes
+$CMD_PERF probe --del \* 2> $LOGS_DIR/adding_kernel_removing_wildcard.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_lines_matched.pl "Removed event: probe:$TEST_PROBE" "Removed event: probe:${TEST_PROBE}_1" < $LOGS_DIR/adding_kernel_removing_wildcard.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "removing multiple probes"
+(( TEST_RESULT += $? ))
+
+
+### wildcard adding support
+
+$CMD_PERF probe -nf --max-probes=512 -a 'vfs_* $params' 2> $LOGS_DIR/adding_kernel_adding_wildcard.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "probe:vfs_mknod" "probe:vfs_create" "probe:vfs_rmdir" "probe:vfs_link" "probe:vfs_write" < $LOGS_DIR/adding_kernel_adding_wildcard.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "wildcard adding support"
+(( TEST_RESULT += $? ))
+
+
+### non-existing variable
+
+# perf probe should survive a non-existing variable probing attempt
+{ $CMD_PERF probe 'vfs_read somenonexistingrandomstuffwhichisalsoprettylongorevenlongertoexceed64' ; } 2> $LOGS_DIR/adding_kernel_nonexisting.err
+PERF_EXIT_CODE=$?
+
+# the exitcode should not be 0 or segfault
+test $PERF_EXIT_CODE -ne 139 -a $PERF_EXIT_CODE -ne 0
+PERF_EXIT_CODE=$?
+
+# check that the error message is reasonable
+../common/check_all_patterns_found.pl "Failed to find" "somenonexistingrandomstuffwhichisalsoprettylongorevenlongertoexceed64" < $LOGS_DIR/adding_kernel_nonexisting.err
+CHECK_EXIT_CODE=$?
+../common/check_all_patterns_found.pl "in this function|at this address" "Error" "Failed to add events" < $LOGS_DIR/adding_kernel_nonexisting.err
+(( CHECK_EXIT_CODE += $? ))
+../common/check_all_lines_matched.pl "Failed to find" "Error" "Probe point .+ not found" "optimized out" "Use.+\-\-range option to show.+location range" < $LOGS_DIR/adding_kernel_nonexisting.err
+(( CHECK_EXIT_CODE += $? ))
+../common/check_no_patterns_found.pl "$RE_SEGFAULT" < $LOGS_DIR/adding_kernel_nonexisting.err
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "non-existing variable"
+(( TEST_RESULT += $? ))
+
+
+### function with return value
+
+# adding probe with return value
+$CMD_PERF probe --add "$TEST_PROBE%return \$retval" 2> $LOGS_DIR/adding_kernel_func_retval_add.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "Added new events?:" "probe:$TEST_PROBE" "on $TEST_PROBE%return with \\\$retval" < $LOGS_DIR/adding_kernel_func_retval_add.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: add"
+(( TEST_RESULT += $? ))
+
+# recording some data
+$CMD_PERF record -e probe:$TEST_PROBE\* -o $CURRENT_TEST_DIR/perf.data -- cat /proc/cpuinfo > /dev/null 2> $LOGS_DIR/adding_kernel_func_retval_record.err
+PERF_EXIT_CODE=$?
+
+../common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < $LOGS_DIR/adding_kernel_func_retval_record.err
+CHECK_EXIT_CODE=$?
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function with retval :: record"
+(( TEST_RESULT += $? ))
+
+# perf script should report the function calls with the correct arg values
+$CMD_PERF script -i $CURRENT_TEST_DIR/perf.data > $LOGS_DIR/adding_kernel_func_retval_script.log
+PERF_EXIT_CODE=$?
+
+REGEX_SCRIPT_LINE="\s*cat\s+$RE_NUMBER\s+\[$RE_NUMBER\]\s+$RE_NUMBER:\s+probe:$TEST_PROBE\w*:\s+\($RE_NUMBER_HEX\s+<\-\s+$RE_NUMBER_HEX\)\s+arg1=$RE_NUMBER_HEX"
+../common/check_all_lines_matched.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log
+CHECK_EXIT_CODE=$?
+../common/check_all_patterns_found.pl "$REGEX_SCRIPT_LINE" < $LOGS_DIR/adding_kernel_func_retval_script.log
+(( CHECK_EXIT_CODE += $? ))
+
+print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "function argument probing :: script"
+(( TEST_RESULT += $? ))
+
+
+clear_all_probes
+
+# print overall results
+print_overall_results "$TEST_RESULT"
+exit $?
diff --git a/tools/perf/tests/shell/common/check_all_lines_matched.pl b/tools/perf/tests/shell/common/check_all_lines_matched.pl
new file mode 100755
index 0000000000..fded48959a
--- /dev/null
+++ b/tools/perf/tests/shell/common/check_all_lines_matched.pl
@@ -0,0 +1,39 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
+
+@regexps = @ARGV;
+
+$max_printed_lines = 20;
+$max_printed_lines = $ENV{TESTLOG_ERR_MSG_MAX_LINES} if (defined $ENV{TESTLOG_ERR_MSG_MAX_LINES});
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+$passed = 1;
+$lines_printed = 0;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ $line_matched = 0;
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $line_matched = 1;
+ last;
+ }
+ }
+
+ unless ($line_matched)
+ {
+ if ($lines_printed++ < $max_printed_lines)
+ {
+ print "Line did not match any pattern: \"$_\"\n" unless $quiet;
+ }
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
diff --git a/tools/perf/tests/shell/common/check_all_patterns_found.pl b/tools/perf/tests/shell/common/check_all_patterns_found.pl
new file mode 100755
index 0000000000..11bdf1d346
--- /dev/null
+++ b/tools/perf/tests/shell/common/check_all_patterns_found.pl
@@ -0,0 +1,34 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
+
+@regexps = @ARGV;
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+%found = ();
+$passed = 1;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $found{$r} = 1; # FIXME: maybe add counters -- how many times was the regexp matched
+ }
+ }
+}
+
+for $r (@regexps)
+{
+ unless (exists $found{$r})
+ {
+ print "Regexp not found: \"$r\"\n" unless $quiet;
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
diff --git a/tools/perf/tests/shell/common/check_no_patterns_found.pl b/tools/perf/tests/shell/common/check_no_patterns_found.pl
new file mode 100755
index 0000000000..770999e87a
--- /dev/null
+++ b/tools/perf/tests/shell/common/check_no_patterns_found.pl
@@ -0,0 +1,34 @@
+#!/usr/bin/perl
+# SPDX-License-Identifier: GPL-2.0
+
+@regexps = @ARGV;
+
+$quiet = 1;
+$quiet = 0 if (defined $ENV{TESTLOG_VERBOSITY} && $ENV{TESTLOG_VERBOSITY} ge 2);
+
+%found = ();
+$passed = 1;
+
+while (<STDIN>)
+{
+ s/\n//;
+
+ for $r (@regexps)
+ {
+ if (/$r/)
+ {
+ $found{$r} = 1;
+ }
+ }
+}
+
+for $r (@regexps)
+{
+ if (exists $found{$r})
+ {
+ print "Regexp found: \"$r\"\n" unless $quiet;
+ $passed = 0;
+ }
+}
+
+exit ($passed == 0);
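
The three checkers above differ only in what they assert: check_all_lines_matched.pl requires every input line to match at least one of the given Perl regexps, check_all_patterns_found.pl requires every regexp to match at least one line, and check_no_patterns_found.pl requires that none of them match; each exits 0 on success. A typical pipeline, run from tests/shell with the regexp fragments sourced from common/patterns.sh (the recorded command and log paths are only examples):

  . ./common/patterns.sh
  perf record -o /tmp/perf.data -- true 2> /tmp/record.log
  ./common/check_all_patterns_found.pl "$RE_LINE_RECORD1" "$RE_LINE_RECORD2" < /tmp/record.log
  ./common/check_no_patterns_found.pl "$RE_SEGFAULT" < /tmp/record.log
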
diff --git a/tools/perf/tests/shell/common/init.sh b/tools/perf/tests/shell/common/init.sh
new file mode 100644
index 0000000000..aadeaf782e
--- /dev/null
+++ b/tools/perf/tests/shell/common/init.sh
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# init.sh
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This file should be used for initialization of basic functions
+# for checking, reporting results etc.
+#
+#
+
+
+. ../common/settings.sh
+. ../common/patterns.sh
+
+THIS_TEST_NAME=`basename $0 .sh`
+
+_echo()
+{
+ test "$TESTLOG_VERBOSITY" -ne 0 && echo -e "$@"
+}
+
+print_results()
+{
+ PERF_RETVAL="$1"; shift
+ CHECK_RETVAL="$1"; shift
+ FAILURE_REASON=""
+ TASK_COMMENT="$@"
+ if [ $PERF_RETVAL -eq 0 -a $CHECK_RETVAL -eq 0 ]; then
+ _echo "$MPASS-- [ PASS ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT"
+ return 0
+ else
+ if [ $PERF_RETVAL -ne 0 ]; then
+ FAILURE_REASON="command exitcode"
+ fi
+ if [ $CHECK_RETVAL -ne 0 ]; then
+ test -n "$FAILURE_REASON" && FAILURE_REASON="$FAILURE_REASON + "
+ FAILURE_REASON="$FAILURE_REASON""output regexp parsing"
+ fi
+ _echo "$MFAIL-- [ FAIL ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT ($FAILURE_REASON)"
+ return 1
+ fi
+}
+
+print_overall_results()
+{
+ RETVAL="$1"; shift
+ if [ $RETVAL -eq 0 ]; then
+ _echo "$MALLPASS## [ PASS ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY"
+ else
+ _echo "$MALLFAIL## [ FAIL ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME SUMMARY :: $RETVAL failures found"
+ fi
+ return $RETVAL
+}
+
+print_testcase_skipped()
+{
+ TASK_COMMENT="$@"
+ _echo "$MSKIP-- [ SKIP ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $TASK_COMMENT :: testcase skipped"
+ return 0
+}
+
+print_overall_skipped()
+{
+ _echo "$MSKIP## [ SKIP ] ##$MEND $TEST_NAME :: $THIS_TEST_NAME :: testcase skipped"
+ return 0
+}
+
+print_warning()
+{
+ WARN_COMMENT="$@"
+ _echo "$MWARN-- [ WARN ] --$MEND $TEST_NAME :: $THIS_TEST_NAME :: $WARN_COMMENT"
+ return 0
+}
+
+# this function should skip a testcase if the testsuite is not run in
+# a runmode that fits the testcase --> if the suite runs in BASIC mode
+# all STANDARD and EXPERIMENTAL testcases will be skipped; if the suite
+# runs in STANDARD mode, all EXPERIMENTAL testcases will be skipped and
+# if the suite runs in EXPERIMENTAL mode, nothing is skipped
+consider_skipping()
+{
+ TESTCASE_RUNMODE="$1"
+ # the runmode of a testcase needs to be at least the current suite's runmode
+ if [ $PERFTOOL_TESTSUITE_RUNMODE -lt $TESTCASE_RUNMODE ]; then
+ print_overall_skipped
+ exit 0
+ fi
+}
+
+detect_baremetal()
+{
+ # return values:
+ # 0 = bare metal
+ # 1 = virtualization detected
+ # 2 = unknown state
+ VIRT=`systemd-detect-virt 2>/dev/null`
+ test $? -eq 127 && return 2
+ test "$VIRT" = "none"
+}
+
+detect_intel()
+{
+ # return values:
+ # 0 = is Intel
+ # 1 = is not Intel or unknown
+ grep "vendor_id" < /proc/cpuinfo | grep -q "GenuineIntel"
+}
+
+detect_amd()
+{
+ # return values:
+ # 0 = is AMD
+ # 1 = is not AMD or unknown
+ grep "vendor_id" < /proc/cpuinfo | grep -q "AMD"
+}
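
Besides the PASS/FAIL reporting, init.sh gives every testcase run-mode gating and environment detection. A short sketch of how those are intended to be combined (the warning text is illustrative):

  # Skip this testcase unless the suite runs in at least STANDARD mode.
  consider_skipping "$RUNMODE_STANDARD"

  # Point out virtualized environments, where event counts may be imprecise.
  detect_baremetal || print_warning "running virtualized, counts may differ"

  # Vendor checks for tests that need vendor-specific events.
  detect_intel && echo "GenuineIntel CPU"
  detect_amd && echo "AMD CPU"
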
diff --git a/tools/perf/tests/shell/common/patterns.sh b/tools/perf/tests/shell/common/patterns.sh
new file mode 100644
index 0000000000..21dab25c7b
--- /dev/null
+++ b/tools/perf/tests/shell/common/patterns.sh
@@ -0,0 +1,268 @@
+# SPDX-License-Identifier: GPL-2.0
+
+export RE_NUMBER="[0-9\.]+"
+# Number
+# Examples:
+# 123.456
+
+
+export RE_NUMBER_HEX="[0-9A-Fa-f]+"
+# Hexadecimal number
+# Examples:
+# 1234
+# a58d
+# aBcD
+# deadbeef
+
+
+export RE_DATE_YYYYMMDD="[0-9]{4}-(?:(?:01|03|05|07|08|10|12)-(?:[0-2][0-9]|3[0-1])|02-[0-2][0-9]|(?:(?:04|06|09|11)-(?:[0-2][0-9]|30)))"
+# Date in YYYY-MM-DD form
+# Examples:
+# 1990-02-29
+# 0015-07-31
+# 2456-12-31
+#! 2012-13-01
+#! 1963-09-31
+
+
+export RE_TIME="(?:[0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9]"
+# Time
+# Examples:
+# 15:12:27
+# 23:59:59
+#! 24:00:00
+#! 11:25:60
+#! 17:60:15
+
+
+export RE_DATE_TIME="\w+\s+\w+\s+$RE_NUMBER\s+$RE_TIME\s+$RE_NUMBER"
+# Time and date
+# Examples:
+# Wed Feb 12 10:46:26 2020
+# Mon Mar 2 13:27:06 2020
+#! St úno 12 10:57:21 CET 2020
+#! Po úno 14 15:17:32 2010
+
+
+export RE_ADDRESS="0x$RE_NUMBER_HEX"
+# Memory address
+# Examples:
+# 0x123abc
+# 0xffffffff9abe8ae8
+# 0x0
+
+
+export RE_ADDRESS_NOT_NULL="0x[0-9A-Fa-f]*[1-9A-Fa-f]+[0-9A-Fa-f]*"
+# Memory address (not NULL)
+# Examples:
+# 0xffffffff9abe8ae8
+#! 0x0
+#! 0x0000000000000000
+
+export RE_PROCESS_PID="[^\/]+\/\d+"
+# A process with PID
+# Example:
+# sleep/4102
+# test_overhead./866185
+# in:imjournal/1096
+# random#$& test/866607
+
+export RE_EVENT_ANY="[\w\-\:\/_=,]+"
+# Name of any event (universal)
+# Examples:
+# cpu-cycles
+# cpu/event=12,umask=34/
+# r41e1
+# nfs:nfs_getattr_enter
+
+
+export RE_EVENT="[\w\-:_]+"
+# Name of an usual event
+# Examples:
+# cpu-cycles
+
+
+export RE_EVENT_RAW="r$RE_NUMBER_HEX"
+# Specification of a raw event
+# Examples:
+# r41e1
+# r1a
+
+
+export RE_EVENT_CPU="cpu/(\w+=$RE_NUMBER_HEX,?)+/p*"
+# Specification of a CPU event
+# Examples:
+# cpu/event=12,umask=34/pp
+
+
+export RE_EVENT_UNCORE="uncore/[\w_]+/"
+# Specification of an uncore event
+# Examples:
+# uncore/qhl_request_local_reads/
+
+
+export RE_EVENT_SUBSYSTEM="[\w\-]+:[\w\-]+"
+# Name of an event from subsystem
+# Examples:
+# ext4:ext4_ordered_write_end
+# sched:sched_switch
+
+
+export RE_FILE_NAME="[\w\+\.-]+"
+# A filename
+# Examples:
+# libstdc++.so.6
+#! some/path
+
+
+export RE_PATH_ABSOLUTE="(?:\/$RE_FILE_NAME)+"
+# A full filepath
+# Examples:
+# /usr/lib64/somelib.so.5.4.0
+# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko
+# /usr/bin/mv
+#! some/relative/path
+#! ./some/relative/path
+
+
+export RE_PATH="(?:$RE_FILE_NAME)?$RE_PATH_ABSOLUTE"
+# A filepath
+# Examples:
+# /usr/lib64/somelib.so.5.4.0
+# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko
+# ./.emacs
+# src/fs/file.c
+
+
+export RE_DSO="(?:$RE_PATH_ABSOLUTE(?: \(deleted\))?|\[kernel\.kallsyms\]|\[unknown\]|\[vdso\]|\[kernel\.vmlinux\][\.\w]*)"
+# A DSO name in various result tables
+# Examples:
+# /usr/lib64/somelib.so.5.4.0
+# /usr/bin/somebinart (deleted)
+# /lib/modules/4.3.0-rc5/kernel/fs/xfs/xfs.ko
+# [kernel.kallsyms]
+# [kernel.vmlinux]
+# [vdso]
+# [unknown]
+
+
+export RE_LINE_COMMENT="^#.*"
+# A comment line
+# Examples:
+# # Started on Thu Sep 10 11:43:00 2015
+
+
+export RE_LINE_EMPTY="^\s*$"
+# An empty line with possible whitespaces
+# Examples:
+#
+
+
+export RE_LINE_RECORD1="^\[\s+perf\s+record:\s+Woken up $RE_NUMBER times? to write data\s+\].*$"
+# The first line of perf-record "OK" output
+# Examples:
+# [ perf record: Woken up 1 times to write data ]
+
+
+export RE_LINE_RECORD2="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\.data(?:\.\d+)?\s*\(~?$RE_NUMBER samples\)\s+\].*$"
+# The second line of perf-record "OK" output
+# Examples:
+# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf.data (~109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB /some/temp/dir/perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data.3 (109 samples) ]
+
+
+export RE_LINE_RECORD2_TOLERANT="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\.data(?:\.\d+)?\s*(?:\(~?$RE_NUMBER samples\))?\s+\].*$"
+# The second line of perf-record "OK" output, even no samples is OK here
+# Examples:
+# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf.data (~109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB /some/temp/dir/perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perf.data.3 (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf.data ]
+
+
+export RE_LINE_RECORD2_TOLERANT_FILENAME="^\[\s+perf\s+record:\s+Captured and wrote $RE_NUMBER\s*MB\s+(?:[\w\+\.-]*(?:$RE_PATH)?\/)?perf\w*\.data(?:\.\d+)?\s*\(~?$RE_NUMBER samples\)\s+\].*$"
+# The second line of perf-record "OK" output
+# Examples:
+# [ perf record: Captured and wrote 0.405 MB perf.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf_ls.data (~109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB perf_aNyCaSe.data (109 samples) ]
+# [ perf record: Captured and wrote 0.405 MB ./perfdata.data.3 (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB /some/temp/dir/my_own.data (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB ./UPPERCASE.data (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB ./aNyKiNDoF.data.3 (109 samples) ]
+#! [ perf record: Captured and wrote 0.405 MB perf.data ]
+
+
+export RE_LINE_TRACE_FULL="^\s*$RE_NUMBER\s*\(\s*$RE_NUMBER\s*ms\s*\):\s*$RE_PROCESS_PID\s+.*\)\s+=\s+(:?\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.115 ( 0.005 ms): sleep/4102 open(filename: 0xd09e2ab2, flags: CLOEXEC ) = 3
+# 0.157 ( 0.005 ms): sleep/4102 mmap(len: 3932736, prot: EXEC|READ, flags: PRIVATE|DENYWRITE, fd: 3 ) = 0x7f89d0605000
+#! 0.115 ( 0.005 ms): sleep/4102 open(filename: 0xd09e2ab2, flags: CLOEXEC ) =
+
+export RE_LINE_TRACE_ONE_PROC="^\s*$RE_NUMBER\s*\(\s*$RE_NUMBER\s*ms\s*\):\s*\w+\(.*\)\s+=\s+(?:\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.115 ( 0.005 ms): open(filename: 0xd09e2ab2, flags: CLOEXEC ) = 3
+# 0.157 ( 0.005 ms): mmap(len: 3932736, prot: EXEC|READ, flags: PRIVATE|DENYWRITE, fd: 3 ) = 0x7f89d0605000
+#! 0.115 ( 0.005 ms): open(filename: 0xd09e2ab2, flags: CLOEXEC ) =
+
+export RE_LINE_TRACE_CONTINUED="^\s*(:?$RE_NUMBER|\?)\s*\(\s*($RE_NUMBER\s*ms\s*)?\):\s*($RE_PROCESS_PID\s*)?\.\.\.\s*\[continued\]:\s+\w+\(\).*\s+=\s+(?:\-?$RE_NUMBER|0x$RE_NUMBER_HEX).*$"
+# A line of perf-trace output
+# Examples:
+# 0.000 ( 0.000 ms): ... [continued]: nanosleep()) = 0
+# 0.000 ( 0.000 ms): ... [continued]: nanosleep()) = 0x00000000
+# ? ( ): packagekitd/94838 ... [continued]: poll()) = 0 (Timeout)
+#! 0.000 ( 0.000 ms): ... [continued]: nanosleep()) =
+
+export RE_LINE_TRACE_UNFINISHED="^\s*$RE_NUMBER\s*\(\s*\):\s*$RE_PROCESS_PID\s+.*\)\s+\.\.\.\s*$"
+# A line of perf-trace output
+# Examples:
+# 901.040 ( ): in:imjournal/1096 ppoll(ufds: 0x7f701a5adb70, nfds: 1, tsp: 0x7f701a5adaf0, sigsetsize: 8) ...
+# 613.727 ( ): gmain/1099 poll(ufds: 0x56248f6b64b0, nfds: 2, timeout_msecs: 3996) ...
+
+export RE_LINE_TRACE_SUMMARY_HEADER="\s*syscall\s+calls\s+(?:errors\s+)?total\s+min\s+avg\s+max\s+stddev"
+# A header of a perf-trace summary table
+# Example:
+# syscall calls total min avg max stddev
+# syscall calls errors total min avg max stddev
+
+
+export RE_LINE_TRACE_SUMMARY_CONTENT="^\s*\w+\s+(?:$RE_NUMBER\s+){5,6}$RE_NUMBER%"
+# A line of a perf-trace summary table
+# Example:
+# open 3 0.017 0.005 0.006 0.007 10.90%
+# openat 2 0 0.017 0.008 0.009 0.010 12.29%
+
+
+export RE_LINE_REPORT_CONTENT="^\s+$RE_NUMBER%\s+\w+\s+\S+\s+\S+\s+\S+" # FIXME
+# A line from typicap perf report --stdio output
+# Example:
+# 100.00% sleep [kernel.vmlinux] [k] syscall_return_slowpath
+
+
+export RE_TASK="\s+[\w~\/ \.\+:#-]+(?:\[-1(?:\/\d+)?\]|\[\d+(?:\/\d+)?\])"
+# A name of a task used for perf sched timehist -s
+# Example:
+# sleep[62755]
+# runtest.sh[62762]
+# gmain[705/682]
+# xfsaild/dm-0[495]
+# kworker/u8:1-ev[62714]
+# :-1[-1/62756]
+# :-1[-1]
+# :-1[62756]
+
+
+export RE_SEGFAULT=".*(?:Segmentation\sfault|SIGSEGV|\score\s|dumped|segfault).*"
+# Possible variations of the segfault message
+# Example:
+# /bin/bash: line 1: 32 Segmentation fault timeout 15s
+# Segmentation fault (core dumped)
+# Program terminated with signal SIGSEGV
+#! WARNING: 12323431 isn't a 'cpu_core', please use a CPU list in the 'cpu_core' range (0-15)
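
The fragments are exported, plain Perl regexps, so they can be handed straight to the checker scripts or composed into larger expressions, as the probe test above does. A quick self-check using the perf-record example line from the comments:

  . ./common/patterns.sh
  echo "[ perf record: Woken up 1 times to write data ]" | \
      ./common/check_all_lines_matched.pl "$RE_LINE_RECORD1"
  echo "exit code: $?"    # 0 - the line matched
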
diff --git a/tools/perf/tests/shell/common/settings.sh b/tools/perf/tests/shell/common/settings.sh
new file mode 100644
index 0000000000..361641dbaa
--- /dev/null
+++ b/tools/perf/tests/shell/common/settings.sh
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# settings.sh
+# Author: Michael Petlan <mpetlan@redhat.com>
+#
+# Description:
+#
+# This file contains global settings for the whole testsuite.
+# Its purpose is to make it easier when it is necessary i.e. to
+# change the usual sample command which is used in all of the tests
+# in many files.
+#
+# This file is intended to be sourced in the tests.
+#
+
+#### which perf to use in the testing
+export CMD_PERF=${CMD_PERF:-`which perf`}
+
+#### basic programs examinated by perf
+export CMD_BASIC_SLEEP="sleep 0.1"
+export CMD_QUICK_SLEEP="sleep 0.01"
+export CMD_LONGER_SLEEP="sleep 2"
+export CMD_DOUBLE_LONGER_SLEEP="sleep 4"
+export CMD_VERY_LONG_SLEEP="sleep 30"
+export CMD_SIMPLE="true"
+
+#### testsuite run mode
+# define constants:
+export RUNMODE_BASIC=0
+export RUNMODE_STANDARD=1
+export RUNMODE_EXPERIMENTAL=2
+# default runmode is STANDARD
+export PERFTOOL_TESTSUITE_RUNMODE=${PERFTOOL_TESTSUITE_RUNMODE:-$RUNMODE_STANDARD}
+
+#### common settings
+export TESTLOG_VERBOSITY=${TESTLOG_VERBOSITY:-2}
+export TESTLOG_FORCE_COLOR=${TESTLOG_FORCE_COLOR:-n}
+export TESTLOG_ERR_MSG_MAX_LINES=${TESTLOG_ERR_MSG_MAX_LINES:-20}
+export TESTLOG_CLEAN=${TESTLOG_CLEAN:-y}
+
+#### other environment-related settings
+export TEST_IGNORE_MISSING_PMU=${TEST_IGNORE_MISSING_PMU:-n}
+
+#### clear locale
+export LC_ALL=C
+
+#### colors
+if [ -t 1 -o "$TESTLOG_FORCE_COLOR" = "yes" ]; then
+ export MPASS="\e[32m"
+ export MALLPASS="\e[1;32m"
+ export MFAIL="\e[31m"
+ export MALLFAIL="\e[1;31m"
+ export MWARN="\e[1;35m"
+ export MSKIP="\e[33m"
+ export MHIGH="\e[1;33m"
+ export MEND="\e[m"
+else
+ export MPASS=""
+ export MALLPASS=""
+ export MFAIL=""
+ export MALLFAIL=""
+ export MWARN=""
+ export MSKIP=""
+ export MHIGH=""
+ export MEND=""
+fi
+
+
+#### test parametrization
+if [ ! -d ./common ]; then
+ # set parameters based on runmode
+ if [ -f ../common/parametrization.$PERFTOOL_TESTSUITE_RUNMODE.sh ]; then
+ . ../common/parametrization.$PERFTOOL_TESTSUITE_RUNMODE.sh
+ fi
+ # if some parameters haven't been set until now, set them to default
+ if [ -f ../common/parametrization.sh ]; then
+ . ../common/parametrization.sh
+ fi
+fi
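
For context, a testcase that sources this file gets both the command under test and the colour escapes in one go. A minimal sketch (variable names are taken from the file above; the PASS/FAIL layout is only illustrative, not the testsuite's real reporting helper):

  . ../common/settings.sh

  $CMD_PERF stat -- $CMD_BASIC_SLEEP > /dev/null 2>&1
  if [ $? -eq 0 ]; then
          echo -e "${MPASS}PASS${MEND} perf stat basic run"
  else
          echo -e "${MFAIL}FAIL${MEND} perf stat basic run"
          exit 1
  fi
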
diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh
index 5d59c32ae3..561c93b75d 100644
--- a/tools/perf/tests/shell/lib/perf_has_symbol.sh
+++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh
@@ -3,7 +3,7 @@
perf_has_symbol()
{
- if perf test -vv "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
+ if perf test -vv -F "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
echo "perf does have symbol '$1'"
return 0
fi
diff --git a/tools/perf/tests/shell/lib/perf_json_output_lint.py b/tools/perf/tests/shell/lib/perf_json_output_lint.py
index ea55d5ea1c..abc1fd7377 100644
--- a/tools/perf/tests/shell/lib/perf_json_output_lint.py
+++ b/tools/perf/tests/shell/lib/perf_json_output_lint.py
@@ -15,6 +15,7 @@ ap.add_argument('--event', action='store_true')
ap.add_argument('--per-core', action='store_true')
ap.add_argument('--per-thread', action='store_true')
ap.add_argument('--per-cache', action='store_true')
+ap.add_argument('--per-cluster', action='store_true')
ap.add_argument('--per-die', action='store_true')
ap.add_argument('--per-node', action='store_true')
ap.add_argument('--per-socket', action='store_true')
@@ -49,6 +50,7 @@ def check_json_output(expected_items):
'cgroup': lambda x: True,
'cpu': lambda x: isint(x),
'cache': lambda x: True,
+ 'cluster': lambda x: True,
'die': lambda x: True,
'event': lambda x: True,
'event-runtime': lambda x: isfloat(x),
@@ -88,7 +90,7 @@ try:
expected_items = 7
elif args.interval or args.per_thread or args.system_wide_no_aggr:
expected_items = 8
- elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cache:
+ elif args.per_core or args.per_socket or args.per_node or args.per_die or args.per_cluster or args.per_cache:
expected_items = 9
else:
# If no option is specified, don't check the number of items.
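
The linter is driven from stat+json_output.sh further down in this patch; the new aggregation mode is exercised the same way as the existing ones. An illustrative invocation (paths simplified relative to how the shell test resolves them):

  perf stat -j --per-cluster -a true 2>&1 | \
          python3 tools/perf/tests/shell/lib/perf_json_output_lint.py --per-cluster
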
diff --git a/tools/perf/tests/shell/lib/perf_metric_validation.py b/tools/perf/tests/shell/lib/perf_metric_validation.py
index 50a34a9cc0..a2d2352521 100644
--- a/tools/perf/tests/shell/lib/perf_metric_validation.py
+++ b/tools/perf/tests/shell/lib/perf_metric_validation.py
@@ -1,4 +1,4 @@
-#SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0
import re
import csv
import json
@@ -6,36 +6,61 @@ import argparse
from pathlib import Path
import subprocess
+
+class TestError:
+ def __init__(self, metric: list[str], wl: str, value: list[float], low: float, up=float('nan'), description=str()):
+ self.metric: list = metric # multiple metrics in relationship type tests
+ self.workloads = [wl] # multiple workloads possible
+ self.collectedValue: list = value
+ self.valueLowBound = low
+ self.valueUpBound = up
+ self.description = description
+
+ def __repr__(self) -> str:
+ if len(self.metric) > 1:
+ return "\nMetric Relationship Error: \tThe collected value of metric {0}\n\
+ \tis {1} in workload(s): {2} \n\
+ \tbut expected value range is [{3}, {4}]\n\
+ \tRelationship rule description: \'{5}\'".format(self.metric, self.collectedValue, self.workloads,
+ self.valueLowBound, self.valueUpBound, self.description)
+ elif len(self.collectedValue) == 0:
+ return "\nNo Metric Value Error: \tMetric {0} returns with no value \n\
+ \tworkload(s): {1}".format(self.metric, self.workloads)
+ else:
+ return "\nWrong Metric Value Error: \tThe collected value of metric {0}\n\
+ \tis {1} in workload(s): {2}\n\
+ \tbut expected value range is [{3}, {4}]"\
+ .format(self.metric, self.collectedValue, self.workloads,
+ self.valueLowBound, self.valueUpBound)
+
+
class Validator:
def __init__(self, rulefname, reportfname='', t=5, debug=False, datafname='', fullrulefname='', workload='true', metrics=''):
self.rulefname = rulefname
self.reportfname = reportfname
self.rules = None
- self.collectlist:str = metrics
+ self.collectlist: str = metrics
self.metrics = self.__set_metrics(metrics)
self.skiplist = set()
self.tolerance = t
self.workloads = [x for x in workload.split(",") if x]
- self.wlidx = 0 # idx of current workloads
- self.allresults = dict() # metric results of all workload
- self.allignoremetrics = dict() # metrics with no results or negative results
- self.allfailtests = dict()
+ self.wlidx = 0 # idx of current workloads
+ self.allresults = dict() # metric results of all workload
self.alltotalcnt = dict()
self.allpassedcnt = dict()
- self.allerrlist = dict()
- self.results = dict() # metric results of current workload
+ self.results = dict() # metric results of current workload
# vars for test pass/failure statistics
- self.ignoremetrics= set() # metrics with no results or negative results, neg result counts as a failed test
- self.failtests = dict()
+        # metrics with no results or negative results; a negative result counts as a failed test
+ self.ignoremetrics = set()
self.totalcnt = 0
self.passedcnt = 0
# vars for errors
self.errlist = list()
# vars for Rule Generator
- self.pctgmetrics = set() # Percentage rule
+ self.pctgmetrics = set() # Percentage rule
# vars for debug
self.datafname = datafname
@@ -69,10 +94,10 @@ class Validator:
ensure_ascii=True,
indent=4)
- def get_results(self, idx:int = 0):
+ def get_results(self, idx: int = 0):
return self.results[idx]
- def get_bounds(self, lb, ub, error, alias={}, ridx:int = 0) -> list:
+ def get_bounds(self, lb, ub, error, alias={}, ridx: int = 0) -> list:
"""
Get bounds and tolerance from lb, ub, and error.
        If missing lb, use 0.0; missing ub, use float('inf'); missing error, use self.tolerance.
@@ -85,7 +110,7 @@ class Validator:
        tolerance, denormalized based on the upper bound value
"""
# init ubv and lbv to invalid values
- def get_bound_value (bound, initval, ridx):
+ def get_bound_value(bound, initval, ridx):
val = initval
if isinstance(bound, int) or isinstance(bound, float):
val = bound
@@ -113,10 +138,10 @@ class Validator:
return lbv, ubv, denormerr
- def get_value(self, name:str, ridx:int = 0) -> list:
+ def get_value(self, name: str, ridx: int = 0) -> list:
"""
Get value of the metric from self.results.
- If result of this metric is not provided, the metric name will be added into self.ignoremetics and self.errlist.
+        If the result of this metric is not provided, the metric name will be added into self.ignoremetrics.
All future test(s) on this metric will fail.
@param name: name of the metric
@@ -142,7 +167,7 @@ class Validator:
Check if metrics value are non-negative.
One metric is counted as one test.
Failure: when metric value is negative or not provided.
- Metrics with negative value will be added into the self.failtests['PositiveValueTest'] and self.ignoremetrics.
+        Metrics with negative values will be added into self.ignoremetrics.
"""
negmetric = dict()
pcnt = 0
@@ -155,25 +180,27 @@ class Validator:
else:
pcnt += 1
tcnt += 1
+        # The first round of collect_perf() runs these metrics with the simple
+        # workload "true". We give the metrics a second chance with a longer
+        # workload if fewer than 20 of them failed the positive-value test.
if len(rerun) > 0 and len(rerun) < 20:
second_results = dict()
self.second_test(rerun, second_results)
for name, val in second_results.items():
- if name not in negmetric: continue
+ if name not in negmetric:
+ continue
if val >= 0:
del negmetric[name]
pcnt += 1
- self.failtests['PositiveValueTest']['Total Tests'] = tcnt
- self.failtests['PositiveValueTest']['Passed Tests'] = pcnt
if len(negmetric.keys()):
self.ignoremetrics.update(negmetric.keys())
- negmessage = ["{0}(={1:.4f})".format(name, val) for name, val in negmetric.items()]
- self.failtests['PositiveValueTest']['Failed Tests'].append({'NegativeValue': negmessage})
+ self.errlist.extend(
+ [TestError([m], self.workloads[self.wlidx], negmetric[m], 0) for m in negmetric.keys()])
return
- def evaluate_formula(self, formula:str, alias:dict, ridx:int = 0):
+ def evaluate_formula(self, formula: str, alias: dict, ridx: int = 0):
"""
Evaluate the value of formula.
@@ -187,10 +214,11 @@ class Validator:
sign = "+"
f = str()
- #TODO: support parenthesis?
+ # TODO: support parenthesis?
for i in range(len(formula)):
if i+1 == len(formula) or formula[i] in ('+', '-', '*', '/'):
- s = alias[formula[b:i]] if i+1 < len(formula) else alias[formula[b:]]
+ s = alias[formula[b:i]] if i + \
+ 1 < len(formula) else alias[formula[b:]]
v = self.get_value(s, ridx)
if not v:
errs.append(s)
@@ -228,49 +256,49 @@ class Validator:
alias = dict()
for m in rule['Metrics']:
alias[m['Alias']] = m['Name']
- lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex'])
- val, f = self.evaluate_formula(rule['Formula'], alias, ridx=rule['RuleIndex'])
+ lbv, ubv, t = self.get_bounds(
+ rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'], alias, ridx=rule['RuleIndex'])
+ val, f = self.evaluate_formula(
+ rule['Formula'], alias, ridx=rule['RuleIndex'])
+
+ lb = rule['RangeLower']
+ ub = rule['RangeUpper']
+ if isinstance(lb, str):
+ if lb in alias:
+ lb = alias[lb]
+ if isinstance(ub, str):
+ if ub in alias:
+ ub = alias[ub]
+
if val == -1:
- self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Description':f})
+ self.errlist.append(TestError([m['Name'] for m in rule['Metrics']], self.workloads[self.wlidx], [],
+ lb, ub, rule['Description']))
elif not self.check_bound(val, lbv, ubv, t):
- lb = rule['RangeLower']
- ub = rule['RangeUpper']
- if isinstance(lb, str):
- if lb in alias:
- lb = alias[lb]
- if isinstance(ub, str):
- if ub in alias:
- ub = alias[ub]
- self.failtests['RelationshipTest']['Failed Tests'].append({'RuleIndex': rule['RuleIndex'], 'Formula':f,
- 'RangeLower': lb, 'LowerBoundValue': self.get_value(lb),
- 'RangeUpper': ub, 'UpperBoundValue':self.get_value(ub),
- 'ErrorThreshold': t, 'CollectedValue': val})
+ self.errlist.append(TestError([m['Name'] for m in rule['Metrics']], self.workloads[self.wlidx], [val],
+ lb, ub, rule['Description']))
else:
self.passedcnt += 1
- self.failtests['RelationshipTest']['Passed Tests'] += 1
self.totalcnt += 1
- self.failtests['RelationshipTest']['Total Tests'] += 1
return
-
# Single Metric Test
- def single_test(self, rule:dict):
+ def single_test(self, rule: dict):
"""
Validate if the metrics are in the required value range.
eg. lower_bound <= metrics_value <= upper_bound
One metric is counted as one test in this type of test.
One rule may include one or more metrics.
        Failure: when the metric value is not provided or the value is outside the bounds.
- This test updates self.total_cnt and records failed tests in self.failtest['SingleMetricTest'].
+        This test updates self.totalcnt.
@param rule: dict with metrics to validate and the value range requirement
"""
- lbv, ubv, t = self.get_bounds(rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'])
+ lbv, ubv, t = self.get_bounds(
+ rule['RangeLower'], rule['RangeUpper'], rule['ErrorThreshold'])
metrics = rule['Metrics']
passcnt = 0
totalcnt = 0
- faillist = list()
failures = dict()
rerun = list()
for m in metrics:
@@ -286,25 +314,20 @@ class Validator:
second_results = dict()
self.second_test(rerun, second_results)
for name, val in second_results.items():
- if name not in failures: continue
+ if name not in failures:
+ continue
if self.check_bound(val, lbv, ubv, t):
passcnt += 1
del failures[name]
else:
- failures[name] = val
+ failures[name] = [val]
self.results[0][name] = val
self.totalcnt += totalcnt
self.passedcnt += passcnt
- self.failtests['SingleMetricTest']['Total Tests'] += totalcnt
- self.failtests['SingleMetricTest']['Passed Tests'] += passcnt
if len(failures.keys()) != 0:
- faillist = [{'MetricName':name, 'CollectedValue':val} for name, val in failures.items()]
- self.failtests['SingleMetricTest']['Failed Tests'].append({'RuleIndex':rule['RuleIndex'],
- 'RangeLower': rule['RangeLower'],
- 'RangeUpper': rule['RangeUpper'],
- 'ErrorThreshold':rule['ErrorThreshold'],
- 'Failure':faillist})
+ self.errlist.extend([TestError([name], self.workloads[self.wlidx], val,
+ rule['RangeLower'], rule['RangeUpper']) for name, val in failures.items()])
return
@@ -312,19 +335,11 @@ class Validator:
"""
Create final report and write into a JSON file.
"""
- alldata = list()
- for i in range(0, len(self.workloads)):
- reportstas = {"Total Rule Count": self.alltotalcnt[i], "Passed Rule Count": self.allpassedcnt[i]}
- data = {"Metric Validation Statistics": reportstas, "Tests in Category": self.allfailtests[i],
- "Errors":self.allerrlist[i]}
- alldata.append({"Workload": self.workloads[i], "Report": data})
-
- json_str = json.dumps(alldata, indent=4)
- print("Test validation finished. Final report: ")
- print(json_str)
+ print(self.errlist)
if self.debug:
- allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]} for i in range(0, len(self.workloads))]
+ allres = [{"Workload": self.workloads[i], "Results": self.allresults[i]}
+ for i in range(0, len(self.workloads))]
self.json_dump(allres, self.datafname)
def check_rule(self, testtype, metric_list):
@@ -342,13 +357,13 @@ class Validator:
return True
# Start of Collector and Converter
- def convert(self, data: list, metricvalues:dict):
+ def convert(self, data: list, metricvalues: dict):
"""
Convert collected metric data from the -j output to dict of {metric_name:value}.
"""
for json_string in data:
try:
- result =json.loads(json_string)
+ result = json.loads(json_string)
if "metric-unit" in result and result["metric-unit"] != "(null)" and result["metric-unit"] != "":
name = result["metric-unit"].split(" ")[1] if len(result["metric-unit"].split(" ")) > 1 \
else result["metric-unit"]
@@ -365,9 +380,10 @@ class Validator:
print(" ".join(command))
cmd = subprocess.run(command, stderr=subprocess.PIPE, encoding='utf-8')
data = [x+'}' for x in cmd.stderr.split('}\n') if x]
+ if data[0][0] != '{':
+ data[0] = data[0][data[0].find('{'):]
return data
-
def collect_perf(self, workload: str):
"""
Collect metric data with "perf stat -M" on given workload with -a and -j.
@@ -385,14 +401,18 @@ class Validator:
if rule["TestType"] == "RelationshipTest":
metrics = [m["Name"] for m in rule["Metrics"]]
if not any(m not in collectlist[0] for m in metrics):
- collectlist[rule["RuleIndex"]] = [",".join(list(set(metrics)))]
+ collectlist[rule["RuleIndex"]] = [
+ ",".join(list(set(metrics)))]
for idx, metrics in collectlist.items():
- if idx == 0: wl = "true"
- else: wl = workload
+ if idx == 0:
+ wl = "true"
+ else:
+ wl = workload
for metric in metrics:
data = self._run_perf(metric, wl)
- if idx not in self.results: self.results[idx] = dict()
+ if idx not in self.results:
+ self.results[idx] = dict()
self.convert(data, self.results[idx])
return
@@ -412,7 +432,8 @@ class Validator:
2) create metric name list
"""
command = ['perf', 'list', '-j', '--details', 'metrics']
- cmd = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8')
+ cmd = subprocess.run(command, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, encoding='utf-8')
try:
data = json.loads(cmd.stdout)
for m in data:
@@ -453,12 +474,12 @@ class Validator:
rules = data['RelationshipRules']
self.skiplist = set([name.lower() for name in data['SkipList']])
self.rules = self.remove_unsupported_rules(rules)
- pctgrule = {'RuleIndex':0,
- 'TestType':'SingleMetricTest',
- 'RangeLower':'0',
+ pctgrule = {'RuleIndex': 0,
+ 'TestType': 'SingleMetricTest',
+ 'RangeLower': '0',
'RangeUpper': '100',
'ErrorThreshold': self.tolerance,
- 'Description':'Metrics in percent unit have value with in [0, 100]',
+                    'Description': 'Metrics in percent unit have values within [0, 100]',
'Metrics': [{'Name': m.lower()} for m in self.pctgmetrics]}
self.rules.append(pctgrule)
@@ -469,8 +490,9 @@ class Validator:
idx += 1
if self.debug:
- #TODO: need to test and generate file name correctly
- data = {'RelationshipRules':self.rules, 'SupportedMetrics': [{"MetricName": name} for name in self.metrics]}
+ # TODO: need to test and generate file name correctly
+ data = {'RelationshipRules': self.rules, 'SupportedMetrics': [
+ {"MetricName": name} for name in self.metrics]}
self.json_dump(data, self.fullrulefname)
return
@@ -482,20 +504,17 @@ class Validator:
@param key: key to the dictionaries (index of self.workloads).
'''
self.allresults[key] = self.results
- self.allignoremetrics[key] = self.ignoremetrics
- self.allfailtests[key] = self.failtests
self.alltotalcnt[key] = self.totalcnt
self.allpassedcnt[key] = self.passedcnt
- self.allerrlist[key] = self.errlist
- #Initialize data structures before data validation of each workload
+ # Initialize data structures before data validation of each workload
def _init_data(self):
- testtypes = ['PositiveValueTest', 'RelationshipTest', 'SingleMetricTest']
+ testtypes = ['PositiveValueTest',
+ 'RelationshipTest', 'SingleMetricTest']
self.results = dict()
- self.ignoremetrics= set()
+ self.ignoremetrics = set()
self.errlist = list()
- self.failtests = {k:{'Total Tests':0, 'Passed Tests':0, 'Failed Tests':[]} for k in testtypes}
self.totalcnt = 0
self.passedcnt = 0
@@ -525,32 +544,33 @@ class Validator:
testtype = r['TestType']
if not self.check_rule(testtype, r['Metrics']):
continue
- if testtype == 'RelationshipTest':
+ if testtype == 'RelationshipTest':
self.relationship_test(r)
elif testtype == 'SingleMetricTest':
self.single_test(r)
else:
print("Unsupported Test Type: ", testtype)
- self.errlist.append("Unsupported Test Type from rule: " + r['RuleIndex'])
- self._storewldata(i)
print("Workload: ", self.workloads[i])
- print("Total metrics collected: ", self.failtests['PositiveValueTest']['Total Tests'])
- print("Non-negative metric count: ", self.failtests['PositiveValueTest']['Passed Tests'])
print("Total Test Count: ", self.totalcnt)
print("Passed Test Count: ", self.passedcnt)
-
+ self._storewldata(i)
self.create_report()
- return sum(self.alltotalcnt.values()) != sum(self.allpassedcnt.values())
+ return len(self.errlist) > 0
# End of Class Validator
def main() -> None:
- parser = argparse.ArgumentParser(description="Launch metric value validation")
-
- parser.add_argument("-rule", help="Base validation rule file", required=True)
- parser.add_argument("-output_dir", help="Path for validator output file, report file", required=True)
- parser.add_argument("-debug", help="Debug run, save intermediate data to files", action="store_true", default=False)
- parser.add_argument("-wl", help="Workload to run while data collection", default="true")
+ parser = argparse.ArgumentParser(
+ description="Launch metric value validation")
+
+ parser.add_argument(
+ "-rule", help="Base validation rule file", required=True)
+ parser.add_argument(
+ "-output_dir", help="Path for validator output file, report file", required=True)
+ parser.add_argument("-debug", help="Debug run, save intermediate data to files",
+ action="store_true", default=False)
+ parser.add_argument(
+ "-wl", help="Workload to run while data collection", default="true")
parser.add_argument("-m", help="Metric list to validate", default="")
args = parser.parse_args()
outpath = Path(args.output_dir)
@@ -559,8 +579,8 @@ def main() -> None:
datafile = Path.joinpath(outpath, 'perf_data.json')
validator = Validator(args.rule, reportf, debug=args.debug,
- datafname=datafile, fullrulefname=fullrule, workload=args.wl,
- metrics=args.m)
+ datafname=datafile, fullrulefname=fullrule, workload=args.wl,
+ metrics=args.m)
ret = validator.test()
return ret
@@ -569,6 +589,3 @@ def main() -> None:
if __name__ == "__main__":
import sys
sys.exit(main())
-
-
-
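
The validator is still launched the same way as before (see stat_metrics_values.sh below); with the failtests bookkeeping removed, a non-empty errlist is what makes test() return non-zero. An illustrative manual run (the output directory and workload are placeholders):

  python3 tools/perf/tests/shell/lib/perf_metric_validation.py \
          -rule tools/perf/tests/shell/lib/perf_metric_validation_rules.json \
          -output_dir /tmp/metric_test -wl "sleep 1"
  echo "validator exit status: $?"    # non-zero when any TestError was recorded
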
diff --git a/tools/perf/tests/shell/lib/stat_output.sh b/tools/perf/tests/shell/lib/stat_output.sh
index 3cc158a643..c81d6a9f79 100644
--- a/tools/perf/tests/shell/lib/stat_output.sh
+++ b/tools/perf/tests/shell/lib/stat_output.sh
@@ -97,6 +97,18 @@ check_per_cache_instance()
echo "[Success]"
}
+check_per_cluster()
+{
+ echo -n "Checking $1 output: per cluster "
+ if ParanoidAndNotRoot 0
+ then
+ echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat --per-cluster -a $2 true
+ echo "[Success]"
+}
+
check_per_die()
{
echo -n "Checking $1 output: per die "
diff --git a/tools/perf/tests/shell/perftool-testsuite_probe.sh b/tools/perf/tests/shell/perftool-testsuite_probe.sh
new file mode 100755
index 0000000000..a0fec33a03
--- /dev/null
+++ b/tools/perf/tests/shell/perftool-testsuite_probe.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# perftool-testsuite_probe
+# SPDX-License-Identifier: GPL-2.0
+
+test -d "$(dirname "$0")/base_probe" || exit 2
+cd "$(dirname "$0")/base_probe" || exit 2
+status=0
+
+PERFSUITE_RUN_DIR=$(mktemp -d /tmp/"$(basename "$0" .sh)".XXX)
+export PERFSUITE_RUN_DIR
+
+for testcase in setup.sh test_*; do # skip setup.sh if not present or not executable
+ test -x "$testcase" || continue
+ ./"$testcase"
+ (( status += $? ))
+done
+
+if ! [ "$PERFTEST_KEEP_LOGS" = "y" ]; then
+ rm -rf "$PERFSUITE_RUN_DIR"
+fi
+
+test $status -ne 0 && exit 1
+exit 0
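
For a manual run outside of perf test, the driver can be invoked directly; keeping the logs around only requires the environment variable it already checks:

  # run the probe sub-suite by hand and keep its logs for inspection
  PERFTEST_KEEP_LOGS=y ./tools/perf/tests/shell/perftool-testsuite_probe.sh
  echo "suite exit status: $?"   # 0 = all testcases passed, 1 = at least one failed
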
diff --git a/tools/perf/tests/shell/stat+csv_output.sh b/tools/perf/tests/shell/stat+csv_output.sh
index f1818fa6d9..fc2d8cc6e5 100755
--- a/tools/perf/tests/shell/stat+csv_output.sh
+++ b/tools/perf/tests/shell/stat+csv_output.sh
@@ -42,6 +42,7 @@ function commachecker()
;; "--per-socket") exp=8
;; "--per-node") exp=8
;; "--per-die") exp=8
+ ;; "--per-cluster") exp=8
;; "--per-cache") exp=8
esac
@@ -79,6 +80,7 @@ then
check_system_wide_no_aggr "CSV" "$perf_cmd"
check_per_core "CSV" "$perf_cmd"
check_per_cache_instance "CSV" "$perf_cmd"
+ check_per_cluster "CSV" "$perf_cmd"
check_per_die "CSV" "$perf_cmd"
check_per_socket "CSV" "$perf_cmd"
else
diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh
index 3bc900533a..2b9c6212df 100755
--- a/tools/perf/tests/shell/stat+json_output.sh
+++ b/tools/perf/tests/shell/stat+json_output.sh
@@ -122,6 +122,18 @@ check_per_cache_instance()
echo "[Success]"
}
+check_per_cluster()
+{
+ echo -n "Checking json output: per cluster "
+ if ParanoidAndNotRoot 0
+ then
+		echo "[Skip] paranoid and not root"
+ return
+ fi
+ perf stat -j --per-cluster -a true 2>&1 | $PYTHON $pythonchecker --per-cluster
+ echo "[Success]"
+}
+
check_per_die()
{
echo -n "Checking json output: per die "
@@ -200,6 +212,7 @@ then
check_system_wide_no_aggr
check_per_core
check_per_cache_instance
+ check_per_cluster
check_per_die
check_per_socket
else
diff --git a/tools/perf/tests/shell/stat+std_output.sh b/tools/perf/tests/shell/stat+std_output.sh
index 4fcdd1a914..cbf2894b2c 100755
--- a/tools/perf/tests/shell/stat+std_output.sh
+++ b/tools/perf/tests/shell/stat+std_output.sh
@@ -13,7 +13,7 @@ stat_output=$(mktemp /tmp/__perf_test.stat_output.std.XXXXX)
event_name=(cpu-clock task-clock context-switches cpu-migrations page-faults stalled-cycles-frontend stalled-cycles-backend cycles instructions branches branch-misses)
event_metric=("CPUs utilized" "CPUs utilized" "/sec" "/sec" "/sec" "frontend cycles idle" "backend cycles idle" "GHz" "insn per cycle" "/sec" "of all branches")
-skip_metric=("stalled cycles per insn" "tma_")
+skip_metric=("stalled cycles per insn" "tma_" "retiring" "frontend_bound" "bad_speculation" "backend_bound")
cleanup() {
rm -f "${stat_output}"
@@ -40,6 +40,7 @@ function commachecker()
;; "--per-node") prefix=3
;; "--per-die") prefix=3
;; "--per-cache") prefix=3
+ ;; "--per-cluster") prefix=3
esac
while read line
@@ -99,6 +100,7 @@ then
check_system_wide_no_aggr "STD" "$perf_cmd"
check_per_core "STD" "$perf_cmd"
check_per_cache_instance "STD" "$perf_cmd"
+ check_per_cluster "STD" "$perf_cmd"
check_per_die "STD" "$perf_cmd"
check_per_socket "STD" "$perf_cmd"
else
diff --git a/tools/perf/tests/shell/stat_bpf_counters.sh b/tools/perf/tests/shell/stat_bpf_counters.sh
index a87bb2814b..2d92098747 100755
--- a/tools/perf/tests/shell/stat_bpf_counters.sh
+++ b/tools/perf/tests/shell/stat_bpf_counters.sh
@@ -4,19 +4,19 @@
set -e
-# check whether $2 is within +/- 10% of $1
+# check whether $2 is within +/- 20% of $1
compare_number()
{
first_num=$1
second_num=$2
- # upper bound is first_num * 110%
- upper=$(expr $first_num + $first_num / 10 )
- # lower bound is first_num * 90%
- lower=$(expr $first_num - $first_num / 10 )
+ # upper bound is first_num * 120%
+ upper=$(expr $first_num + $first_num / 5 )
+ # lower bound is first_num * 80%
+ lower=$(expr $first_num - $first_num / 5 )
if [ $second_num -gt $upper ] || [ $second_num -lt $lower ]; then
- echo "The difference between $first_num and $second_num are greater than 10%."
+		echo "The difference between $first_num and $second_num is greater than 20%."
exit 1
fi
}
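
Since the bounds are computed with shell integer arithmetic, the widened tolerance works out as in this quick sketch (values are only an example):

  # e.g. first_num=1000: upper = 1000 + 1000/5 = 1200, lower = 1000 - 1000/5 = 800
  first_num=1000
  upper=$(expr $first_num + $first_num / 5)
  lower=$(expr $first_num - $first_num / 5)
  echo "accepting values in [$lower, $upper]"   # prints: accepting values in [800, 1200]
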
diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh
index 7ca172599a..279f19c591 100755
--- a/tools/perf/tests/shell/stat_metrics_values.sh
+++ b/tools/perf/tests/shell/stat_metrics_values.sh
@@ -19,6 +19,8 @@ echo "Output will be stored in: $tmpdir"
$PYTHON $pythonvalidator -rule $rulefile -output_dir $tmpdir -wl "${workload}"
ret=$?
rm -rf $tmpdir
-
+if [ $ret -ne 0 ]; then
+	echo "Metric validation returned with errors. Please check the metrics reported with errors."
+fi
exit $ret
diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
index e342e6c8aa..83b53591b1 100755
--- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh
+++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh
@@ -8,6 +8,12 @@ shelldir=$(dirname "$0")
lscpu | grep -q "aarch64" || exit 2
+if perf version --build-options | grep HAVE_DWARF_UNWIND_SUPPORT | grep -q OFF
+then
+ echo "Skipping, no dwarf unwind support"
+ exit 2
+fi
+
skip_test_missing_symbol leafloop
PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX)
diff --git a/tools/perf/tests/symbols.c b/tools/perf/tests/symbols.c
index 16e1c5502b..d208105919 100644
--- a/tools/perf/tests/symbols.c
+++ b/tools/perf/tests/symbols.c
@@ -41,6 +41,30 @@ static void exit_test_info(struct test_info *ti)
machine__delete(ti->machine);
}
+struct dso_map {
+ struct dso *dso;
+ struct map *map;
+};
+
+static int find_map_cb(struct map *map, void *d)
+{
+ struct dso_map *data = d;
+
+ if (map__dso(map) != data->dso)
+ return 0;
+ data->map = map;
+ return 1;
+}
+
+static struct map *find_module_map(struct machine *machine, struct dso *dso)
+{
+ struct dso_map data = { .dso = dso };
+
+ machine__for_each_kernel_map(machine, find_map_cb, &data);
+
+ return data.map;
+}
+
static void get_test_dso_filename(char *filename, size_t max_sz)
{
if (dso_to_test)
@@ -51,6 +75,26 @@ static void get_test_dso_filename(char *filename, size_t max_sz)
static int create_map(struct test_info *ti, char *filename, struct map **map_p)
{
+ struct dso *dso = machine__findnew_dso(ti->machine, filename);
+
+ /*
+ * If 'filename' matches a current kernel module, must use a kernel
+ * map. Find the one that already exists.
+ */
+ if (dso && dso->kernel) {
+ *map_p = find_module_map(ti->machine, dso);
+ dso__put(dso);
+ if (!*map_p) {
+			pr_debug("Failed to find map for current kernel module %s\n",
+ filename);
+ return TEST_FAIL;
+ }
+ map__get(*map_p);
+ return TEST_OK;
+ }
+
+ dso__put(dso);
+
/* Create a dummy map at 0x100000 */
*map_p = map__new(ti->machine, 0x100000, 0xffffffff, 0, NULL,
PROT_EXEC, 0, NULL, filename, ti->thread);
@@ -97,6 +141,26 @@ static int test_dso(struct dso *dso)
return ret;
}
+static int subdivided_dso_cb(struct dso *dso, struct machine *machine __maybe_unused, void *d)
+{
+ struct dso *text_dso = d;
+
+ if (dso != text_dso && strstarts(dso->short_name, text_dso->short_name))
+ if (test_dso(dso) != TEST_OK)
+ return -1;
+
+ return 0;
+}
+
+static int process_subdivided_dso(struct machine *machine, struct dso *dso)
+{
+ int ret;
+
+ ret = machine__for_each_dso(machine, subdivided_dso_cb, dso);
+
+ return ret < 0 ? TEST_FAIL : TEST_OK;
+}
+
static int test_file(struct test_info *ti, char *filename)
{
struct map *map = NULL;
@@ -124,6 +188,10 @@ static int test_file(struct test_info *ti, char *filename)
}
ret = test_dso(dso);
+
+ /* Module dso is split into many dsos by section */
+ if (ret == TEST_OK && dso->kernel)
+ ret = process_subdivided_dso(ti->machine, dso);
out_put:
map__put(map);
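
With create_map() now reusing the kernel map for a loaded module, the Symbols test can be pointed at a module image instead of the default DSO. A sketch of such a run, assuming a --dso option wired to dso_to_test is available from perf test (not shown in this hunk) and using an arbitrary module path:

  # illustrative only; the --dso override and the module path are assumptions
  perf test -v -F Symbols --dso "/lib/modules/$(uname -r)/kernel/fs/xfs/xfs.ko"
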
diff --git a/tools/perf/tests/tests-scripts.c b/tools/perf/tests/tests-scripts.c
new file mode 100644
index 0000000000..e2042b3682
--- /dev/null
+++ b/tools/perf/tests/tests-scripts.c
@@ -0,0 +1,257 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <subcmd/exec-cmd.h>
+#include <subcmd/parse-options.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <api/io.h>
+#include "builtin.h"
+#include "tests-scripts.h"
+#include "color.h"
+#include "debug.h"
+#include "hist.h"
+#include "intlist.h"
+#include "string2.h"
+#include "symbol.h"
+#include "tests.h"
+#include "util/rlimit.h"
+#include "util/util.h"
+
+static int shell_tests__dir_fd(void)
+{
+ char path[PATH_MAX], *exec_path;
+ static const char * const devel_dirs[] = { "./tools/perf/tests/shell", "./tests/shell", };
+
+ for (size_t i = 0; i < ARRAY_SIZE(devel_dirs); ++i) {
+ int fd = open(devel_dirs[i], O_PATH);
+
+ if (fd >= 0)
+ return fd;
+ }
+
+ /* Then installed path. */
+ exec_path = get_argv_exec_path();
+ scnprintf(path, sizeof(path), "%s/tests/shell", exec_path);
+ free(exec_path);
+ return open(path, O_PATH);
+}
+
+static char *shell_test__description(int dir_fd, const char *name)
+{
+ struct io io;
+ char buf[128], desc[256];
+ int ch, pos = 0;
+
+ io__init(&io, openat(dir_fd, name, O_RDONLY), buf, sizeof(buf));
+ if (io.fd < 0)
+ return NULL;
+
+ /* Skip first line - should be #!/bin/sh Shebang */
+ if (io__get_char(&io) != '#')
+ goto err_out;
+ if (io__get_char(&io) != '!')
+ goto err_out;
+ do {
+ ch = io__get_char(&io);
+ if (ch < 0)
+ goto err_out;
+ } while (ch != '\n');
+
+ do {
+ ch = io__get_char(&io);
+ if (ch < 0)
+ goto err_out;
+ } while (ch == '#' || isspace(ch));
+ while (ch > 0 && ch != '\n') {
+ desc[pos++] = ch;
+ if (pos >= (int)sizeof(desc) - 1)
+ break;
+ ch = io__get_char(&io);
+ }
+ while (pos > 0 && isspace(desc[--pos]))
+ ;
+ desc[++pos] = '\0';
+ close(io.fd);
+ return strdup(desc);
+err_out:
+ close(io.fd);
+ return NULL;
+}
+
+/* Is this full file path a shell script */
+static bool is_shell_script(int dir_fd, const char *path)
+{
+ const char *ext;
+
+ ext = strrchr(path, '.');
+ if (!ext)
+ return false;
+ if (!strcmp(ext, ".sh")) { /* Has .sh extension */
+ if (faccessat(dir_fd, path, R_OK | X_OK, 0) == 0) /* Is executable */
+ return true;
+ }
+ return false;
+}
+
+/* Is this file in this dir a shell script (for test purposes) */
+static bool is_test_script(int dir_fd, const char *name)
+{
+ return is_shell_script(dir_fd, name);
+}
+
+/* Duplicate a string and fall over and die if we run out of memory */
+static char *strdup_check(const char *str)
+{
+ char *newstr;
+
+ newstr = strdup(str);
+ if (!newstr) {
+ pr_err("Out of memory while duplicating test script string\n");
+ abort();
+ }
+ return newstr;
+}
+
+static int shell_test__run(struct test_suite *test, int subtest __maybe_unused)
+{
+ const char *file = test->priv;
+ int err;
+ char *cmd = NULL;
+
+ if (asprintf(&cmd, "%s%s", file, verbose ? " -v" : "") < 0)
+ return TEST_FAIL;
+ err = system(cmd);
+ free(cmd);
+ if (!err)
+ return TEST_OK;
+
+ return WEXITSTATUS(err) == 2 ? TEST_SKIP : TEST_FAIL;
+}
+
+static void append_script(int dir_fd, const char *name, char *desc,
+ struct test_suite ***result,
+ size_t *result_sz)
+{
+ char filename[PATH_MAX], link[128];
+ struct test_suite *test_suite, **result_tmp;
+ struct test_case *tests;
+	ssize_t len;
+
+ snprintf(link, sizeof(link), "/proc/%d/fd/%d", getpid(), dir_fd);
+ len = readlink(link, filename, sizeof(filename));
+ if (len < 0) {
+ pr_err("Failed to readlink %s", link);
+ return;
+ }
+ filename[len++] = '/';
+ strcpy(&filename[len], name);
+
+ tests = calloc(2, sizeof(*tests));
+ if (!tests) {
+ pr_err("Out of memory while building script test suite list\n");
+ return;
+ }
+ tests[0].name = strdup_check(name);
+ tests[0].desc = strdup_check(desc);
+ tests[0].run_case = shell_test__run;
+
+ test_suite = zalloc(sizeof(*test_suite));
+ if (!test_suite) {
+ pr_err("Out of memory while building script test suite list\n");
+ free(tests);
+ return;
+ }
+ test_suite->desc = desc;
+ test_suite->test_cases = tests;
+ test_suite->priv = strdup_check(filename);
+ /* Realloc is good enough, though we could realloc by chunks, not that
+ * anyone will ever measure performance here */
+ result_tmp = realloc(*result, (*result_sz + 1) * sizeof(*result_tmp));
+ if (result_tmp == NULL) {
+ pr_err("Out of memory while building script test suite list\n");
+ free(tests);
+ free(test_suite);
+ return;
+ }
+ /* Add file to end and NULL terminate the struct array */
+ *result = result_tmp;
+ (*result)[*result_sz] = test_suite;
+ (*result_sz)++;
+}
+
+static void append_scripts_in_dir(int dir_fd,
+ struct test_suite ***result,
+ size_t *result_sz)
+{
+ struct dirent **entlist;
+ struct dirent *ent;
+ int n_dirs, i;
+
+ /* List files, sorted by alpha */
+ n_dirs = scandirat(dir_fd, ".", &entlist, NULL, alphasort);
+ if (n_dirs == -1)
+ return;
+ for (i = 0; i < n_dirs && (ent = entlist[i]); i++) {
+ int fd;
+
+ if (ent->d_name[0] == '.')
+ continue; /* Skip hidden files */
+ if (is_test_script(dir_fd, ent->d_name)) { /* It's a test */
+ char *desc = shell_test__description(dir_fd, ent->d_name);
+
+ if (desc) /* It has a desc line - valid script */
+ append_script(dir_fd, ent->d_name, desc, result, result_sz);
+ continue;
+ }
+ if (ent->d_type != DT_DIR) {
+ struct stat st;
+
+ if (ent->d_type != DT_UNKNOWN)
+ continue;
+ fstatat(dir_fd, ent->d_name, &st, 0);
+ if (!S_ISDIR(st.st_mode))
+ continue;
+ }
+ fd = openat(dir_fd, ent->d_name, O_PATH);
+ append_scripts_in_dir(fd, result, result_sz);
+ }
+ for (i = 0; i < n_dirs; i++) /* Clean up */
+ zfree(&entlist[i]);
+ free(entlist);
+}
+
+struct test_suite **create_script_test_suites(void)
+{
+ struct test_suite **result = NULL, **result_tmp;
+ size_t result_sz = 0;
+ int dir_fd = shell_tests__dir_fd(); /* Walk dir */
+
+ /*
+ * Append scripts if fd is good, otherwise return a NULL terminated zero
+ * length array.
+ */
+ if (dir_fd >= 0)
+ append_scripts_in_dir(dir_fd, &result, &result_sz);
+
+ result_tmp = realloc(result, (result_sz + 1) * sizeof(*result_tmp));
+ if (result_tmp == NULL) {
+ pr_err("Out of memory while building script test suite list\n");
+ abort();
+ }
+ /* NULL terminate the test suite array. */
+ result = result_tmp;
+ result[result_sz] = NULL;
+ if (dir_fd >= 0)
+ close(dir_fd);
+ return result;
+}
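
shell_test__description() above only accepts scripts that start with a shebang followed by a comment line, is_shell_script() requires an executable .sh file, and shell_test__run() maps exit status 2 to TEST_SKIP. So the minimal shape of a discoverable script looks like this sketch (the recorded event and skip condition are made up for illustration):

  #!/bin/bash
  # check that my new perf feature works (this line becomes the description shown by 'perf test')
  # SPDX-License-Identifier: GPL-2.0

  perf record -e dummy -o /dev/null -- true 2> /dev/null || exit 2   # exit 2 is reported as TEST_SKIP
  exit 0                                                             # exit 0 is reported as TEST_OK
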
diff --git a/tools/perf/tests/tests-scripts.h b/tools/perf/tests/tests-scripts.h
new file mode 100644
index 0000000000..b553ad26ea
--- /dev/null
+++ b/tools/perf/tests/tests-scripts.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef TESTS_SCRIPTS_H
+#define TESTS_SCRIPTS_H
+
+#include "tests.h"
+
+struct test_suite **create_script_test_suites(void);
+
+#endif /* TESTS_SCRIPTS_H */
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index dad3d74141..3aa7701ee0 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -4,11 +4,17 @@
#include <stdbool.h>
+enum {
+ TEST_OK = 0,
+ TEST_FAIL = -1,
+ TEST_SKIP = -2,
+};
+
#define TEST_ASSERT_VAL(text, cond) \
do { \
if (!(cond)) { \
pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
- return -1; \
+ return TEST_FAIL; \
} \
} while (0)
@@ -17,16 +23,10 @@ do { \
if (val != expected) { \
pr_debug("FAILED %s:%d %s (%d != %d)\n", \
__FILE__, __LINE__, text, val, expected); \
- return -1; \
+ return TEST_FAIL; \
} \
} while (0)
-enum {
- TEST_OK = 0,
- TEST_FAIL = -1,
- TEST_SKIP = -2,
-};
-
struct test_suite;
typedef int (*test_fnptr)(struct test_suite *, int);
diff --git a/tools/perf/tests/thread-maps-share.c b/tools/perf/tests/thread-maps-share.c
index 7fa6f7c568..e9ecd30a5c 100644
--- a/tools/perf/tests/thread-maps-share.c
+++ b/tools/perf/tests/thread-maps-share.c
@@ -46,9 +46,9 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(maps)), 4);
/* test the maps pointer is shared */
- TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t1)));
- TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t2)));
- TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(maps, thread__maps(t3)));
+ TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t1)));
+ TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t2)));
+ TEST_ASSERT_VAL("maps don't match", maps__equal(maps, thread__maps(t3)));
/*
* Verify the other leader was created by previous call.
@@ -73,7 +73,7 @@ static int test__thread_maps_share(struct test_suite *test __maybe_unused, int s
other_maps = thread__maps(other);
TEST_ASSERT_EQUAL("wrong refcnt", refcount_read(maps__refcnt(other_maps)), 2);
- TEST_ASSERT_VAL("maps don't match", RC_CHK_EQUAL(other_maps, thread__maps(other_leader)));
+ TEST_ASSERT_VAL("maps don't match", maps__equal(other_maps, thread__maps(other_leader)));
/* release thread group */
thread__put(t3);
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 822f893e67..fecbf851bb 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -131,9 +131,10 @@ static int test__vmlinux_matches_kallsyms_cb1(struct map *map, void *data)
struct map *pair = maps__find_by_name(args->kallsyms.kmaps,
(dso->kernel ? dso->short_name : dso->name));
- if (pair)
+ if (pair) {
map__set_priv(pair, 1);
- else {
+ map__put(pair);
+ } else {
if (!args->header_printed) {
pr_info("WARN: Maps only in vmlinux:\n");
args->header_printed = true;
@@ -151,10 +152,8 @@ static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data)
u64 mem_end = map__unmap_ip(args->vmlinux_map, map__end(map));
pair = maps__find(args->kallsyms.kmaps, mem_start);
- if (pair == NULL || map__priv(pair))
- return 0;
- if (map__start(pair) == mem_start) {
+ if (pair != NULL && !map__priv(pair) && map__start(pair) == mem_start) {
struct dso *dso = map__dso(map);
if (!args->header_printed) {
@@ -170,6 +169,7 @@ static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data)
pr_info(" %s\n", dso->name);
map__set_priv(pair, 1);
}
+ map__put(pair);
return 0;
}