author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /tools/testing/selftests/resctrl
parent    Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/testing/selftests/resctrl')
-rw-r--r--  tools/testing/selftests/resctrl/.gitignore         2
-rw-r--r--  tools/testing/selftests/resctrl/Makefile          10
-rw-r--r--  tools/testing/selftests/resctrl/README            78
-rw-r--r--  tools/testing/selftests/resctrl/cache.c          320
-rw-r--r--  tools/testing/selftests/resctrl/cat_test.c       229
-rw-r--r--  tools/testing/selftests/resctrl/cmt_test.c       142
-rw-r--r--  tools/testing/selftests/resctrl/config             2
-rw-r--r--  tools/testing/selftests/resctrl/fill_buf.c       218
-rw-r--r--  tools/testing/selftests/resctrl/mba_test.c       177
-rw-r--r--  tools/testing/selftests/resctrl/mbm_test.c       145
-rw-r--r--  tools/testing/selftests/resctrl/resctrl.h        125
-rw-r--r--  tools/testing/selftests/resctrl/resctrl_tests.c  281
-rw-r--r--  tools/testing/selftests/resctrl/resctrl_val.c    763
-rw-r--r--  tools/testing/selftests/resctrl/resctrlfs.c      746
-rw-r--r--  tools/testing/selftests/resctrl/settings           3
15 files changed, 3241 insertions(+), 0 deletions(-)
diff --git a/tools/testing/selftests/resctrl/.gitignore b/tools/testing/selftests/resctrl/.gitignore
new file mode 100644
index 000000000..ab68442b6
--- /dev/null
+++ b/tools/testing/selftests/resctrl/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+resctrl_tests
diff --git a/tools/testing/selftests/resctrl/Makefile b/tools/testing/selftests/resctrl/Makefile
new file mode 100644
index 000000000..2deac2031
--- /dev/null
+++ b/tools/testing/selftests/resctrl/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CFLAGS = -g -Wall -O2 -D_FORTIFY_SOURCE=2 -D_GNU_SOURCE
+CFLAGS += $(KHDR_INCLUDES)
+
+TEST_GEN_PROGS := resctrl_tests
+
+include ../lib.mk
+
+$(OUTPUT)/resctrl_tests: $(wildcard *.[ch])
diff --git a/tools/testing/selftests/resctrl/README b/tools/testing/selftests/resctrl/README
new file mode 100644
index 000000000..8d11ce7c2
--- /dev/null
+++ b/tools/testing/selftests/resctrl/README
@@ -0,0 +1,78 @@
+resctrl_tests - resctrl file system test suite
+
+Authors:
+ Fenghua Yu <fenghua.yu@intel.com>
+ Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+
+resctrl_tests tests various resctrl functionalities and interfaces, covering
+both software and hardware.
+
+Currently it supports Memory Bandwidth Monitoring and Memory Bandwidth
+Allocation tests on Intel RDT hardware. More tests will be added in the
+future, and the test suite can be extended to cover AMD QoS and ARM MPAM
+hardware as well.
+
+resctrl_tests can be run with or without the kselftest framework.
+
+WITH KSELFTEST FRAMEWORK
+========================
+
+BUILD
+-----
+
+Build the executable "resctrl_tests" from the top-level directory of the kernel source:
+ $ make -C tools/testing/selftests TARGETS=resctrl
+
+RUN
+---
+
+Run resctrl_tests with sudo or as root since the test needs to mount the
+resctrl file system and modify its contents.
+Using the kselftest framework will run all supported tests within resctrl_tests:
+
+ $ sudo make -C tools/testing/selftests TARGETS=resctrl run_tests
+
+More details about kselftest framework can be found in
+Documentation/dev-tools/kselftest.rst.
+
+WITHOUT KSELFTEST FRAMEWORK
+===========================
+
+BUILD
+-----
+
+Build the executable "resctrl_tests" from this directory (tools/testing/selftests/resctrl/):
+ $ make
+
+RUN
+---
+
+Run resctrl_tests with sudo or as root since the test needs to mount the
+resctrl file system and modify its contents.
+Executing the test without any parameters will run all supported tests:
+
+ $ sudo ./resctrl_tests
+
+OVERVIEW OF EXECUTION
+=====================
+
+A test case has four stages:
+
+ - setup: mount the resctrl file system, create a group, set up the schemata,
+   move the test process PIDs into the tasks file and start the benchmark
+   (a rough manual equivalent is sketched below).
+ - execute: let the benchmark run.
+ - verify: read the resctrl data and verify it against another source, e.g. a
+   perf event.
+ - teardown: unmount resctrl and remove temporary files.
+
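+As an illustration only (not part of the test flow), the setup stage is
+roughly equivalent to the following manual resctrl operations; the group
+name, schemata value and PID below are placeholders:
+
+ $ mount -t resctrl resctrl /sys/fs/resctrl
+ $ mkdir /sys/fs/resctrl/c1
+ $ echo "L3:0=f" > /sys/fs/resctrl/c1/schemata
+ $ echo <benchmark_pid> > /sys/fs/resctrl/c1/tasks
+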
+ARGUMENTS
+=========
+
+Parameter '-h' shows usage information.
+
+usage: resctrl_tests [-h] [-b "benchmark_cmd [options]"] [-t test list] [-n no_of_bits]
+ -b benchmark_cmd [options]: run the specified benchmark for MBM, MBA and CMT;
+    the default benchmark is the builtin fill_buf
+ -t test list: run tests specified in the test list, e.g. -t mbm,mba,cmt,cat
+ -n no_of_bits: run cache tests using the specified number of bits in the cache bit mask
+ -p cpu_no: specify the CPU number to run the test on. 1 is the default
+ -h: help
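+
+For example, the following illustrative invocation runs only the CAT test,
+using 4 bits in the cache bit mask and CPU 2:
+
+ $ sudo ./resctrl_tests -t cat -n 4 -p 2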
diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c
new file mode 100644
index 000000000..338f71445
--- /dev/null
+++ b/tools/testing/selftests/resctrl/cache.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdint.h>
+#include "resctrl.h"
+
+struct read_format {
+ __u64 nr; /* The number of events */
+ struct {
+ __u64 value; /* The value of the event */
+ } values[2];
+};
+
+static struct perf_event_attr pea_llc_miss;
+static struct read_format rf_cqm;
+static int fd_lm;
+char llc_occup_path[1024];
+
+static void initialize_perf_event_attr(void)
+{
+ pea_llc_miss.type = PERF_TYPE_HARDWARE;
+ pea_llc_miss.size = sizeof(struct perf_event_attr);
+ pea_llc_miss.read_format = PERF_FORMAT_GROUP;
+ pea_llc_miss.exclude_kernel = 1;
+ pea_llc_miss.exclude_hv = 1;
+ pea_llc_miss.exclude_idle = 1;
+ pea_llc_miss.exclude_callchain_kernel = 1;
+ pea_llc_miss.inherit = 1;
+ pea_llc_miss.exclude_guest = 1;
+ pea_llc_miss.disabled = 1;
+}
+
+static void ioctl_perf_event_ioc_reset_enable(void)
+{
+ ioctl(fd_lm, PERF_EVENT_IOC_RESET, 0);
+ ioctl(fd_lm, PERF_EVENT_IOC_ENABLE, 0);
+}
+
+static int perf_event_open_llc_miss(pid_t pid, int cpu_no)
+{
+ fd_lm = perf_event_open(&pea_llc_miss, pid, cpu_no, -1,
+ PERF_FLAG_FD_CLOEXEC);
+ if (fd_lm == -1) {
+ perror("Error opening leader");
+ ctrlc_handler(0, NULL, NULL);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int initialize_llc_perf(void)
+{
+ memset(&pea_llc_miss, 0, sizeof(struct perf_event_attr));
+ memset(&rf_cqm, 0, sizeof(struct read_format));
+
+ /* Initialize perf_event_attr structures for HW_CACHE_MISSES */
+ initialize_perf_event_attr();
+
+ pea_llc_miss.config = PERF_COUNT_HW_CACHE_MISSES;
+
+ rf_cqm.nr = 1;
+
+ return 0;
+}
+
+static int reset_enable_llc_perf(pid_t pid, int cpu_no)
+{
+ int ret = 0;
+
+ ret = perf_event_open_llc_miss(pid, cpu_no);
+ if (ret < 0)
+ return ret;
+
+ /* Start counters to log values */
+ ioctl_perf_event_ioc_reset_enable();
+
+ return 0;
+}
+
+/*
+ * get_llc_perf: LLC cache misses through perf events
+ * @llc_perf_miss: LLC miss count reported back to the caller
+ *
+ * Perf events like HW_CACHE_MISSES could be used to validate number of
+ * cache lines allocated.
+ *
+ * Return: =0 on success. <0 on failure.
+ */
+static int get_llc_perf(unsigned long *llc_perf_miss)
+{
+ __u64 total_misses;
+ int ret;
+
+ /* Stop counters after one span to get miss rate */
+
+ ioctl(fd_lm, PERF_EVENT_IOC_DISABLE, 0);
+
+ ret = read(fd_lm, &rf_cqm, sizeof(struct read_format));
+ if (ret == -1) {
+ perror("Could not get llc misses through perf");
+ return -1;
+ }
+
+ total_misses = rf_cqm.values[0].value;
+ *llc_perf_miss = total_misses;
+
+ return 0;
+}
+
+/*
+ * Get LLC Occupancy as reported by RESCTRL FS
+ * For CMT,
+ * 1. If con_mon grp and mon grp given, then read from mon grp in
+ * con_mon grp
+ * 2. If only con_mon grp given, then read from con_mon grp
+ * 3. If both not given, then read from root con_mon grp
+ * For CAT,
+ * 1. If con_mon grp given, then read from it
+ * 2. If con_mon grp not given, then read from root con_mon grp
+ *
+ * Return: =0 on success. <0 on failure.
+ */
+static int get_llc_occu_resctrl(unsigned long *llc_occupancy)
+{
+ FILE *fp;
+
+ fp = fopen(llc_occup_path, "r");
+ if (!fp) {
+ perror("Failed to open results file");
+
+ return errno;
+ }
+ if (fscanf(fp, "%lu", llc_occupancy) <= 0) {
+ perror("Could not get llc occupancy");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ return 0;
+}
+
+/*
+ * print_results_cache: the cache results are stored in a file
+ * @filename: file that stores the results
+ * @bm_pid: child pid that runs benchmark
+ * @llc_value: perf miss value /
+ * llc occupancy value reported by resctrl FS
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+static int print_results_cache(char *filename, int bm_pid,
+ unsigned long llc_value)
+{
+ FILE *fp;
+
+ if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
+ printf("Pid: %d \t LLC_value: %lu\n", bm_pid,
+ llc_value);
+ } else {
+ fp = fopen(filename, "a");
+ if (!fp) {
+ perror("Cannot open results file");
+
+ return errno;
+ }
+ fprintf(fp, "Pid: %d \t llc_value: %lu\n", bm_pid, llc_value);
+ fclose(fp);
+ }
+
+ return 0;
+}
+
+int measure_cache_vals(struct resctrl_val_param *param, int bm_pid)
+{
+ unsigned long llc_perf_miss = 0, llc_occu_resc = 0, llc_value = 0;
+ int ret;
+
+ /*
+ * Measure cache miss from perf.
+ */
+ if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ ret = get_llc_perf(&llc_perf_miss);
+ if (ret < 0)
+ return ret;
+ llc_value = llc_perf_miss;
+ }
+
+ /*
+ * Measure llc occupancy from resctrl.
+ */
+ if (!strncmp(param->resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+ ret = get_llc_occu_resctrl(&llc_occu_resc);
+ if (ret < 0)
+ return ret;
+ llc_value = llc_occu_resc;
+ }
+ ret = print_results_cache(param->filename, bm_pid, llc_value);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * cat_val: execute benchmark and measure LLC occupancy via resctrl
+ * and perf cache misses for the benchmark
+ * @param: parameters passed to cat_val()
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+int cat_val(struct resctrl_val_param *param)
+{
+ int malloc_and_init_memory = 1, memflush = 1, operation = 0, ret = 0;
+ char *resctrl_val = param->resctrl_val;
+ pid_t bm_pid;
+
+ if (strcmp(param->filename, "") == 0)
+ sprintf(param->filename, "stdio");
+
+ bm_pid = getpid();
+
+ /* Taskset benchmark to specified cpu */
+ ret = taskset_benchmark(bm_pid, param->cpu_no);
+ if (ret)
+ return ret;
+
+ /* Write benchmark to specified con_mon grp, mon_grp in resctrl FS*/
+ ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
+ resctrl_val);
+ if (ret)
+ return ret;
+
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ ret = initialize_llc_perf();
+ if (ret)
+ return ret;
+ }
+
+ /* Test runs until the callback setup() tells the test to stop. */
+ while (1) {
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ ret = param->setup(1, param);
+ if (ret == END_OF_TESTS) {
+ ret = 0;
+ break;
+ }
+ if (ret < 0)
+ break;
+ ret = reset_enable_llc_perf(bm_pid, param->cpu_no);
+ if (ret)
+ break;
+
+ if (run_fill_buf(param->span, malloc_and_init_memory,
+ memflush, operation, resctrl_val)) {
+ fprintf(stderr, "Error-running fill buffer\n");
+ ret = -1;
+ goto pe_close;
+ }
+
+ sleep(1);
+ ret = measure_cache_vals(param, bm_pid);
+ if (ret)
+ goto pe_close;
+
+ close(fd_lm);
+ } else {
+ break;
+ }
+ }
+
+ return ret;
+
+pe_close:
+ close(fd_lm);
+ return ret;
+}
+
+/*
+ * show_cache_info: show cache test result information
+ * @sum_llc_val: sum of LLC cache result data
+ * @no_of_bits: number of bits
+ * @cache_span: cache span in bytes for CMT or in lines for CAT
+ * @max_diff: max difference
+ * @max_diff_percent: max difference percentage
+ * @num_of_runs: number of runs
+ * @platform: show test information on this platform
+ * @cmt: CMT test or CAT test
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
+ unsigned long cache_span, unsigned long max_diff,
+ unsigned long max_diff_percent, unsigned long num_of_runs,
+ bool platform, bool cmt)
+{
+ unsigned long avg_llc_val = 0;
+ float diff_percent;
+ long avg_diff = 0;
+ int ret;
+
+ avg_llc_val = sum_llc_val / (num_of_runs - 1);
+ avg_diff = (long)abs(cache_span - avg_llc_val);
+ diff_percent = ((float)cache_span - avg_llc_val) / cache_span * 100;
+
+ ret = platform && abs((int)diff_percent) > max_diff_percent &&
+ (cmt ? (abs(avg_diff) > max_diff) : true);
+
+ ksft_print_msg("%s Check cache miss rate within %d%%\n",
+ ret ? "Fail:" : "Pass:", max_diff_percent);
+
+ ksft_print_msg("Percent diff=%d\n", abs((int)diff_percent));
+ ksft_print_msg("Number of bits: %d\n", no_of_bits);
+ ksft_print_msg("Average LLC val: %lu\n", avg_llc_val);
+ ksft_print_msg("Cache span (%s): %lu\n", cmt ? "bytes" : "lines",
+ cache_span);
+
+ return ret;
+}
diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c
new file mode 100644
index 000000000..2d3c7c77a
--- /dev/null
+++ b/tools/testing/selftests/resctrl/cat_test.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cache Allocation Technology (CAT) test
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+#include <unistd.h>
+
+#define RESULT_FILE_NAME1 "result_cat1"
+#define RESULT_FILE_NAME2 "result_cat2"
+#define NUM_OF_RUNS 5
+#define MAX_DIFF_PERCENT 4
+#define MAX_DIFF 1000000
+
+static int count_of_bits;
+static char cbm_mask[256];
+static unsigned long long_mask;
+static unsigned long cache_size;
+
+/*
+ * Change schemata. Write schemata to specified
+ * con_mon grp, mon_grp in resctrl FS.
+ * Run 5 times in order to get average values.
+ */
+static int cat_setup(int num, ...)
+{
+ struct resctrl_val_param *p;
+ char schemata[64];
+ va_list param;
+ int ret = 0;
+
+ va_start(param, num);
+ p = va_arg(param, struct resctrl_val_param *);
+ va_end(param);
+
+ /* Run NUM_OF_RUNS times */
+ if (p->num_of_runs >= NUM_OF_RUNS)
+ return END_OF_TESTS;
+
+ if (p->num_of_runs == 0) {
+ sprintf(schemata, "%lx", p->mask);
+ ret = write_schemata(p->ctrlgrp, schemata, p->cpu_no,
+ p->resctrl_val);
+ }
+ p->num_of_runs++;
+
+ return ret;
+}
+
+static int check_results(struct resctrl_val_param *param)
+{
+ char *token_array[8], temp[512];
+ unsigned long sum_llc_perf_miss = 0;
+ int runs = 0, no_of_bits = 0;
+ FILE *fp;
+
+ ksft_print_msg("Checking for pass/fail\n");
+ fp = fopen(param->filename, "r");
+ if (!fp) {
+ perror("# Cannot open file");
+
+ return errno;
+ }
+
+ while (fgets(temp, sizeof(temp), fp)) {
+ char *token = strtok(temp, ":\t");
+ int fields = 0;
+
+ while (token) {
+ token_array[fields++] = token;
+ token = strtok(NULL, ":\t");
+ }
+ /*
+ * Discard the first value which is inaccurate due to monitoring
+ * setup transition phase.
+ */
+ if (runs > 0)
+ sum_llc_perf_miss += strtoul(token_array[3], NULL, 0);
+ runs++;
+ }
+
+ fclose(fp);
+ no_of_bits = count_bits(param->mask);
+
+ return show_cache_info(sum_llc_perf_miss, no_of_bits, param->span / 64,
+ MAX_DIFF, MAX_DIFF_PERCENT, NUM_OF_RUNS,
+ get_vendor() == ARCH_INTEL, false);
+}
+
+void cat_test_cleanup(void)
+{
+ remove(RESULT_FILE_NAME1);
+ remove(RESULT_FILE_NAME2);
+}
+
+int cat_perf_miss_val(int cpu_no, int n, char *cache_type)
+{
+ unsigned long l_mask, l_mask_1;
+ int ret, pipefd[2], sibling_cpu_no;
+ char pipe_message;
+ pid_t bm_pid;
+
+ cache_size = 0;
+
+ ret = remount_resctrlfs(true);
+ if (ret)
+ return ret;
+
+ /* Get default cbm mask for L3/L2 cache */
+ ret = get_cbm_mask(cache_type, cbm_mask);
+ if (ret)
+ return ret;
+
+ long_mask = strtoul(cbm_mask, NULL, 16);
+
+ /* Get L3/L2 cache size */
+ ret = get_cache_size(cpu_no, cache_type, &cache_size);
+ if (ret)
+ return ret;
+ ksft_print_msg("Cache size :%lu\n", cache_size);
+
+	/* Get max number of bits from default cbm mask */
+ count_of_bits = count_bits(long_mask);
+
+ if (!n)
+ n = count_of_bits / 2;
+
+ if (n > count_of_bits - 1) {
+ ksft_print_msg("Invalid input value for no_of_bits n!\n");
+ ksft_print_msg("Please enter value in range 1 to %d\n",
+ count_of_bits - 1);
+ return -1;
+ }
+
+ /* Get core id from same socket for running another thread */
+ sibling_cpu_no = get_core_sibling(cpu_no);
+ if (sibling_cpu_no < 0)
+ return -1;
+
+ struct resctrl_val_param param = {
+ .resctrl_val = CAT_STR,
+ .cpu_no = cpu_no,
+ .mum_resctrlfs = 0,
+ .setup = cat_setup,
+ };
+
+ l_mask = long_mask >> n;
+ l_mask_1 = ~l_mask & long_mask;
+
+ /* Set param values for parent thread which will be allocated bitmask
+ * with (max_bits - n) bits
+ */
+ param.span = cache_size * (count_of_bits - n) / count_of_bits;
+ strcpy(param.ctrlgrp, "c2");
+ strcpy(param.mongrp, "m2");
+ strcpy(param.filename, RESULT_FILE_NAME2);
+ param.mask = l_mask;
+ param.num_of_runs = 0;
+
+ if (pipe(pipefd)) {
+ perror("# Unable to create pipe");
+ return errno;
+ }
+
+ bm_pid = fork();
+
+ /* Set param values for child thread which will be allocated bitmask
+ * with n bits
+ */
+ if (bm_pid == 0) {
+ param.mask = l_mask_1;
+ strcpy(param.ctrlgrp, "c1");
+ strcpy(param.mongrp, "m1");
+ param.span = cache_size * n / count_of_bits;
+ strcpy(param.filename, RESULT_FILE_NAME1);
+ param.num_of_runs = 0;
+ param.cpu_no = sibling_cpu_no;
+ }
+
+ remove(param.filename);
+
+ ret = cat_val(&param);
+ if (ret)
+ return ret;
+
+ ret = check_results(&param);
+ if (ret)
+ return ret;
+
+ if (bm_pid == 0) {
+ /* Tell parent that child is ready */
+ close(pipefd[0]);
+ pipe_message = 1;
+ if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) <
+ sizeof(pipe_message)) {
+ close(pipefd[1]);
+ perror("# failed signaling parent process");
+ return errno;
+ }
+
+ close(pipefd[1]);
+ while (1)
+ ;
+ } else {
+ /* Parent waits for child to be ready. */
+ close(pipefd[1]);
+ pipe_message = 0;
+ while (pipe_message != 1) {
+ if (read(pipefd[0], &pipe_message,
+ sizeof(pipe_message)) < sizeof(pipe_message)) {
+ perror("# failed reading from child process");
+ break;
+ }
+ }
+ close(pipefd[0]);
+ kill(bm_pid, SIGKILL);
+ }
+
+ cat_test_cleanup();
+ if (bm_pid)
+ umount_resctrlfs();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c
new file mode 100644
index 000000000..dd9f9db70
--- /dev/null
+++ b/tools/testing/selftests/resctrl/cmt_test.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cache Monitoring Technology (CMT) test
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+#include <unistd.h>
+
+#define RESULT_FILE_NAME "result_cmt"
+#define NUM_OF_RUNS 5
+#define MAX_DIFF 2000000
+#define MAX_DIFF_PERCENT 15
+
+static int count_of_bits;
+static char cbm_mask[256];
+static unsigned long long_mask;
+static unsigned long cache_size;
+
+static int cmt_setup(int num, ...)
+{
+ struct resctrl_val_param *p;
+ va_list param;
+
+ va_start(param, num);
+ p = va_arg(param, struct resctrl_val_param *);
+ va_end(param);
+
+ /* Run NUM_OF_RUNS times */
+ if (p->num_of_runs >= NUM_OF_RUNS)
+ return END_OF_TESTS;
+
+ p->num_of_runs++;
+
+ return 0;
+}
+
+static int check_results(struct resctrl_val_param *param, int no_of_bits)
+{
+ char *token_array[8], temp[512];
+ unsigned long sum_llc_occu_resc = 0;
+ int runs = 0;
+ FILE *fp;
+
+ ksft_print_msg("Checking for pass/fail\n");
+ fp = fopen(param->filename, "r");
+ if (!fp) {
+ perror("# Error in opening file\n");
+
+ return errno;
+ }
+
+ while (fgets(temp, sizeof(temp), fp)) {
+ char *token = strtok(temp, ":\t");
+ int fields = 0;
+
+ while (token) {
+ token_array[fields++] = token;
+ token = strtok(NULL, ":\t");
+ }
+
+ /* Field 3 is llc occ resc value */
+ if (runs > 0)
+ sum_llc_occu_resc += strtoul(token_array[3], NULL, 0);
+ runs++;
+ }
+ fclose(fp);
+
+ return show_cache_info(sum_llc_occu_resc, no_of_bits, param->span,
+ MAX_DIFF, MAX_DIFF_PERCENT, NUM_OF_RUNS,
+ true, true);
+}
+
+void cmt_test_cleanup(void)
+{
+ remove(RESULT_FILE_NAME);
+}
+
+int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd)
+{
+ int ret, mum_resctrlfs;
+
+ cache_size = 0;
+ mum_resctrlfs = 1;
+
+ ret = remount_resctrlfs(mum_resctrlfs);
+ if (ret)
+ return ret;
+
+ ret = get_cbm_mask("L3", cbm_mask);
+ if (ret)
+ return ret;
+
+ long_mask = strtoul(cbm_mask, NULL, 16);
+
+ ret = get_cache_size(cpu_no, "L3", &cache_size);
+ if (ret)
+ return ret;
+ ksft_print_msg("Cache size :%lu\n", cache_size);
+
+ count_of_bits = count_bits(long_mask);
+
+ if (n < 1 || n > count_of_bits) {
+		ksft_print_msg("Invalid input value for number of bits n!\n");
+ ksft_print_msg("Please enter value in range 1 to %d\n", count_of_bits);
+ return -1;
+ }
+
+ struct resctrl_val_param param = {
+ .resctrl_val = CMT_STR,
+ .ctrlgrp = "c1",
+ .mongrp = "m1",
+ .cpu_no = cpu_no,
+ .mum_resctrlfs = 0,
+ .filename = RESULT_FILE_NAME,
+ .mask = ~(long_mask << n) & long_mask,
+ .span = cache_size * n / count_of_bits,
+ .num_of_runs = 0,
+ .setup = cmt_setup,
+ };
+
+ if (strcmp(benchmark_cmd[0], "fill_buf") == 0)
+ sprintf(benchmark_cmd[1], "%lu", param.span);
+
+ remove(RESULT_FILE_NAME);
+
+ ret = resctrl_val(benchmark_cmd, &param);
+ if (ret)
+ return ret;
+
+ ret = check_results(&param, n);
+ if (ret)
+ return ret;
+
+ cmt_test_cleanup();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/resctrl/config b/tools/testing/selftests/resctrl/config
new file mode 100644
index 000000000..8d9f2deb5
--- /dev/null
+++ b/tools/testing/selftests/resctrl/config
@@ -0,0 +1,2 @@
+CONFIG_X86_CPU_RESCTRL=y
+CONFIG_PROC_CPU_RESCTRL=y
diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c
new file mode 100644
index 000000000..ab1d91328
--- /dev/null
+++ b/tools/testing/selftests/resctrl/fill_buf.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fill_buf benchmark
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <inttypes.h>
+#include <malloc.h>
+#include <string.h>
+
+#include "resctrl.h"
+
+#define CL_SIZE (64)
+#define PAGE_SIZE (4 * 1024)
+#define MB (1024 * 1024)
+
+static unsigned char *startptr;
+
+static void sb(void)
+{
+#if defined(__i386) || defined(__x86_64)
+ asm volatile("sfence\n\t"
+ : : : "memory");
+#endif
+}
+
+static void ctrl_handler(int signo)
+{
+ free(startptr);
+ printf("\nEnding\n");
+ sb();
+ exit(EXIT_SUCCESS);
+}
+
+static void cl_flush(void *p)
+{
+#if defined(__i386) || defined(__x86_64)
+ asm volatile("clflush (%0)\n\t"
+ : : "r"(p) : "memory");
+#endif
+}
+
+static void mem_flush(void *p, size_t s)
+{
+ char *cp = (char *)p;
+ size_t i = 0;
+
+	s = s / CL_SIZE; /* mem size in cache lines */
+
+ for (i = 0; i < s; i++)
+ cl_flush(&cp[i * CL_SIZE]);
+
+ sb();
+}
+
+static void *malloc_and_init_memory(size_t s)
+{
+ uint64_t *p64;
+ size_t s64;
+
+ void *p = memalign(PAGE_SIZE, s);
+ if (!p)
+ return NULL;
+
+ p64 = (uint64_t *)p;
+ s64 = s / sizeof(uint64_t);
+
+ while (s64 > 0) {
+ *p64 = (uint64_t)rand();
+ p64 += (CL_SIZE / sizeof(uint64_t));
+ s64 -= (CL_SIZE / sizeof(uint64_t));
+ }
+
+ return p;
+}
+
+static int fill_one_span_read(unsigned char *start_ptr, unsigned char *end_ptr)
+{
+ unsigned char sum, *p;
+
+ sum = 0;
+ p = start_ptr;
+ while (p < end_ptr) {
+ sum += *p;
+ p += (CL_SIZE / 2);
+ }
+
+ return sum;
+}
+
+static
+void fill_one_span_write(unsigned char *start_ptr, unsigned char *end_ptr)
+{
+ unsigned char *p;
+
+ p = start_ptr;
+ while (p < end_ptr) {
+ *p = '1';
+ p += (CL_SIZE / 2);
+ }
+}
+
+static int fill_cache_read(unsigned char *start_ptr, unsigned char *end_ptr,
+ char *resctrl_val)
+{
+ int ret = 0;
+ FILE *fp;
+
+ while (1) {
+ ret = fill_one_span_read(start_ptr, end_ptr);
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
+ break;
+ }
+
+ /* Consume read result so that reading memory is not optimized out. */
+ fp = fopen("/dev/null", "w");
+ if (!fp) {
+ perror("Unable to write to /dev/null");
+ return -1;
+ }
+ fprintf(fp, "Sum: %d ", ret);
+ fclose(fp);
+
+ return 0;
+}
+
+static int fill_cache_write(unsigned char *start_ptr, unsigned char *end_ptr,
+ char *resctrl_val)
+{
+ while (1) {
+ fill_one_span_write(start_ptr, end_ptr);
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)))
+ break;
+ }
+
+ return 0;
+}
+
+static int
+fill_cache(unsigned long long buf_size, int malloc_and_init, int memflush,
+ int op, char *resctrl_val)
+{
+ unsigned char *start_ptr, *end_ptr;
+ unsigned long long i;
+ int ret;
+
+ if (malloc_and_init)
+ start_ptr = malloc_and_init_memory(buf_size);
+ else
+ start_ptr = malloc(buf_size);
+
+ if (!start_ptr)
+ return -1;
+
+ startptr = start_ptr;
+ end_ptr = start_ptr + buf_size;
+
+ /*
+ * It's better to touch the memory once to avoid any compiler
+ * optimizations
+ */
+ if (!malloc_and_init) {
+ for (i = 0; i < buf_size; i++)
+ *start_ptr++ = (unsigned char)rand();
+ }
+
+ start_ptr = startptr;
+
+ /* Flush the memory before using to avoid "cache hot pages" effect */
+ if (memflush)
+ mem_flush(start_ptr, buf_size);
+
+ if (op == 0)
+ ret = fill_cache_read(start_ptr, end_ptr, resctrl_val);
+ else
+ ret = fill_cache_write(start_ptr, end_ptr, resctrl_val);
+
+ free(startptr);
+
+ if (ret) {
+ printf("\n Error in fill cache read/write...\n");
+ return -1;
+ }
+
+
+ return 0;
+}
+
+int run_fill_buf(unsigned long span, int malloc_and_init_memory,
+ int memflush, int op, char *resctrl_val)
+{
+ unsigned long long cache_size = span;
+ int ret;
+
+ /* set up ctrl-c handler */
+ if (signal(SIGINT, ctrl_handler) == SIG_ERR)
+ printf("Failed to catch SIGINT!\n");
+ if (signal(SIGHUP, ctrl_handler) == SIG_ERR)
+ printf("Failed to catch SIGHUP!\n");
+
+ ret = fill_cache(cache_size, malloc_and_init_memory, memflush, op,
+ resctrl_val);
+ if (ret) {
+ printf("\n Error in fill cache\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c
new file mode 100644
index 000000000..ff8b588b6
--- /dev/null
+++ b/tools/testing/selftests/resctrl/mba_test.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory Bandwidth Allocation (MBA) test
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+
+#define RESULT_FILE_NAME "result_mba"
+#define NUM_OF_RUNS 5
+#define MAX_DIFF_PERCENT 8
+#define ALLOCATION_MAX 100
+#define ALLOCATION_MIN 10
+#define ALLOCATION_STEP 10
+
+/*
+ * Change schemata percentage from 100% to 10%. Write schemata to specified
+ * con_mon grp, mon_grp in resctrl FS.
+ * For each allocation, run 5 times in order to get average values.
+ */
+static int mba_setup(int num, ...)
+{
+ static int runs_per_allocation, allocation = 100;
+ struct resctrl_val_param *p;
+ char allocation_str[64];
+ va_list param;
+ int ret;
+
+ va_start(param, num);
+ p = va_arg(param, struct resctrl_val_param *);
+ va_end(param);
+
+ if (runs_per_allocation >= NUM_OF_RUNS)
+ runs_per_allocation = 0;
+
+ /* Only set up schemata once every NUM_OF_RUNS of allocations */
+ if (runs_per_allocation++ != 0)
+ return 0;
+
+ if (allocation < ALLOCATION_MIN || allocation > ALLOCATION_MAX)
+ return END_OF_TESTS;
+
+ sprintf(allocation_str, "%d", allocation);
+
+ ret = write_schemata(p->ctrlgrp, allocation_str, p->cpu_no,
+ p->resctrl_val);
+ if (ret < 0)
+ return ret;
+
+ allocation -= ALLOCATION_STEP;
+
+ return 0;
+}
+
+static void show_mba_info(unsigned long *bw_imc, unsigned long *bw_resc)
+{
+ int allocation, runs;
+ bool failed = false;
+
+ ksft_print_msg("Results are displayed in (MB)\n");
+ /* Memory bandwidth from 100% down to 10% */
+ for (allocation = 0; allocation < ALLOCATION_MAX / ALLOCATION_STEP;
+ allocation++) {
+ unsigned long avg_bw_imc, avg_bw_resc;
+ unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
+ int avg_diff_per;
+ float avg_diff;
+
+ /*
+ * The first run is discarded due to inaccurate value from
+ * phase transition.
+ */
+ for (runs = NUM_OF_RUNS * allocation + 1;
+ runs < NUM_OF_RUNS * allocation + NUM_OF_RUNS ; runs++) {
+ sum_bw_imc += bw_imc[runs];
+ sum_bw_resc += bw_resc[runs];
+ }
+
+ avg_bw_imc = sum_bw_imc / (NUM_OF_RUNS - 1);
+ avg_bw_resc = sum_bw_resc / (NUM_OF_RUNS - 1);
+ avg_diff = (float)labs(avg_bw_resc - avg_bw_imc) / avg_bw_imc;
+ avg_diff_per = (int)(avg_diff * 100);
+
+ ksft_print_msg("%s Check MBA diff within %d%% for schemata %u\n",
+ avg_diff_per > MAX_DIFF_PERCENT ?
+ "Fail:" : "Pass:",
+ MAX_DIFF_PERCENT,
+ ALLOCATION_MAX - ALLOCATION_STEP * allocation);
+
+ ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per);
+ ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc);
+ ksft_print_msg("avg_bw_resc: %lu\n", avg_bw_resc);
+ if (avg_diff_per > MAX_DIFF_PERCENT)
+ failed = true;
+ }
+
+ ksft_print_msg("%s Check schemata change using MBA\n",
+ failed ? "Fail:" : "Pass:");
+ if (failed)
+ ksft_print_msg("At least one test failed\n");
+}
+
+static int check_results(void)
+{
+ char *token_array[8], output[] = RESULT_FILE_NAME, temp[512];
+ unsigned long bw_imc[1024], bw_resc[1024];
+ int runs;
+ FILE *fp;
+
+ fp = fopen(output, "r");
+ if (!fp) {
+ perror(output);
+
+ return errno;
+ }
+
+ runs = 0;
+ while (fgets(temp, sizeof(temp), fp)) {
+ char *token = strtok(temp, ":\t");
+ int fields = 0;
+
+ while (token) {
+ token_array[fields++] = token;
+ token = strtok(NULL, ":\t");
+ }
+
+ /* Field 3 is perf imc value */
+ bw_imc[runs] = strtoul(token_array[3], NULL, 0);
+ /* Field 5 is resctrl value */
+ bw_resc[runs] = strtoul(token_array[5], NULL, 0);
+ runs++;
+ }
+
+ fclose(fp);
+
+ show_mba_info(bw_imc, bw_resc);
+
+ return 0;
+}
+
+void mba_test_cleanup(void)
+{
+ remove(RESULT_FILE_NAME);
+}
+
+int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd)
+{
+ struct resctrl_val_param param = {
+ .resctrl_val = MBA_STR,
+ .ctrlgrp = "c1",
+ .mongrp = "m1",
+ .cpu_no = cpu_no,
+ .mum_resctrlfs = 1,
+ .filename = RESULT_FILE_NAME,
+ .bw_report = bw_report,
+ .setup = mba_setup
+ };
+ int ret;
+
+ remove(RESULT_FILE_NAME);
+
+ ret = resctrl_val(benchmark_cmd, &param);
+ if (ret)
+ return ret;
+
+ ret = check_results();
+ if (ret)
+ return ret;
+
+ mba_test_cleanup();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c
new file mode 100644
index 000000000..5dc1dce89
--- /dev/null
+++ b/tools/testing/selftests/resctrl/mbm_test.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory Bandwidth Monitoring (MBM) test
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+
+#define RESULT_FILE_NAME "result_mbm"
+#define MAX_DIFF_PERCENT 8
+#define NUM_OF_RUNS 5
+
+static int
+show_bw_info(unsigned long *bw_imc, unsigned long *bw_resc, int span)
+{
+ unsigned long avg_bw_imc = 0, avg_bw_resc = 0;
+ unsigned long sum_bw_imc = 0, sum_bw_resc = 0;
+ int runs, ret, avg_diff_per;
+ float avg_diff = 0;
+
+ /*
+ * Discard the first value which is inaccurate due to monitoring setup
+ * transition phase.
+ */
+ for (runs = 1; runs < NUM_OF_RUNS ; runs++) {
+ sum_bw_imc += bw_imc[runs];
+ sum_bw_resc += bw_resc[runs];
+ }
+
+ avg_bw_imc = sum_bw_imc / 4;
+ avg_bw_resc = sum_bw_resc / 4;
+ avg_diff = (float)labs(avg_bw_resc - avg_bw_imc) / avg_bw_imc;
+ avg_diff_per = (int)(avg_diff * 100);
+
+ ret = avg_diff_per > MAX_DIFF_PERCENT;
+ ksft_print_msg("%s Check MBM diff within %d%%\n",
+ ret ? "Fail:" : "Pass:", MAX_DIFF_PERCENT);
+ ksft_print_msg("avg_diff_per: %d%%\n", avg_diff_per);
+ ksft_print_msg("Span (MB): %d\n", span);
+ ksft_print_msg("avg_bw_imc: %lu\n", avg_bw_imc);
+ ksft_print_msg("avg_bw_resc: %lu\n", avg_bw_resc);
+
+ return ret;
+}
+
+static int check_results(int span)
+{
+ unsigned long bw_imc[NUM_OF_RUNS], bw_resc[NUM_OF_RUNS];
+ char temp[1024], *token_array[8];
+ char output[] = RESULT_FILE_NAME;
+ int runs, ret;
+ FILE *fp;
+
+ ksft_print_msg("Checking for pass/fail\n");
+
+ fp = fopen(output, "r");
+ if (!fp) {
+ perror(output);
+
+ return errno;
+ }
+
+ runs = 0;
+ while (fgets(temp, sizeof(temp), fp)) {
+ char *token = strtok(temp, ":\t");
+ int i = 0;
+
+ while (token) {
+ token_array[i++] = token;
+ token = strtok(NULL, ":\t");
+ }
+
+ bw_resc[runs] = strtoul(token_array[5], NULL, 0);
+ bw_imc[runs] = strtoul(token_array[3], NULL, 0);
+ runs++;
+ }
+
+ ret = show_bw_info(bw_imc, bw_resc, span);
+
+ fclose(fp);
+
+ return ret;
+}
+
+static int mbm_setup(int num, ...)
+{
+ struct resctrl_val_param *p;
+ static int num_of_runs;
+ va_list param;
+ int ret = 0;
+
+ /* Run NUM_OF_RUNS times */
+ if (num_of_runs++ >= NUM_OF_RUNS)
+ return END_OF_TESTS;
+
+ va_start(param, num);
+ p = va_arg(param, struct resctrl_val_param *);
+ va_end(param);
+
+	/* Set up schemata with 100% allocation on the first run. */
+ if (num_of_runs == 0)
+ ret = write_schemata(p->ctrlgrp, "100", p->cpu_no,
+ p->resctrl_val);
+
+ return ret;
+}
+
+void mbm_test_cleanup(void)
+{
+ remove(RESULT_FILE_NAME);
+}
+
+int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd)
+{
+ struct resctrl_val_param param = {
+ .resctrl_val = MBM_STR,
+ .ctrlgrp = "c1",
+ .mongrp = "m1",
+ .span = span,
+ .cpu_no = cpu_no,
+ .mum_resctrlfs = 1,
+ .filename = RESULT_FILE_NAME,
+ .bw_report = bw_report,
+ .setup = mbm_setup
+ };
+ int ret;
+
+ remove(RESULT_FILE_NAME);
+
+ ret = resctrl_val(benchmark_cmd, &param);
+ if (ret)
+ return ret;
+
+ ret = check_results(span);
+ if (ret)
+ return ret;
+
+ mbm_test_cleanup();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h
new file mode 100644
index 000000000..4597bba66
--- /dev/null
+++ b/tools/testing/selftests/resctrl/resctrl.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef RESCTRL_H
+#define RESCTRL_H
+#include <stdio.h>
+#include <stdarg.h>
+#include <math.h>
+#include <errno.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <signal.h>
+#include <dirent.h>
+#include <stdbool.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/select.h>
+#include <sys/time.h>
+#include <sys/eventfd.h>
+#include <asm/unistd.h>
+#include <linux/perf_event.h>
+#include "../kselftest.h"
+
+#define MB (1024 * 1024)
+#define RESCTRL_PATH "/sys/fs/resctrl"
+#define PHYS_ID_PATH "/sys/devices/system/cpu/cpu"
+#define CBM_MASK_PATH "/sys/fs/resctrl/info"
+#define L3_PATH "/sys/fs/resctrl/info/L3"
+#define MB_PATH "/sys/fs/resctrl/info/MB"
+#define L3_MON_PATH "/sys/fs/resctrl/info/L3_MON"
+#define L3_MON_FEATURES_PATH "/sys/fs/resctrl/info/L3_MON/mon_features"
+
+#define ARCH_INTEL 1
+#define ARCH_AMD 2
+
+#define END_OF_TESTS 1
+
+#define PARENT_EXIT(err_msg) \
+ do { \
+ perror(err_msg); \
+ kill(ppid, SIGKILL); \
+ umount_resctrlfs(); \
+ exit(EXIT_FAILURE); \
+ } while (0)
+
+/*
+ * resctrl_val_param: resctrl test parameters
+ * @resctrl_val: Resctrl feature (e.g. mbm, mba, etc.)
+ * @ctrlgrp: Name of the control monitor group (con_mon grp)
+ * @mongrp: Name of the monitor group (mon grp)
+ * @cpu_no: CPU number to which the benchmark will be bound
+ * @span: Memory bytes accessed in each benchmark iteration
+ * @mum_resctrlfs: Should the resctrl FS be remounted?
+ * @filename: Name of the file to which the output should be written
+ * @bw_report: Bandwidth report type (reads vs writes)
+ * @setup: Callback function to set up the test environment
+ */
+struct resctrl_val_param {
+ char *resctrl_val;
+ char ctrlgrp[64];
+ char mongrp[64];
+ int cpu_no;
+ unsigned long span;
+ int mum_resctrlfs;
+ char filename[64];
+ char *bw_report;
+ unsigned long mask;
+ int num_of_runs;
+ int (*setup)(int num, ...);
+};
+
+#define MBM_STR "mbm"
+#define MBA_STR "mba"
+#define CMT_STR "cmt"
+#define CAT_STR "cat"
+
+extern pid_t bm_pid, ppid;
+
+extern char llc_occup_path[1024];
+
+int get_vendor(void);
+bool check_resctrlfs_support(void);
+int filter_dmesg(void);
+int remount_resctrlfs(bool mum_resctrlfs);
+int get_resource_id(int cpu_no, int *resource_id);
+int umount_resctrlfs(void);
+int validate_bw_report_request(char *bw_report);
+bool validate_resctrl_feature_request(const char *resctrl_val);
+char *fgrep(FILE *inf, const char *str);
+int taskset_benchmark(pid_t bm_pid, int cpu_no);
+void run_benchmark(int signum, siginfo_t *info, void *ucontext);
+int write_schemata(char *ctrlgrp, char *schemata, int cpu_no,
+ char *resctrl_val);
+int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
+ char *resctrl_val);
+int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
+ int group_fd, unsigned long flags);
+int run_fill_buf(unsigned long span, int malloc_and_init_memory, int memflush,
+		 int op, char *resctrl_val);
+int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param);
+int mbm_bw_change(int span, int cpu_no, char *bw_report, char **benchmark_cmd);
+void tests_cleanup(void);
+void mbm_test_cleanup(void);
+int mba_schemata_change(int cpu_no, char *bw_report, char **benchmark_cmd);
+void mba_test_cleanup(void);
+int get_cbm_mask(char *cache_type, char *cbm_mask);
+int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size);
+void ctrlc_handler(int signum, siginfo_t *info, void *ptr);
+int cat_val(struct resctrl_val_param *param);
+void cat_test_cleanup(void);
+int cat_perf_miss_val(int cpu_no, int no_of_bits, char *cache_type);
+int cmt_resctrl_val(int cpu_no, int n, char **benchmark_cmd);
+unsigned int count_bits(unsigned long n);
+void cmt_test_cleanup(void);
+int get_core_sibling(int cpu_no);
+int measure_cache_vals(struct resctrl_val_param *param, int bm_pid);
+int show_cache_info(unsigned long sum_llc_val, int no_of_bits,
+ unsigned long cache_span, unsigned long max_diff,
+ unsigned long max_diff_percent, unsigned long num_of_runs,
+ bool platform, bool cmt);
+
+#endif /* RESCTRL_H */
diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c
new file mode 100644
index 000000000..4418155a8
--- /dev/null
+++ b/tools/testing/selftests/resctrl/resctrl_tests.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Resctrl tests
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+
+#define BENCHMARK_ARGS 64
+#define BENCHMARK_ARG_SIZE 64
+
+static int detect_vendor(void)
+{
+ FILE *inf = fopen("/proc/cpuinfo", "r");
+ int vendor_id = 0;
+ char *s = NULL;
+ char *res;
+
+ if (!inf)
+ return vendor_id;
+
+ res = fgrep(inf, "vendor_id");
+
+ if (res)
+ s = strchr(res, ':');
+
+ if (s && !strcmp(s, ": GenuineIntel\n"))
+ vendor_id = ARCH_INTEL;
+ else if (s && !strcmp(s, ": AuthenticAMD\n"))
+ vendor_id = ARCH_AMD;
+
+ fclose(inf);
+ free(res);
+ return vendor_id;
+}
+
+int get_vendor(void)
+{
+ static int vendor = -1;
+
+ if (vendor == -1)
+ vendor = detect_vendor();
+ if (vendor == 0)
+ ksft_print_msg("Can not get vendor info...\n");
+
+ return vendor;
+}
+
+static void cmd_help(void)
+{
+ printf("usage: resctrl_tests [-h] [-b \"benchmark_cmd [options]\"] [-t test list] [-n no_of_bits]\n");
+ printf("\t-b benchmark_cmd [options]: run specified benchmark for MBM, MBA and CMT\n");
+ printf("\t default benchmark is builtin fill_buf\n");
+ printf("\t-t test list: run tests specified in the test list, ");
+ printf("e.g. -t mbm,mba,cmt,cat\n");
+ printf("\t-n no_of_bits: run cache tests using specified no of bits in cache bit mask\n");
+ printf("\t-p cpu_no: specify CPU number to run the test. 1 is default\n");
+ printf("\t-h: help\n");
+}
+
+void tests_cleanup(void)
+{
+ mbm_test_cleanup();
+ mba_test_cleanup();
+ cmt_test_cleanup();
+ cat_test_cleanup();
+}
+
+static void run_mbm_test(bool has_ben, char **benchmark_cmd, int span,
+ int cpu_no, char *bw_report)
+{
+ int res;
+
+ ksft_print_msg("Starting MBM BW change ...\n");
+
+ if (!validate_resctrl_feature_request(MBM_STR)) {
+ ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n");
+ return;
+ }
+
+ if (!has_ben)
+ sprintf(benchmark_cmd[5], "%s", MBA_STR);
+ res = mbm_bw_change(span, cpu_no, bw_report, benchmark_cmd);
+ ksft_test_result(!res, "MBM: bw change\n");
+ if ((get_vendor() == ARCH_INTEL) && res)
+ ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
+ mbm_test_cleanup();
+}
+
+static void run_mba_test(bool has_ben, char **benchmark_cmd, int span,
+ int cpu_no, char *bw_report)
+{
+ int res;
+
+ ksft_print_msg("Starting MBA Schemata change ...\n");
+
+ if (!validate_resctrl_feature_request(MBA_STR)) {
+ ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n");
+ return;
+ }
+
+ if (!has_ben)
+ sprintf(benchmark_cmd[1], "%d", span);
+ res = mba_schemata_change(cpu_no, bw_report, benchmark_cmd);
+ ksft_test_result(!res, "MBA: schemata change\n");
+ mba_test_cleanup();
+}
+
+static void run_cmt_test(bool has_ben, char **benchmark_cmd, int cpu_no)
+{
+ int res;
+
+ ksft_print_msg("Starting CMT test ...\n");
+ if (!validate_resctrl_feature_request(CMT_STR)) {
+ ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n");
+ return;
+ }
+
+ if (!has_ben)
+ sprintf(benchmark_cmd[5], "%s", CMT_STR);
+ res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd);
+ ksft_test_result(!res, "CMT: test\n");
+ if ((get_vendor() == ARCH_INTEL) && res)
+ ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n");
+ cmt_test_cleanup();
+}
+
+static void run_cat_test(int cpu_no, int no_of_bits)
+{
+ int res;
+
+ ksft_print_msg("Starting CAT test ...\n");
+
+ if (!validate_resctrl_feature_request(CAT_STR)) {
+ ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n");
+ return;
+ }
+
+ res = cat_perf_miss_val(cpu_no, no_of_bits, "L3");
+ ksft_test_result(!res, "CAT: test\n");
+ cat_test_cleanup();
+}
+
+int main(int argc, char **argv)
+{
+ bool has_ben = false, mbm_test = true, mba_test = true, cmt_test = true;
+ int c, cpu_no = 1, span = 250, argc_new = argc, i, no_of_bits = 0;
+ char *benchmark_cmd[BENCHMARK_ARGS], bw_report[64], bm_type[64];
+ char benchmark_cmd_area[BENCHMARK_ARGS][BENCHMARK_ARG_SIZE];
+ int ben_ind, ben_count, tests = 0;
+ bool cat_test = true;
+
+ for (i = 0; i < argc; i++) {
+ if (strcmp(argv[i], "-b") == 0) {
+ ben_ind = i + 1;
+ ben_count = argc - ben_ind;
+ argc_new = ben_ind - 1;
+ has_ben = true;
+ break;
+ }
+ }
+
+ while ((c = getopt(argc_new, argv, "ht:b:n:p:")) != -1) {
+ char *token;
+
+ switch (c) {
+ case 't':
+ token = strtok(optarg, ",");
+
+ mbm_test = false;
+ mba_test = false;
+ cmt_test = false;
+ cat_test = false;
+ while (token) {
+ if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) {
+ mbm_test = true;
+ tests++;
+ } else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) {
+ mba_test = true;
+ tests++;
+ } else if (!strncmp(token, CMT_STR, sizeof(CMT_STR))) {
+ cmt_test = true;
+ tests++;
+ } else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) {
+ cat_test = true;
+ tests++;
+ } else {
+ printf("invalid argument\n");
+
+ return -1;
+ }
+ token = strtok(NULL, ",");
+ }
+ break;
+ case 'p':
+ cpu_no = atoi(optarg);
+ break;
+ case 'n':
+ no_of_bits = atoi(optarg);
+ if (no_of_bits <= 0) {
+ printf("Bail out! invalid argument for no_of_bits\n");
+ return -1;
+ }
+ break;
+ case 'h':
+ cmd_help();
+
+ return 0;
+ default:
+ printf("invalid argument\n");
+
+ return -1;
+ }
+ }
+
+ ksft_print_header();
+
+ /*
+ * Typically we need root privileges, because:
+ * 1. We write to resctrl FS
+ * 2. We execute perf commands
+ */
+ if (geteuid() != 0)
+ return ksft_exit_skip("Not running as root. Skipping...\n");
+
+ if (has_ben) {
+ if (argc - ben_ind >= BENCHMARK_ARGS)
+ ksft_exit_fail_msg("Too long benchmark command.\n");
+
+ /* Extract benchmark command from command line. */
+ for (i = ben_ind; i < argc; i++) {
+ benchmark_cmd[i - ben_ind] = benchmark_cmd_area[i];
+ if (strlen(argv[i]) >= BENCHMARK_ARG_SIZE)
+ ksft_exit_fail_msg("Too long benchmark command argument.\n");
+ sprintf(benchmark_cmd[i - ben_ind], "%s", argv[i]);
+ }
+ benchmark_cmd[ben_count] = NULL;
+ } else {
+ /* If no benchmark is given by "-b" argument, use fill_buf. */
+ for (i = 0; i < 6; i++)
+ benchmark_cmd[i] = benchmark_cmd_area[i];
+
+ strcpy(benchmark_cmd[0], "fill_buf");
+ sprintf(benchmark_cmd[1], "%d", span);
+ strcpy(benchmark_cmd[2], "1");
+ strcpy(benchmark_cmd[3], "1");
+ strcpy(benchmark_cmd[4], "0");
+ strcpy(benchmark_cmd[5], "");
+ benchmark_cmd[6] = NULL;
+ }
+
+ sprintf(bw_report, "reads");
+ sprintf(bm_type, "fill_buf");
+
+ if (!check_resctrlfs_support())
+ return ksft_exit_skip("resctrl FS does not exist. Enable X86_CPU_RESCTRL config option.\n");
+
+ filter_dmesg();
+
+ ksft_set_plan(tests ? : 4);
+
+ if ((get_vendor() == ARCH_INTEL) && mbm_test)
+ run_mbm_test(has_ben, benchmark_cmd, span, cpu_no, bw_report);
+
+ if ((get_vendor() == ARCH_INTEL) && mba_test)
+ run_mba_test(has_ben, benchmark_cmd, span, cpu_no, bw_report);
+
+ if (cmt_test)
+ run_cmt_test(has_ben, benchmark_cmd, cpu_no);
+
+ if (cat_test)
+ run_cat_test(cpu_no, no_of_bits);
+
+ umount_resctrlfs();
+
+ return ksft_exit_pass();
+}
diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c
new file mode 100644
index 000000000..00864242d
--- /dev/null
+++ b/tools/testing/selftests/resctrl/resctrl_val.c
@@ -0,0 +1,763 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Memory bandwidth monitoring and allocation library
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+
+#define UNCORE_IMC "uncore_imc"
+#define READ_FILE_NAME "events/cas_count_read"
+#define WRITE_FILE_NAME "events/cas_count_write"
+#define DYN_PMU_PATH "/sys/bus/event_source/devices"
+#define SCALE 0.00006103515625
+#define MAX_IMCS 20
+#define MAX_TOKENS 5
+#define READ 0
+#define WRITE 1
+#define CON_MON_MBM_LOCAL_BYTES_PATH \
+ "%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
+
+#define CON_MBM_LOCAL_BYTES_PATH \
+ "%s/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
+
+#define MON_MBM_LOCAL_BYTES_PATH \
+ "%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
+
+#define MBM_LOCAL_BYTES_PATH \
+ "%s/mon_data/mon_L3_%02d/mbm_local_bytes"
+
+#define CON_MON_LCC_OCCUP_PATH \
+ "%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
+
+#define CON_LCC_OCCUP_PATH \
+ "%s/%s/mon_data/mon_L3_%02d/llc_occupancy"
+
+#define MON_LCC_OCCUP_PATH \
+ "%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
+
+#define LCC_OCCUP_PATH \
+ "%s/mon_data/mon_L3_%02d/llc_occupancy"
+
+struct membw_read_format {
+ __u64 value; /* The value of the event */
+ __u64 time_enabled; /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
+ __u64 time_running; /* if PERF_FORMAT_TOTAL_TIME_RUNNING */
+ __u64 id; /* if PERF_FORMAT_ID */
+};
+
+struct imc_counter_config {
+ __u32 type;
+ __u64 event;
+ __u64 umask;
+ struct perf_event_attr pe;
+ struct membw_read_format return_value;
+ int fd;
+};
+
+static char mbm_total_path[1024];
+static int imcs;
+static struct imc_counter_config imc_counters_config[MAX_IMCS][2];
+
+void membw_initialize_perf_event_attr(int i, int j)
+{
+ memset(&imc_counters_config[i][j].pe, 0,
+ sizeof(struct perf_event_attr));
+ imc_counters_config[i][j].pe.type = imc_counters_config[i][j].type;
+ imc_counters_config[i][j].pe.size = sizeof(struct perf_event_attr);
+ imc_counters_config[i][j].pe.disabled = 1;
+ imc_counters_config[i][j].pe.inherit = 1;
+ imc_counters_config[i][j].pe.exclude_guest = 0;
+ imc_counters_config[i][j].pe.config =
+ imc_counters_config[i][j].umask << 8 |
+ imc_counters_config[i][j].event;
+ imc_counters_config[i][j].pe.sample_type = PERF_SAMPLE_IDENTIFIER;
+ imc_counters_config[i][j].pe.read_format =
+ PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
+}
+
+void membw_ioctl_perf_event_ioc_reset_enable(int i, int j)
+{
+ ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_RESET, 0);
+ ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_ENABLE, 0);
+}
+
+void membw_ioctl_perf_event_ioc_disable(int i, int j)
+{
+ ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_DISABLE, 0);
+}
+
+/*
+ * get_event_and_umask: Parse config into event and umask
+ * @cas_count_cfg: Config
+ * @count: iMC number
+ * @op: Operation (read/write)
+ */
+void get_event_and_umask(char *cas_count_cfg, int count, bool op)
+{
+ char *token[MAX_TOKENS];
+ int i = 0;
+
+ strcat(cas_count_cfg, ",");
+ token[0] = strtok(cas_count_cfg, "=,");
+
+ for (i = 1; i < MAX_TOKENS; i++)
+ token[i] = strtok(NULL, "=,");
+
+ for (i = 0; i < MAX_TOKENS; i++) {
+ if (!token[i])
+ break;
+ if (strcmp(token[i], "event") == 0) {
+ if (op == READ)
+ imc_counters_config[count][READ].event =
+ strtol(token[i + 1], NULL, 16);
+ else
+ imc_counters_config[count][WRITE].event =
+ strtol(token[i + 1], NULL, 16);
+ }
+ if (strcmp(token[i], "umask") == 0) {
+ if (op == READ)
+ imc_counters_config[count][READ].umask =
+ strtol(token[i + 1], NULL, 16);
+ else
+ imc_counters_config[count][WRITE].umask =
+ strtol(token[i + 1], NULL, 16);
+ }
+ }
+}
+
+static int open_perf_event(int i, int cpu_no, int j)
+{
+ imc_counters_config[i][j].fd =
+ perf_event_open(&imc_counters_config[i][j].pe, -1, cpu_no, -1,
+ PERF_FLAG_FD_CLOEXEC);
+
+ if (imc_counters_config[i][j].fd == -1) {
+ fprintf(stderr, "Error opening leader %llx\n",
+ imc_counters_config[i][j].pe.config);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Get type and config (read and write) of an iMC counter */
+static int read_from_imc_dir(char *imc_dir, int count)
+{
+ char cas_count_cfg[1024], imc_counter_cfg[1024], imc_counter_type[1024];
+ FILE *fp;
+
+ /* Get type of iMC counter */
+ sprintf(imc_counter_type, "%s%s", imc_dir, "type");
+ fp = fopen(imc_counter_type, "r");
+ if (!fp) {
+ perror("Failed to open imc counter type file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%u", &imc_counters_config[count][READ].type) <= 0) {
+ perror("Could not get imc type");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ imc_counters_config[count][WRITE].type =
+ imc_counters_config[count][READ].type;
+
+ /* Get read config */
+ sprintf(imc_counter_cfg, "%s%s", imc_dir, READ_FILE_NAME);
+ fp = fopen(imc_counter_cfg, "r");
+ if (!fp) {
+ perror("Failed to open imc config file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
+ perror("Could not get imc cas count read");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ get_event_and_umask(cas_count_cfg, count, READ);
+
+ /* Get write config */
+ sprintf(imc_counter_cfg, "%s%s", imc_dir, WRITE_FILE_NAME);
+ fp = fopen(imc_counter_cfg, "r");
+ if (!fp) {
+ perror("Failed to open imc config file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
+ perror("Could not get imc cas count write");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ get_event_and_umask(cas_count_cfg, count, WRITE);
+
+ return 0;
+}
+
+/*
+ * A system can have 'n' iMC (Integrated Memory Controller) counters;
+ * get that 'n'. For each iMC counter get its type and config.
+ * Also, each counter has two configs, one for read and the other for write.
+ * A config again has two parts, event and umask.
+ * Enumerate all these details into an array of structures.
+ *
+ * Return: >= 0 on success. < 0 on failure.
+ */
+static int num_of_imcs(void)
+{
+ char imc_dir[512], *temp;
+ unsigned int count = 0;
+ struct dirent *ep;
+ int ret;
+ DIR *dp;
+
+ dp = opendir(DYN_PMU_PATH);
+ if (dp) {
+ while ((ep = readdir(dp))) {
+ temp = strstr(ep->d_name, UNCORE_IMC);
+ if (!temp)
+ continue;
+
+ /*
+ * imc counters are named as "uncore_imc_<n>", hence
+ * increment the pointer to point to <n>. Note that
+			 * sizeof(UNCORE_IMC) also counts the null terminator,
+			 * so the trailing underscore in "uncore_imc_" need
+			 * not be counted separately.
+ */
+ temp = temp + sizeof(UNCORE_IMC);
+
+ /*
+ * Some directories under "DYN_PMU_PATH" could have
+ * names like "uncore_imc_free_running", hence, check if
+ * first character is a numerical digit or not.
+ */
+ if (temp[0] >= '0' && temp[0] <= '9') {
+ sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
+ ep->d_name);
+ ret = read_from_imc_dir(imc_dir, count);
+ if (ret) {
+ closedir(dp);
+
+ return ret;
+ }
+ count++;
+ }
+ }
+ closedir(dp);
+ if (count == 0) {
+			perror("Unable to find iMC counters!\n");
+
+ return -1;
+ }
+ } else {
+ perror("Unable to open PMU directory!\n");
+
+ return -1;
+ }
+
+ return count;
+}
+
+static int initialize_mem_bw_imc(void)
+{
+ int imc, j;
+
+ imcs = num_of_imcs();
+ if (imcs <= 0)
+ return imcs;
+
+ /* Initialize perf_event_attr structures for all iMC's */
+ for (imc = 0; imc < imcs; imc++) {
+ for (j = 0; j < 2; j++)
+ membw_initialize_perf_event_attr(imc, j);
+ }
+
+ return 0;
+}
+
+/*
+ * get_mem_bw_imc: Memory bandwidth as reported by iMC counters
+ * @cpu_no: CPU number that the benchmark PID is bound to
+ * @bw_report: Bandwidth report type (reads, writes)
+ *
+ * Memory B/W utilized by a process on a socket can be calculated using
+ * iMC counters. Perf events are used to read these counters.
+ *
+ * Return: = 0 on success. < 0 on failure.
+ */
+static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
+{
+ float reads, writes, of_mul_read, of_mul_write;
+ int imc, j, ret;
+
+ /* Start all iMC counters to log values (both read and write) */
+ reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1;
+ for (imc = 0; imc < imcs; imc++) {
+ for (j = 0; j < 2; j++) {
+ ret = open_perf_event(imc, cpu_no, j);
+ if (ret)
+ return -1;
+ }
+ for (j = 0; j < 2; j++)
+ membw_ioctl_perf_event_ioc_reset_enable(imc, j);
+ }
+
+ sleep(1);
+
+ /* Stop counters after a second to get results (both read and write) */
+ for (imc = 0; imc < imcs; imc++) {
+ for (j = 0; j < 2; j++)
+ membw_ioctl_perf_event_ioc_disable(imc, j);
+ }
+
+ /*
+ * Get results which are stored in struct type imc_counter_config
+	 * Take overflow into consideration before calculating total b/w
+ */
+ for (imc = 0; imc < imcs; imc++) {
+ struct imc_counter_config *r =
+ &imc_counters_config[imc][READ];
+ struct imc_counter_config *w =
+ &imc_counters_config[imc][WRITE];
+
+ if (read(r->fd, &r->return_value,
+ sizeof(struct membw_read_format)) == -1) {
+ perror("Couldn't get read b/w through iMC");
+
+ return -1;
+ }
+
+ if (read(w->fd, &w->return_value,
+ sizeof(struct membw_read_format)) == -1) {
+ perror("Couldn't get write bw through iMC");
+
+ return -1;
+ }
+
+ __u64 r_time_enabled = r->return_value.time_enabled;
+ __u64 r_time_running = r->return_value.time_running;
+
+ if (r_time_enabled != r_time_running)
+ of_mul_read = (float)r_time_enabled /
+ (float)r_time_running;
+
+ __u64 w_time_enabled = w->return_value.time_enabled;
+ __u64 w_time_running = w->return_value.time_running;
+
+ if (w_time_enabled != w_time_running)
+ of_mul_write = (float)w_time_enabled /
+ (float)w_time_running;
+ reads += r->return_value.value * of_mul_read * SCALE;
+ writes += w->return_value.value * of_mul_write * SCALE;
+ }
+
+ for (imc = 0; imc < imcs; imc++) {
+ close(imc_counters_config[imc][READ].fd);
+ close(imc_counters_config[imc][WRITE].fd);
+ }
+
+ if (strcmp(bw_report, "reads") == 0) {
+ *bw_imc = reads;
+ return 0;
+ }
+
+ if (strcmp(bw_report, "writes") == 0) {
+ *bw_imc = writes;
+ return 0;
+ }
+
+ *bw_imc = reads + writes;
+ return 0;
+}
+
+void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
+{
+ if (ctrlgrp && mongrp)
+ sprintf(mbm_total_path, CON_MON_MBM_LOCAL_BYTES_PATH,
+ RESCTRL_PATH, ctrlgrp, mongrp, resource_id);
+ else if (!ctrlgrp && mongrp)
+ sprintf(mbm_total_path, MON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
+ mongrp, resource_id);
+ else if (ctrlgrp && !mongrp)
+ sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
+ ctrlgrp, resource_id);
+ else if (!ctrlgrp && !mongrp)
+ sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
+ resource_id);
+}
+
+/*
+ * initialize_mem_bw_resctrl: Appropriately populate "mbm_total_path"
+ * @ctrlgrp: Name of the control monitor group (con_mon grp)
+ * @mongrp: Name of the monitor group (mon grp)
+ * @cpu_no: CPU number that the benchmark PID is bound to
+ * @resctrl_val: Resctrl feature (e.g. mbm, mba, etc.)
+ */
+static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
+ int cpu_no, char *resctrl_val)
+{
+ int resource_id;
+
+ if (get_resource_id(cpu_no, &resource_id) < 0) {
+ perror("Could not get resource_id");
+ return;
+ }
+
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
+ set_mbm_path(ctrlgrp, mongrp, resource_id);
+
+ if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ if (ctrlgrp)
+ sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
+ RESCTRL_PATH, ctrlgrp, resource_id);
+ else
+ sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH,
+ RESCTRL_PATH, resource_id);
+ }
+}
+
+/*
+ * Get MBM Local bytes as reported by resctrl FS
+ * For MBM,
+ * 1. If con_mon grp and mon grp are given, then read from con_mon grp's mon grp
+ * 2. If only con_mon grp is given, then read from con_mon grp
+ * 3. If both are not given, then read from root con_mon grp
+ * For MBA,
+ * 1. If con_mon grp is given, then read from it
+ * 2. If con_mon grp is not given, then read from root con_mon grp
+ */
+static int get_mem_bw_resctrl(unsigned long *mbm_total)
+{
+ FILE *fp;
+
+ fp = fopen(mbm_total_path, "r");
+ if (!fp) {
+ perror("Failed to open total bw file");
+
+ return -1;
+ }
+ if (fscanf(fp, "%lu", mbm_total) <= 0) {
+ perror("Could not get mbm local bytes");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ return 0;
+}
+
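+/*
+ * bm_pid is the benchmark child, killed by ctrlc_handler() below; ppid is
+ * the parent's pid, saved so that the child can terminate the parent if
+ * the benchmark cannot be started.
+ */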
+pid_t bm_pid, ppid;
+
+void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
+{
+ kill(bm_pid, SIGKILL);
+ umount_resctrlfs();
+ tests_cleanup();
+ ksft_print_msg("Ending\n\n");
+
+ exit(EXIT_SUCCESS);
+}
+
+/*
+ * print_results_bw:	Print the memory bandwidth results to stdout or append them to a file
+ * @filename: file that stores the results
+ * @bm_pid: child pid that runs benchmark
+ * @bw_imc: perf imc counter value
+ * @bw_resc: memory bandwidth value
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+static int print_results_bw(char *filename, int bm_pid, float bw_imc,
+ unsigned long bw_resc)
+{
+ unsigned long diff = fabs(bw_imc - bw_resc);
+ FILE *fp;
+
+ if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
+ printf("Pid: %d \t Mem_BW_iMC: %f \t ", bm_pid, bw_imc);
+ printf("Mem_BW_resc: %lu \t Difference: %lu\n", bw_resc, diff);
+ } else {
+ fp = fopen(filename, "a");
+ if (!fp) {
+ perror("Cannot open results file");
+
+ return errno;
+ }
+ if (fprintf(fp, "Pid: %d \t Mem_BW_iMC: %f \t Mem_BW_resc: %lu \t Difference: %lu\n",
+ bm_pid, bw_imc, bw_resc, diff) <= 0) {
+ fclose(fp);
+ perror("Could not log results.");
+
+ return errno;
+ }
+ fclose(fp);
+ }
+
+ return 0;
+}
+
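+/* Analogous to set_mbm_path() above, but pointing at the llc_occupancy file */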
+static void set_cmt_path(const char *ctrlgrp, const char *mongrp, char sock_num)
+{
+ if (strlen(ctrlgrp) && strlen(mongrp))
+ sprintf(llc_occup_path, CON_MON_LCC_OCCUP_PATH, RESCTRL_PATH,
+ ctrlgrp, mongrp, sock_num);
+ else if (!strlen(ctrlgrp) && strlen(mongrp))
+ sprintf(llc_occup_path, MON_LCC_OCCUP_PATH, RESCTRL_PATH,
+ mongrp, sock_num);
+ else if (strlen(ctrlgrp) && !strlen(mongrp))
+ sprintf(llc_occup_path, CON_LCC_OCCUP_PATH, RESCTRL_PATH,
+ ctrlgrp, sock_num);
+ else if (!strlen(ctrlgrp) && !strlen(mongrp))
+ sprintf(llc_occup_path, LCC_OCCUP_PATH, RESCTRL_PATH, sock_num);
+}
+
+/*
+ * initialize_llc_occu_resctrl: Appropriately populate "llc_occup_path"
+ * @ctrlgrp: Name of the control monitor group (con_mon grp)
+ * @mongrp: Name of the monitor group (mon grp)
+ * @cpu_no:		CPU number that the benchmark PID is bound to
+ * @resctrl_val:	Resctrl feature (e.g., cat, cmt)
+ */
+static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
+ int cpu_no, char *resctrl_val)
+{
+ int resource_id;
+
+ if (get_resource_id(cpu_no, &resource_id) < 0) {
+		perror("# Unable to get resource_id");
+ return;
+ }
+
+ if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
+ set_cmt_path(ctrlgrp, mongrp, resource_id);
+}
+
+static int
+measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
+{
+ unsigned long bw_resc, bw_resc_end;
+ float bw_imc;
+ int ret;
+
+ /*
+	 * Measure memory bandwidth from resctrl and from another source,
+	 * which here is the perf iMC value but could be something else if
+	 * the perf iMC event is not available. Compare the two values to
+	 * validate the resctrl value. It takes one second to measure the data.
+ */
+ ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
+ if (ret < 0)
+ return ret;
+
+ ret = get_mem_bw_resctrl(&bw_resc_end);
+ if (ret < 0)
+ return ret;
+
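+	/*
+	 * mbm_local_bytes is a cumulative byte count, so the delta over the
+	 * roughly one second measurement window (converted to MB) is what is
+	 * compared against the iMC bandwidth.
+	 */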
+ bw_resc = (bw_resc_end - *bw_resc_start) / MB;
+ ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
+ if (ret)
+ return ret;
+
+ *bw_resc_start = bw_resc_end;
+
+ return 0;
+}
+
+/*
+ * resctrl_val:	execute the benchmark and measure memory bandwidth (or cache
+ *			occupancy) while it runs
+ * @benchmark_cmd: benchmark command and its arguments
+ * @param: parameters passed to resctrl_val()
+ *
+ * Return: 0 on success. non-zero on failure.
+ */
+int resctrl_val(char **benchmark_cmd, struct resctrl_val_param *param)
+{
+ char *resctrl_val = param->resctrl_val;
+ unsigned long bw_resc_start = 0;
+ struct sigaction sigact;
+ int ret = 0, pipefd[2];
+ char pipe_message = 0;
+ union sigval value;
+
+ if (strcmp(param->filename, "") == 0)
+ sprintf(param->filename, "stdio");
+
+ if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
+ !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+ ret = validate_bw_report_request(param->bw_report);
+ if (ret)
+ return ret;
+ }
+
+ ret = remount_resctrlfs(param->mum_resctrlfs);
+ if (ret)
+ return ret;
+
+ /*
+	 * If the child fails to start the benchmark, it must be able to kill
+	 * the parent, so save the parent's pid
+ */
+ ppid = getpid();
+
+ if (pipe(pipefd)) {
+ perror("# Unable to create pipe");
+
+ return -1;
+ }
+
+ /*
+ * Fork to start benchmark, save child's pid so that it can be killed
+ * when needed
+ */
+ bm_pid = fork();
+ if (bm_pid == -1) {
+ perror("# Unable to fork");
+
+ return -1;
+ }
+
+ if (bm_pid == 0) {
+ /*
+ * Mask all signals except SIGUSR1, parent uses SIGUSR1 to
+ * start benchmark
+ */
+ sigfillset(&sigact.sa_mask);
+ sigdelset(&sigact.sa_mask, SIGUSR1);
+
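+		/*
+		 * run_benchmark() is installed as the SIGUSR1 handler; the
+		 * parent starts the benchmark later by delivering SIGUSR1
+		 * via sigqueue() with the benchmark command in sival_ptr.
+		 */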
+ sigact.sa_sigaction = run_benchmark;
+ sigact.sa_flags = SA_SIGINFO;
+
+ /* Register for "SIGUSR1" signal from parent */
+ if (sigaction(SIGUSR1, &sigact, NULL))
+ PARENT_EXIT("Can't register child for signal");
+
+ /* Tell parent that child is ready */
+ close(pipefd[0]);
+ pipe_message = 1;
+ if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) <
+ sizeof(pipe_message)) {
+ perror("# failed signaling parent process");
+ close(pipefd[1]);
+ return -1;
+ }
+ close(pipefd[1]);
+
+ /* Suspend child until delivery of "SIGUSR1" from parent */
+ sigsuspend(&sigact.sa_mask);
+
+ PARENT_EXIT("Child is done");
+ }
+
+ ksft_print_msg("Benchmark PID: %d\n", bm_pid);
+
+ /*
+ * Register CTRL-C handler for parent, as it has to kill benchmark
+ * before exiting
+ */
+ sigact.sa_sigaction = ctrlc_handler;
+ sigemptyset(&sigact.sa_mask);
+ sigact.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGINT, &sigact, NULL) ||
+ sigaction(SIGTERM, &sigact, NULL) ||
+ sigaction(SIGHUP, &sigact, NULL)) {
+ perror("# sigaction");
+ ret = errno;
+ goto out;
+ }
+
+ value.sival_ptr = benchmark_cmd;
+
+ /* Taskset benchmark to specified cpu */
+ ret = taskset_benchmark(bm_pid, param->cpu_no);
+ if (ret)
+ goto out;
+
+ /* Write benchmark to specified control&monitoring grp in resctrl FS */
+ ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
+ resctrl_val);
+ if (ret)
+ goto out;
+
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+ !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ ret = initialize_mem_bw_imc();
+ if (ret)
+ goto out;
+
+ initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
+ param->cpu_no, resctrl_val);
+ } else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
+ initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
+ param->cpu_no, resctrl_val);
+
+ /* Parent waits for child to be ready. */
+ close(pipefd[1]);
+ while (pipe_message != 1) {
+ if (read(pipefd[0], &pipe_message, sizeof(pipe_message)) <
+ sizeof(pipe_message)) {
+ perror("# failed reading message from child process");
+ close(pipefd[0]);
+ goto out;
+ }
+ }
+ close(pipefd[0]);
+
+ /* Signal child to start benchmark */
+ if (sigqueue(bm_pid, SIGUSR1, value) == -1) {
+ perror("# sigqueue SIGUSR1 to child");
+ ret = errno;
+ goto out;
+ }
+
+ /* Give benchmark enough time to fully run */
+ sleep(1);
+
+ /* Test runs until the callback setup() tells the test to stop. */
+ while (1) {
+ ret = param->setup(1, param);
+ if (ret == END_OF_TESTS) {
+ ret = 0;
+ break;
+ }
+ if (ret < 0)
+ break;
+
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+ !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ ret = measure_vals(param, &bw_resc_start);
+ if (ret)
+ break;
+ } else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+ sleep(1);
+ ret = measure_cache_vals(param, bm_pid);
+ if (ret)
+ break;
+ }
+ }
+
+out:
+ kill(bm_pid, SIGKILL);
+ umount_resctrlfs();
+
+ return ret;
+}
diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c
new file mode 100644
index 000000000..6f543e470
--- /dev/null
+++ b/tools/testing/selftests/resctrl/resctrlfs.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic resctrl file system operations
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
+ * Fenghua Yu <fenghua.yu@intel.com>
+ */
+#include "resctrl.h"
+
+static int find_resctrl_mount(char *buffer)
+{
+ FILE *mounts;
+ char line[256], *fs, *mntpoint;
+
+ mounts = fopen("/proc/mounts", "r");
+ if (!mounts) {
+ perror("/proc/mounts");
+ return -ENXIO;
+ }
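+	/*
+	 * Each /proc/mounts line is "<device> <mountpoint> <fstype> ...";
+	 * look for an entry whose filesystem type is "resctrl".
+	 */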
+ while (!feof(mounts)) {
+ if (!fgets(line, 256, mounts))
+ break;
+ fs = strtok(line, " \t");
+ if (!fs)
+ continue;
+ mntpoint = strtok(NULL, " \t");
+ if (!mntpoint)
+ continue;
+ fs = strtok(NULL, " \t");
+ if (!fs)
+ continue;
+ if (strcmp(fs, "resctrl"))
+ continue;
+
+ fclose(mounts);
+ if (buffer)
+ strncpy(buffer, mntpoint, 256);
+
+ return 0;
+ }
+
+ fclose(mounts);
+
+ return -ENOENT;
+}
+
+/*
+ * remount_resctrlfs - Remount resctrl FS at /sys/fs/resctrl
+ * @mum_resctrlfs: Should the resctrl FS be remounted?
+ *
+ * If not mounted, mount it.
+ * If mounted and mum_resctrlfs, then remount resctrl FS.
+ * If mounted and !mum_resctrlfs, then do nothing.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+int remount_resctrlfs(bool mum_resctrlfs)
+{
+ char mountpoint[256];
+ int ret;
+
+ ret = find_resctrl_mount(mountpoint);
+ if (ret)
+ strcpy(mountpoint, RESCTRL_PATH);
+
+ if (!ret && mum_resctrlfs && umount(mountpoint))
+ ksft_print_msg("Fail: unmounting \"%s\"\n", mountpoint);
+
+ if (!ret && !mum_resctrlfs)
+ return 0;
+
+ ksft_print_msg("Mounting resctrl to \"%s\"\n", RESCTRL_PATH);
+ ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL);
+ if (ret)
+ perror("# mount");
+
+ return ret;
+}
+
+int umount_resctrlfs(void)
+{
+ if (find_resctrl_mount(NULL))
+ return 0;
+
+ if (umount(RESCTRL_PATH)) {
+ perror("# Unable to umount resctrl");
+
+ return errno;
+ }
+
+ return 0;
+}
+
+/*
+ * get_resource_id - Get socket number/l3 id for a specified CPU
+ * @cpu_no: CPU number
+ * @resource_id: Socket number or l3_id
+ *
+ * Return: >= 0 on success, < 0 on failure.
+ */
+int get_resource_id(int cpu_no, int *resource_id)
+{
+ char phys_pkg_path[1024];
+ FILE *fp;
+
+ if (get_vendor() == ARCH_AMD)
+ sprintf(phys_pkg_path, "%s%d/cache/index3/id",
+ PHYS_ID_PATH, cpu_no);
+ else
+ sprintf(phys_pkg_path, "%s%d/topology/physical_package_id",
+ PHYS_ID_PATH, cpu_no);
+
+ fp = fopen(phys_pkg_path, "r");
+ if (!fp) {
+ perror("Failed to open physical_package_id");
+
+ return -1;
+ }
+ if (fscanf(fp, "%d", resource_id) <= 0) {
+ perror("Could not get socket number or l3 id");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ return 0;
+}
+
+/*
+ * get_cache_size - Get cache size for a specified CPU
+ * @cpu_no: CPU number
+ * @cache_type: Cache level L2/L3
+ * @cache_size: pointer to cache_size
+ *
+ * Return: = 0 on success, < 0 on failure.
+ */
+int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size)
+{
+ char cache_path[1024], cache_str[64];
+ int length, i, cache_num;
+ FILE *fp;
+
+ if (!strcmp(cache_type, "L3")) {
+ cache_num = 3;
+ } else if (!strcmp(cache_type, "L2")) {
+ cache_num = 2;
+ } else {
+ perror("Invalid cache level");
+ return -1;
+ }
+
+ sprintf(cache_path, "/sys/bus/cpu/devices/cpu%d/cache/index%d/size",
+ cpu_no, cache_num);
+ fp = fopen(cache_path, "r");
+ if (!fp) {
+ perror("Failed to open cache size");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cache_str) <= 0) {
+ perror("Could not get cache_size");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ length = (int)strlen(cache_str);
+
+ *cache_size = 0;
+
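+	/* Parse a sysfs size string such as "512K" or "16M" into bytes */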
+	for (i = 0; i < length; i++) {
+		if ((cache_str[i] >= '0') && (cache_str[i] <= '9'))
+			*cache_size = *cache_size * 10 + (cache_str[i] - '0');
+		else if (cache_str[i] == 'K')
+			*cache_size = *cache_size * 1024;
+		else if (cache_str[i] == 'M')
+			*cache_size = *cache_size * 1024 * 1024;
+		else
+			break;
+	}
+
+ return 0;
+}
+
+#define CORE_SIBLINGS_PATH "/sys/bus/cpu/devices/cpu"
+
+/*
+ * get_cbm_mask - Get cbm mask for given cache
+ * @cache_type: Cache level L2/L3
+ * @cbm_mask: cbm_mask returned as a string
+ *
+ * Return: = 0 on success, < 0 on failure.
+ */
+int get_cbm_mask(char *cache_type, char *cbm_mask)
+{
+ char cbm_mask_path[1024];
+ FILE *fp;
+
+ if (!cbm_mask)
+ return -1;
+
+ sprintf(cbm_mask_path, "%s/%s/cbm_mask", CBM_MASK_PATH, cache_type);
+
+ fp = fopen(cbm_mask_path, "r");
+ if (!fp) {
+ perror("Failed to open cache level");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cbm_mask) <= 0) {
+ perror("Could not get max cbm_mask");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ return 0;
+}
+
+/*
+ * get_core_sibling - Get sibling core id from the same socket for given CPU
+ * @cpu_no: CPU number
+ *
+ * Return: > 0 on success, < 0 on failure.
+ */
+int get_core_sibling(int cpu_no)
+{
+ char core_siblings_path[1024], cpu_list_str[64];
+ int sibling_cpu_no = -1;
+ FILE *fp;
+
+ sprintf(core_siblings_path, "%s%d/topology/core_siblings_list",
+ CORE_SIBLINGS_PATH, cpu_no);
+
+ fp = fopen(core_siblings_path, "r");
+ if (!fp) {
+ perror("Failed to open core siblings path");
+
+ return -1;
+ }
+ if (fscanf(fp, "%s", cpu_list_str) <= 0) {
+ perror("Could not get core_siblings list");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
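+	/*
+	 * core_siblings_list is a CPU list such as "0-3,8-11"; take the
+	 * first number in it that is neither 0 nor the given CPU number.
+	 */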
+ char *token = strtok(cpu_list_str, "-,");
+
+ while (token) {
+ sibling_cpu_no = atoi(token);
+ /* Skipping core 0 as we don't want to run test on core 0 */
+ if (sibling_cpu_no != 0 && sibling_cpu_no != cpu_no)
+ break;
+ token = strtok(NULL, "-,");
+ }
+
+ return sibling_cpu_no;
+}
+
+/*
+ * taskset_benchmark - Taskset PID (i.e. benchmark) to a specified cpu
+ * @bm_pid:	PID that should be bound
+ * @cpu_no:	CPU number to which the PID should be bound
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+int taskset_benchmark(pid_t bm_pid, int cpu_no)
+{
+ cpu_set_t my_set;
+
+ CPU_ZERO(&my_set);
+ CPU_SET(cpu_no, &my_set);
+
+ if (sched_setaffinity(bm_pid, sizeof(cpu_set_t), &my_set)) {
+ perror("Unable to taskset benchmark");
+
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * run_benchmark - Run a specified benchmark or fill_buf (default benchmark)
+ *		   when the specified signal is received. Direct benchmark
+ *		   stdout to /dev/null.
+ * @signum: signal number
+ * @info: signal info
+ * @ucontext: user context in signal handling
+ *
+ * Return: void
+ */
+void run_benchmark(int signum, siginfo_t *info, void *ucontext)
+{
+ int operation, ret, malloc_and_init_memory, memflush;
+ unsigned long span, buffer_span;
+ char **benchmark_cmd;
+ char resctrl_val[64];
+ FILE *fp;
+
+ benchmark_cmd = info->si_ptr;
+
+ /*
+ * Direct stdio of child to /dev/null, so that only parent writes to
+ * stdio (console)
+ */
+ fp = freopen("/dev/null", "w", stdout);
+ if (!fp)
+ PARENT_EXIT("Unable to direct benchmark status to /dev/null");
+
+ if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
+ /* Execute default fill_buf benchmark */
+ span = strtoul(benchmark_cmd[1], NULL, 10);
+ malloc_and_init_memory = atoi(benchmark_cmd[2]);
+ memflush = atoi(benchmark_cmd[3]);
+ operation = atoi(benchmark_cmd[4]);
+ sprintf(resctrl_val, "%s", benchmark_cmd[5]);
+
+ if (strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
+ buffer_span = span * MB;
+ else
+ buffer_span = span;
+
+ if (run_fill_buf(buffer_span, malloc_and_init_memory, memflush,
+ operation, resctrl_val))
+ fprintf(stderr, "Error in running fill buffer\n");
+ } else {
+ /* Execute specified benchmark */
+ ret = execvp(benchmark_cmd[0], benchmark_cmd);
+ if (ret)
+			perror("execvp");
+ }
+
+ fclose(stdout);
+ PARENT_EXIT("Unable to run specified benchmark");
+}
+
+/*
+ * create_grp - Create a group only if one doesn't exist
+ * @grp_name: Name of the group
+ * @grp: Full path and name of the group
+ * @parent_grp: Full path and name of the parent group
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int create_grp(const char *grp_name, char *grp, const char *parent_grp)
+{
+ int found_grp = 0;
+ struct dirent *ep;
+ DIR *dp;
+
+ /*
+	 * At this point resctrl FS is guaranteed to be mounted. If the length
+	 * of grp_name is 0, the user wants to use the root con_mon grp, so do
+	 * nothing
+ */
+ if (strlen(grp_name) == 0)
+ return 0;
+
+ /* Check if requested grp exists or not */
+ dp = opendir(parent_grp);
+ if (dp) {
+ while ((ep = readdir(dp)) != NULL) {
+ if (strcmp(ep->d_name, grp_name) == 0)
+ found_grp = 1;
+ }
+ closedir(dp);
+ } else {
+ perror("Unable to open resctrl for group");
+
+ return -1;
+ }
+
+ /* Requested grp doesn't exist, hence create it */
+ if (found_grp == 0) {
+ if (mkdir(grp, 0) == -1) {
+ perror("Unable to create group");
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int write_pid_to_tasks(char *tasks, pid_t pid)
+{
+ FILE *fp;
+
+ fp = fopen(tasks, "w");
+ if (!fp) {
+ perror("Failed to open tasks file");
+
+ return -1;
+ }
+ if (fprintf(fp, "%d\n", pid) < 0) {
+		perror("Failed to write pid to tasks file");
+ fclose(fp);
+
+ return -1;
+ }
+ fclose(fp);
+
+ return 0;
+}
+
+/*
+ * write_bm_pid_to_resctrl - Write a PID (i.e. benchmark) to resctrl FS
+ * @bm_pid: PID that should be written
+ * @ctrlgrp: Name of the control monitor group (con_mon grp)
+ * @mongrp: Name of the monitor group (mon grp)
+ * @resctrl_val:	Resctrl feature (e.g., mbm, mba)
+ *
+ * If a con_mon grp is requested, create it and write the pid to it;
+ * otherwise write the pid to the root con_mon grp.
+ * If a mon grp is requested, create it and write the pid to it; otherwise
+ * the pid is not written, which means the pid stays in the con_mon grp and
+ * the con_mon grp's mon_data directory should be consulted for results.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp,
+ char *resctrl_val)
+{
+ char controlgroup[128], monitorgroup[512], monitorgroup_p[256];
+ char tasks[1024];
+ int ret = 0;
+
+ if (strlen(ctrlgrp))
+ sprintf(controlgroup, "%s/%s", RESCTRL_PATH, ctrlgrp);
+ else
+ sprintf(controlgroup, "%s", RESCTRL_PATH);
+
+ /* Create control and monitoring group and write pid into it */
+ ret = create_grp(ctrlgrp, controlgroup, RESCTRL_PATH);
+ if (ret)
+ goto out;
+ sprintf(tasks, "%s/tasks", controlgroup);
+ ret = write_pid_to_tasks(tasks, bm_pid);
+ if (ret)
+ goto out;
+
+ /* Create mon grp and write pid into it for "mbm" and "cmt" test */
+ if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)) ||
+ !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+ if (strlen(mongrp)) {
+ sprintf(monitorgroup_p, "%s/mon_groups", controlgroup);
+ sprintf(monitorgroup, "%s/%s", monitorgroup_p, mongrp);
+ ret = create_grp(mongrp, monitorgroup, monitorgroup_p);
+ if (ret)
+ goto out;
+
+ sprintf(tasks, "%s/mon_groups/%s/tasks",
+ controlgroup, mongrp);
+ ret = write_pid_to_tasks(tasks, bm_pid);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ ksft_print_msg("Writing benchmark parameters to resctrl FS\n");
+ if (ret)
+ perror("# writing to resctrlfs");
+
+ return ret;
+}
+
+/*
+ * write_schemata - Update schemata of a con_mon grp
+ * @ctrlgrp: Name of the con_mon grp
+ * @schemata: Schemata that should be updated to
+ * @cpu_no:		CPU number that the benchmark PID is bound to
+ * @resctrl_val:	Resctrl feature (e.g., mbm, mba)
+ *
+ * Update schemata of a con_mon grp *only* if requested resctrl feature is
+ * allocation type
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val)
+{
+ char controlgroup[1024], schema[1024], reason[64];
+ int resource_id, ret = 0;
+ FILE *fp;
+
+ if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) &&
+ strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) &&
+ strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
+ return -ENOENT;
+
+ if (!schemata) {
+ ksft_print_msg("Skipping empty schemata update\n");
+
+ return -1;
+ }
+
+ if (get_resource_id(cpu_no, &resource_id) < 0) {
+ sprintf(reason, "Failed to get resource id");
+ ret = -1;
+
+ goto out;
+ }
+
+ if (strlen(ctrlgrp) != 0)
+ sprintf(controlgroup, "%s/%s/schemata", RESCTRL_PATH, ctrlgrp);
+ else
+ sprintf(controlgroup, "%s/schemata", RESCTRL_PATH);
+
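+	/*
+	 * Build a schemata line such as "L3:<id>=<cbm>" for CAT/CMT or
+	 * "MB:<id>=<value>" for MBA.
+	 */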
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) ||
+ !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
+ sprintf(schema, "%s%d%c%s", "L3:", resource_id, '=', schemata);
+ if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)))
+ sprintf(schema, "%s%d%c%s", "MB:", resource_id, '=', schemata);
+
+ fp = fopen(controlgroup, "w");
+ if (!fp) {
+ sprintf(reason, "Failed to open control group");
+ ret = -1;
+
+ goto out;
+ }
+
+ if (fprintf(fp, "%s\n", schema) < 0) {
+ sprintf(reason, "Failed to write schemata in control group");
+ fclose(fp);
+ ret = -1;
+
+ goto out;
+ }
+ fclose(fp);
+
+out:
+ ksft_print_msg("Write schema \"%s\" to resctrl FS%s%s\n",
+ schema, ret ? " # " : "",
+ ret ? reason : "");
+
+ return ret;
+}
+
+bool check_resctrlfs_support(void)
+{
+ FILE *inf = fopen("/proc/filesystems", "r");
+ DIR *dp;
+ char *res;
+ bool ret = false;
+
+ if (!inf)
+ return false;
+
+ res = fgrep(inf, "nodev\tresctrl\n");
+
+ if (res) {
+ ret = true;
+ free(res);
+ }
+
+ fclose(inf);
+
+ ksft_print_msg("%s Check kernel supports resctrl filesystem\n",
+ ret ? "Pass:" : "Fail:");
+
+ if (!ret)
+ return ret;
+
+ dp = opendir(RESCTRL_PATH);
+ ksft_print_msg("%s Check resctrl mountpoint \"%s\" exists\n",
+ dp ? "Pass:" : "Fail:", RESCTRL_PATH);
+ if (dp)
+ closedir(dp);
+
+ ksft_print_msg("resctrl filesystem %s mounted\n",
+ find_resctrl_mount(NULL) ? "not" : "is");
+
+ return ret;
+}
+
+char *fgrep(FILE *inf, const char *str)
+{
+ char line[256];
+ int slen = strlen(str);
+
+ while (!feof(inf)) {
+ if (!fgets(line, 256, inf))
+ break;
+ if (strncmp(line, str, slen))
+ continue;
+
+ return strdup(line);
+ }
+
+ return NULL;
+}
+
+/*
+ * validate_resctrl_feature_request - Check if requested feature is valid.
+ * @resctrl_val: Requested feature
+ *
+ * Return: True if the feature is supported, else false
+ */
+bool validate_resctrl_feature_request(const char *resctrl_val)
+{
+ struct stat statbuf;
+ bool found = false;
+ char *res;
+ FILE *inf;
+
+ if (!resctrl_val)
+ return false;
+
+ if (remount_resctrlfs(false))
+ return false;
+
+ if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR))) {
+ if (!stat(L3_PATH, &statbuf))
+ return true;
+ } else if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
+ if (!stat(MB_PATH, &statbuf))
+ return true;
+ } else if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
+ !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+ if (!stat(L3_MON_PATH, &statbuf)) {
+ inf = fopen(L3_MON_FEATURES_PATH, "r");
+ if (!inf)
+ return false;
+
+ if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
+ res = fgrep(inf, "llc_occupancy");
+ if (res) {
+ found = true;
+ free(res);
+ }
+ }
+
+ if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
+ res = fgrep(inf, "mbm_total_bytes");
+ if (res) {
+ free(res);
+ res = fgrep(inf, "mbm_local_bytes");
+ if (res) {
+ found = true;
+ free(res);
+ }
+ }
+ }
+ fclose(inf);
+ }
+ }
+
+ return found;
+}
+
+int filter_dmesg(void)
+{
+ char line[1024];
+ FILE *fp;
+ int pipefds[2];
+ pid_t pid;
+ int ret;
+
+ ret = pipe(pipefds);
+ if (ret) {
+ perror("pipe");
+ return ret;
+ }
+ pid = fork();
+ if (pid == 0) {
+ close(pipefds[0]);
+ dup2(pipefds[1], STDOUT_FILENO);
+ execlp("dmesg", "dmesg", NULL);
+ perror("executing dmesg");
+ exit(1);
+ }
+ close(pipefds[1]);
+ fp = fdopen(pipefds[0], "r");
+ if (!fp) {
+ perror("fdopen(pipe)");
+ kill(pid, SIGTERM);
+
+ return -1;
+ }
+
+ while (fgets(line, 1024, fp)) {
+ if (strstr(line, "intel_rdt:"))
+ ksft_print_msg("dmesg: %s", line);
+ if (strstr(line, "resctrl:"))
+ ksft_print_msg("dmesg: %s", line);
+ }
+ fclose(fp);
+ waitpid(pid, NULL, 0);
+
+ return 0;
+}
+
+int validate_bw_report_request(char *bw_report)
+{
+ if (strcmp(bw_report, "reads") == 0)
+ return 0;
+ if (strcmp(bw_report, "writes") == 0)
+ return 0;
+ if (strcmp(bw_report, "nt-writes") == 0) {
+ strcpy(bw_report, "writes");
+ return 0;
+ }
+ if (strcmp(bw_report, "total") == 0)
+ return 0;
+
+ fprintf(stderr, "Requested iMC B/W report type unavailable\n");
+
+ return -1;
+}
+
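+/*
+ * perf_event_open() has no glibc wrapper, so call the raw syscall directly.
+ */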
+int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu,
+ int group_fd, unsigned long flags)
+{
+ int ret;
+
+ ret = syscall(__NR_perf_event_open, hw_event, pid, cpu,
+ group_fd, flags);
+ return ret;
+}
+
+unsigned int count_bits(unsigned long n)
+{
+ unsigned int count = 0;
+
+ while (n) {
+ count += n & 1;
+ n >>= 1;
+ }
+
+ return count;
+}
diff --git a/tools/testing/selftests/resctrl/settings b/tools/testing/selftests/resctrl/settings
new file mode 100644
index 000000000..a383f3d45
--- /dev/null
+++ b/tools/testing/selftests/resctrl/settings
@@ -0,0 +1,3 @@
+# If running time is longer than 120 seconds when new tests are added in
+# the future, increase timeout here.
+timeout=120