Diffstat (limited to 'tools/testing/selftests/resctrl')
-rw-r--r--	tools/testing/selftests/resctrl/cache.c		| 287
-rw-r--r--	tools/testing/selftests/resctrl/cat_test.c	| 421
-rw-r--r--	tools/testing/selftests/resctrl/cmt_test.c	|  80
-rw-r--r--	tools/testing/selftests/resctrl/fill_buf.c	| 132
-rw-r--r--	tools/testing/selftests/resctrl/mba_test.c	|  30
-rw-r--r--	tools/testing/selftests/resctrl/mbm_test.c	|  34
-rw-r--r--	tools/testing/selftests/resctrl/resctrl.h	| 145
-rw-r--r--	tools/testing/selftests/resctrl/resctrl_tests.c	| 207
-rw-r--r--	tools/testing/selftests/resctrl/resctrl_val.c	| 138
-rw-r--r--	tools/testing/selftests/resctrl/resctrlfs.c	| 405
10 files changed, 1130 insertions, 749 deletions
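
For orientation before the diff itself: the series replaces the CAT test's fork/pipe perf plumbing in cache.c with reusable helpers (perf_event_attr_initialize(), perf_event_initialize_read_format(), perf_open(), perf_event_reset_enable(), perf_event_measure()), declared in resctrl.h below. The following is a minimal, hypothetical sketch of how a caller is expected to drive those helpers, modeled on the cat_test() flow in this patch; measure_misses_once() is an illustrative name, not part of the series, and the buffer/error handling is simplified.

	/*
	 * Hypothetical sketch (not part of this patch) of the new perf helper
	 * flow. Helper names and signatures are taken from resctrl.h as added
	 * by this series; assumes the selftest headers are available.
	 */
	#include <unistd.h>
	#include "resctrl.h"

	static int measure_misses_once(const struct user_params *uparams,
				       unsigned char *buf, size_t span,
				       const char *filename)
	{
		struct perf_event_read pe_read;
		struct perf_event_attr pea;
		int pe_fd, ret;

		perf_event_attr_initialize(&pea, PERF_COUNT_HW_CACHE_MISSES);
		perf_event_initialize_read_format(&pe_read);

		/* perf_open() also resets and enables the counter */
		pe_fd = perf_open(&pea, getpid(), uparams->cpu);
		if (pe_fd < 0)
			return -1;

		mem_flush(buf, span);
		fill_cache_read(buf, span, true);	/* warm up the cache */

		ret = perf_event_reset_enable(pe_fd);
		if (ret)
			goto out;

		fill_cache_read(buf, span, true);	/* measured read pass */
		ret = perf_event_measure(pe_fd, &pe_read, filename, getpid());
	out:
		close(pe_fd);
		return ret;
	}
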
diff --git a/tools/testing/selftests/resctrl/cache.c b/tools/testing/selftests/resctrl/cache.c index bcbca356d5..1b339d6bbf 100644 --- a/tools/testing/selftests/resctrl/cache.c +++ b/tools/testing/selftests/resctrl/cache.c @@ -3,106 +3,59 @@ #include <stdint.h> #include "resctrl.h" -struct read_format { - __u64 nr; /* The number of events */ - struct { - __u64 value; /* The value of the event */ - } values[2]; -}; - -static struct perf_event_attr pea_llc_miss; -static struct read_format rf_cqm; -static int fd_lm; char llc_occup_path[1024]; -static void initialize_perf_event_attr(void) +void perf_event_attr_initialize(struct perf_event_attr *pea, __u64 config) { - pea_llc_miss.type = PERF_TYPE_HARDWARE; - pea_llc_miss.size = sizeof(struct perf_event_attr); - pea_llc_miss.read_format = PERF_FORMAT_GROUP; - pea_llc_miss.exclude_kernel = 1; - pea_llc_miss.exclude_hv = 1; - pea_llc_miss.exclude_idle = 1; - pea_llc_miss.exclude_callchain_kernel = 1; - pea_llc_miss.inherit = 1; - pea_llc_miss.exclude_guest = 1; - pea_llc_miss.disabled = 1; -} - -static void ioctl_perf_event_ioc_reset_enable(void) -{ - ioctl(fd_lm, PERF_EVENT_IOC_RESET, 0); - ioctl(fd_lm, PERF_EVENT_IOC_ENABLE, 0); -} - -static int perf_event_open_llc_miss(pid_t pid, int cpu_no) -{ - fd_lm = perf_event_open(&pea_llc_miss, pid, cpu_no, -1, - PERF_FLAG_FD_CLOEXEC); - if (fd_lm == -1) { - perror("Error opening leader"); - ctrlc_handler(0, NULL, NULL); - return -1; - } - - return 0; -} - -static void initialize_llc_perf(void) -{ - memset(&pea_llc_miss, 0, sizeof(struct perf_event_attr)); - memset(&rf_cqm, 0, sizeof(struct read_format)); - - /* Initialize perf_event_attr structures for HW_CACHE_MISSES */ - initialize_perf_event_attr(); - - pea_llc_miss.config = PERF_COUNT_HW_CACHE_MISSES; - - rf_cqm.nr = 1; + memset(pea, 0, sizeof(*pea)); + pea->type = PERF_TYPE_HARDWARE; + pea->size = sizeof(*pea); + pea->read_format = PERF_FORMAT_GROUP; + pea->exclude_kernel = 1; + pea->exclude_hv = 1; + pea->exclude_idle = 1; + pea->exclude_callchain_kernel = 1; + pea->inherit = 1; + pea->exclude_guest = 1; + pea->disabled = 1; + pea->config = config; } -static int reset_enable_llc_perf(pid_t pid, int cpu_no) +/* Start counters to log values */ +int perf_event_reset_enable(int pe_fd) { - int ret = 0; + int ret; - ret = perf_event_open_llc_miss(pid, cpu_no); + ret = ioctl(pe_fd, PERF_EVENT_IOC_RESET, 0); if (ret < 0) return ret; - /* Start counters to log values */ - ioctl_perf_event_ioc_reset_enable(); + ret = ioctl(pe_fd, PERF_EVENT_IOC_ENABLE, 0); + if (ret < 0) + return ret; return 0; } -/* - * get_llc_perf: llc cache miss through perf events - * @llc_perf_miss: LLC miss counter that is filled on success - * - * Perf events like HW_CACHE_MISSES could be used to validate number of - * cache lines allocated. - * - * Return: =0 on success. <0 on failure. 
- */ -static int get_llc_perf(unsigned long *llc_perf_miss) +void perf_event_initialize_read_format(struct perf_event_read *pe_read) { - __u64 total_misses; - int ret; - - /* Stop counters after one span to get miss rate */ + memset(pe_read, 0, sizeof(*pe_read)); + pe_read->nr = 1; +} - ioctl(fd_lm, PERF_EVENT_IOC_DISABLE, 0); +int perf_open(struct perf_event_attr *pea, pid_t pid, int cpu_no) +{ + int pe_fd; - ret = read(fd_lm, &rf_cqm, sizeof(struct read_format)); - if (ret == -1) { - perror("Could not get llc misses through perf"); + pe_fd = perf_event_open(pea, pid, cpu_no, -1, PERF_FLAG_FD_CLOEXEC); + if (pe_fd == -1) { + ksft_perror("Error opening leader"); return -1; } - total_misses = rf_cqm.values[0].value; - *llc_perf_miss = total_misses; + perf_event_reset_enable(pe_fd); - return 0; + return pe_fd; } /* @@ -124,12 +77,12 @@ static int get_llc_occu_resctrl(unsigned long *llc_occupancy) fp = fopen(llc_occup_path, "r"); if (!fp) { - perror("Failed to open results file"); + ksft_perror("Failed to open results file"); - return errno; + return -1; } if (fscanf(fp, "%lu", llc_occupancy) <= 0) { - perror("Could not get llc occupancy"); + ksft_perror("Could not get llc occupancy"); fclose(fp); return -1; @@ -146,163 +99,91 @@ static int get_llc_occu_resctrl(unsigned long *llc_occupancy) * @llc_value: perf miss value / * llc occupancy value reported by resctrl FS * - * Return: 0 on success. non-zero on failure. + * Return: 0 on success, < 0 on error. */ -static int print_results_cache(char *filename, int bm_pid, - unsigned long llc_value) +static int print_results_cache(const char *filename, int bm_pid, __u64 llc_value) { FILE *fp; if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) { - printf("Pid: %d \t LLC_value: %lu\n", bm_pid, - llc_value); + printf("Pid: %d \t LLC_value: %llu\n", bm_pid, llc_value); } else { fp = fopen(filename, "a"); if (!fp) { - perror("Cannot open results file"); + ksft_perror("Cannot open results file"); - return errno; + return -1; } - fprintf(fp, "Pid: %d \t llc_value: %lu\n", bm_pid, llc_value); + fprintf(fp, "Pid: %d \t llc_value: %llu\n", bm_pid, llc_value); fclose(fp); } return 0; } -int measure_cache_vals(struct resctrl_val_param *param, int bm_pid) +/* + * perf_event_measure - Measure perf events + * @filename: Filename for writing the results + * @bm_pid: PID that runs the benchmark + * + * Measures perf events (e.g., cache misses) and writes the results into + * @filename. @bm_pid is written to the results file along with the measured + * value. + * + * Return: =0 on success. <0 on failure. + */ +int perf_event_measure(int pe_fd, struct perf_event_read *pe_read, + const char *filename, int bm_pid) { - unsigned long llc_perf_miss = 0, llc_occu_resc = 0, llc_value = 0; int ret; - /* - * Measure cache miss from perf. - */ - if (!strncmp(param->resctrl_val, CAT_STR, sizeof(CAT_STR))) { - ret = get_llc_perf(&llc_perf_miss); - if (ret < 0) - return ret; - llc_value = llc_perf_miss; - } + /* Stop counters after one span to get miss rate */ + ret = ioctl(pe_fd, PERF_EVENT_IOC_DISABLE, 0); + if (ret < 0) + return ret; - /* - * Measure llc occupancy from resctrl. 
- */ - if (!strncmp(param->resctrl_val, CMT_STR, sizeof(CMT_STR))) { - ret = get_llc_occu_resctrl(&llc_occu_resc); - if (ret < 0) - return ret; - llc_value = llc_occu_resc; + ret = read(pe_fd, pe_read, sizeof(*pe_read)); + if (ret == -1) { + ksft_perror("Could not get perf value"); + return -1; } - ret = print_results_cache(param->filename, bm_pid, llc_value); - if (ret) - return ret; - return 0; + return print_results_cache(filename, bm_pid, pe_read->values[0].value); } /* - * cache_val: execute benchmark and measure LLC occupancy resctrl - * and perf cache miss for the benchmark - * @param: parameters passed to cache_val() - * @span: buffer size for the benchmark + * measure_llc_resctrl - Measure resctrl LLC value from resctrl + * @filename: Filename for writing the results + * @bm_pid: PID that runs the benchmark * - * Return: 0 on success. non-zero on failure. + * Measures LLC occupancy from resctrl and writes the results into @filename. + * @bm_pid is written to the results file along with the measured value. + * + * Return: =0 on success. <0 on failure. */ -int cat_val(struct resctrl_val_param *param, size_t span) +int measure_llc_resctrl(const char *filename, int bm_pid) { - int memflush = 1, operation = 0, ret = 0; - char *resctrl_val = param->resctrl_val; - pid_t bm_pid; - - if (strcmp(param->filename, "") == 0) - sprintf(param->filename, "stdio"); - - bm_pid = getpid(); - - /* Taskset benchmark to specified cpu */ - ret = taskset_benchmark(bm_pid, param->cpu_no); - if (ret) - return ret; + unsigned long llc_occu_resc = 0; + int ret; - /* Write benchmark to specified con_mon grp, mon_grp in resctrl FS*/ - ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp, - resctrl_val); - if (ret) + ret = get_llc_occu_resctrl(&llc_occu_resc); + if (ret < 0) return ret; - initialize_llc_perf(); - - /* Test runs until the callback setup() tells the test to stop. */ - while (1) { - ret = param->setup(param); - if (ret == END_OF_TESTS) { - ret = 0; - break; - } - if (ret < 0) - break; - ret = reset_enable_llc_perf(bm_pid, param->cpu_no); - if (ret) - break; - - if (run_fill_buf(span, memflush, operation, true)) { - fprintf(stderr, "Error-running fill buffer\n"); - ret = -1; - goto pe_close; - } - - sleep(1); - ret = measure_cache_vals(param, bm_pid); - if (ret) - goto pe_close; - } - - return ret; - -pe_close: - close(fd_lm); - return ret; + return print_results_cache(filename, bm_pid, llc_occu_resc); } /* - * show_cache_info: show cache test result information - * @sum_llc_val: sum of LLC cache result data - * @no_of_bits: number of bits - * @cache_span: cache span in bytes for CMT or in lines for CAT - * @max_diff: max difference - * @max_diff_percent: max difference percentage - * @num_of_runs: number of runs - * @platform: show test information on this platform - * @cmt: CMT test or CAT test - * - * Return: 0 on success. non-zero on failure. 
+ * show_cache_info - Show generic cache test information + * @no_of_bits: Number of bits + * @avg_llc_val: Average of LLC cache result data + * @cache_span: Cache span + * @lines: @cache_span in lines or bytes */ -int show_cache_info(unsigned long sum_llc_val, int no_of_bits, - size_t cache_span, unsigned long max_diff, - unsigned long max_diff_percent, unsigned long num_of_runs, - bool platform, bool cmt) +void show_cache_info(int no_of_bits, __u64 avg_llc_val, size_t cache_span, bool lines) { - unsigned long avg_llc_val = 0; - float diff_percent; - long avg_diff = 0; - int ret; - - avg_llc_val = sum_llc_val / num_of_runs; - avg_diff = (long)abs(cache_span - avg_llc_val); - diff_percent = ((float)cache_span - avg_llc_val) / cache_span * 100; - - ret = platform && abs((int)diff_percent) > max_diff_percent && - (cmt ? (abs(avg_diff) > max_diff) : true); - - ksft_print_msg("%s Check cache miss rate within %lu%%\n", - ret ? "Fail:" : "Pass:", max_diff_percent); - - ksft_print_msg("Percent diff=%d\n", abs((int)diff_percent)); ksft_print_msg("Number of bits: %d\n", no_of_bits); - ksft_print_msg("Average LLC val: %lu\n", avg_llc_val); - ksft_print_msg("Cache span (%s): %zu\n", cmt ? "bytes" : "lines", + ksft_print_msg("Average LLC val: %llu\n", avg_llc_val); + ksft_print_msg("Cache span (%s): %zu\n", lines ? "lines" : "bytes", cache_span); - - return ret; } diff --git a/tools/testing/selftests/resctrl/cat_test.c b/tools/testing/selftests/resctrl/cat_test.c index 224ba8544d..4cb991be8e 100644 --- a/tools/testing/selftests/resctrl/cat_test.c +++ b/tools/testing/selftests/resctrl/cat_test.c @@ -11,108 +11,254 @@ #include "resctrl.h" #include <unistd.h> -#define RESULT_FILE_NAME1 "result_cat1" -#define RESULT_FILE_NAME2 "result_cat2" +#define RESULT_FILE_NAME "result_cat" #define NUM_OF_RUNS 5 -#define MAX_DIFF_PERCENT 4 -#define MAX_DIFF 1000000 /* - * Change schemata. Write schemata to specified - * con_mon grp, mon_grp in resctrl FS. - * Run 5 times in order to get average values. + * Minimum difference in LLC misses between a test with n+1 bits CBM to the + * test with n bits is MIN_DIFF_PERCENT_PER_BIT * (n - 1). With e.g. 5 vs 4 + * bits in the CBM mask, the minimum difference must be at least + * MIN_DIFF_PERCENT_PER_BIT * (4 - 1) = 3 percent. + * + * The relationship between number of used CBM bits and difference in LLC + * misses is not expected to be linear. With a small number of bits, the + * margin is smaller than with larger number of bits. For selftest purposes, + * however, linear approach is enough because ultimately only pass/fail + * decision has to be made and distinction between strong and stronger + * signal is irrelevant. */ -static int cat_setup(struct resctrl_val_param *p) +#define MIN_DIFF_PERCENT_PER_BIT 1UL + +static int show_results_info(__u64 sum_llc_val, int no_of_bits, + unsigned long cache_span, + unsigned long min_diff_percent, + unsigned long num_of_runs, bool platform, + __s64 *prev_avg_llc_val) { - char schemata[64]; + __u64 avg_llc_val = 0; + float avg_diff; int ret = 0; - /* Run NUM_OF_RUNS times */ - if (p->num_of_runs >= NUM_OF_RUNS) - return END_OF_TESTS; + avg_llc_val = sum_llc_val / num_of_runs; + if (*prev_avg_llc_val) { + float delta = (__s64)(avg_llc_val - *prev_avg_llc_val); + + avg_diff = delta / *prev_avg_llc_val; + ret = platform && (avg_diff * 100) < (float)min_diff_percent; + + ksft_print_msg("%s Check cache miss rate changed more than %.1f%%\n", + ret ? 
"Fail:" : "Pass:", (float)min_diff_percent); - if (p->num_of_runs == 0) { - sprintf(schemata, "%lx", p->mask); - ret = write_schemata(p->ctrlgrp, schemata, p->cpu_no, - p->resctrl_val); + ksft_print_msg("Percent diff=%.1f\n", avg_diff * 100); } - p->num_of_runs++; + *prev_avg_llc_val = avg_llc_val; + + show_cache_info(no_of_bits, avg_llc_val, cache_span, true); return ret; } -static int check_results(struct resctrl_val_param *param, size_t span) +/* Remove the highest bit from CBM */ +static unsigned long next_mask(unsigned long current_mask) +{ + return current_mask & (current_mask >> 1); +} + +static int check_results(struct resctrl_val_param *param, const char *cache_type, + unsigned long cache_total_size, unsigned long full_cache_mask, + unsigned long current_mask) { char *token_array[8], temp[512]; - unsigned long sum_llc_perf_miss = 0; - int runs = 0, no_of_bits = 0; + __u64 sum_llc_perf_miss = 0; + __s64 prev_avg_llc_val = 0; + unsigned long alloc_size; + int runs = 0; + int fail = 0; + int ret; FILE *fp; ksft_print_msg("Checking for pass/fail\n"); fp = fopen(param->filename, "r"); if (!fp) { - perror("# Cannot open file"); + ksft_perror("Cannot open file"); - return errno; + return -1; } while (fgets(temp, sizeof(temp), fp)) { char *token = strtok(temp, ":\t"); int fields = 0; + int bits; while (token) { token_array[fields++] = token; token = strtok(NULL, ":\t"); } - /* - * Discard the first value which is inaccurate due to monitoring - * setup transition phase. - */ - if (runs > 0) - sum_llc_perf_miss += strtoul(token_array[3], NULL, 0); + + sum_llc_perf_miss += strtoull(token_array[3], NULL, 0); runs++; + + if (runs < NUM_OF_RUNS) + continue; + + if (!current_mask) { + ksft_print_msg("Unexpected empty cache mask\n"); + break; + } + + alloc_size = cache_portion_size(cache_total_size, current_mask, full_cache_mask); + + bits = count_bits(current_mask); + + ret = show_results_info(sum_llc_perf_miss, bits, + alloc_size / 64, + MIN_DIFF_PERCENT_PER_BIT * (bits - 1), + runs, get_vendor() == ARCH_INTEL, + &prev_avg_llc_val); + if (ret) + fail = 1; + + runs = 0; + sum_llc_perf_miss = 0; + current_mask = next_mask(current_mask); } fclose(fp); - no_of_bits = count_bits(param->mask); - return show_cache_info(sum_llc_perf_miss, no_of_bits, span / 64, - MAX_DIFF, MAX_DIFF_PERCENT, runs - 1, - get_vendor() == ARCH_INTEL, false); + return fail; } void cat_test_cleanup(void) { - remove(RESULT_FILE_NAME1); - remove(RESULT_FILE_NAME2); + remove(RESULT_FILE_NAME); } -int cat_perf_miss_val(int cpu_no, int n, char *cache_type) +/* + * cat_test - Execute CAT benchmark and measure cache misses + * @test: Test information structure + * @uparams: User supplied parameters + * @param: Parameters passed to cat_test() + * @span: Buffer size for the benchmark + * @current_mask Start mask for the first iteration + * + * Run CAT selftest by varying the allocated cache portion and comparing the + * impact on cache misses (the result analysis is done in check_results() + * and show_results_info(), not in this function). + * + * One bit is removed from the CAT allocation bit mask (in current_mask) for + * each subsequent test which keeps reducing the size of the allocated cache + * portion. A single test flushes the buffer, reads it to warm up the cache, + * and reads the buffer again. The cache misses are measured during the last + * read pass. + * + * Return: 0 when the test was run, < 0 on error. 
+ */ +static int cat_test(const struct resctrl_test *test, + const struct user_params *uparams, + struct resctrl_val_param *param, + size_t span, unsigned long current_mask) { - unsigned long l_mask, l_mask_1; - int ret, pipefd[2], sibling_cpu_no; - unsigned long cache_size = 0; - unsigned long long_mask; - char cbm_mask[256]; + char *resctrl_val = param->resctrl_val; + struct perf_event_read pe_read; + struct perf_event_attr pea; + cpu_set_t old_affinity; + unsigned char *buf; + char schemata[64]; + int ret, i, pe_fd; + pid_t bm_pid; + + if (strcmp(param->filename, "") == 0) + sprintf(param->filename, "stdio"); + + bm_pid = getpid(); + + /* Taskset benchmark to specified cpu */ + ret = taskset_benchmark(bm_pid, uparams->cpu, &old_affinity); + if (ret) + return ret; + + /* Write benchmark to specified con_mon grp, mon_grp in resctrl FS*/ + ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp, + resctrl_val); + if (ret) + goto reset_affinity; + + perf_event_attr_initialize(&pea, PERF_COUNT_HW_CACHE_MISSES); + perf_event_initialize_read_format(&pe_read); + pe_fd = perf_open(&pea, bm_pid, uparams->cpu); + if (pe_fd < 0) { + ret = -1; + goto reset_affinity; + } + + buf = alloc_buffer(span, 1); + if (!buf) { + ret = -1; + goto pe_close; + } + + while (current_mask) { + snprintf(schemata, sizeof(schemata), "%lx", param->mask & ~current_mask); + ret = write_schemata("", schemata, uparams->cpu, test->resource); + if (ret) + goto free_buf; + snprintf(schemata, sizeof(schemata), "%lx", current_mask); + ret = write_schemata(param->ctrlgrp, schemata, uparams->cpu, test->resource); + if (ret) + goto free_buf; + + for (i = 0; i < NUM_OF_RUNS; i++) { + mem_flush(buf, span); + fill_cache_read(buf, span, true); + + ret = perf_event_reset_enable(pe_fd); + if (ret) + goto free_buf; + + fill_cache_read(buf, span, true); + + ret = perf_event_measure(pe_fd, &pe_read, param->filename, bm_pid); + if (ret) + goto free_buf; + } + current_mask = next_mask(current_mask); + } + +free_buf: + free(buf); +pe_close: + close(pe_fd); +reset_affinity: + taskset_restore(bm_pid, &old_affinity); + + return ret; +} + +static int cat_run_test(const struct resctrl_test *test, const struct user_params *uparams) +{ + unsigned long long_mask, start_mask, full_cache_mask; + unsigned long cache_total_size = 0; + int n = uparams->bits; + unsigned int start; int count_of_bits; - char pipe_message; size_t span; + int ret; - /* Get default cbm mask for L3/L2 cache */ - ret = get_cbm_mask(cache_type, cbm_mask); + ret = get_full_cbm(test->resource, &full_cache_mask); + if (ret) + return ret; + /* Get the largest contiguous exclusive portion of the cache */ + ret = get_mask_no_shareable(test->resource, &long_mask); if (ret) return ret; - - long_mask = strtoul(cbm_mask, NULL, 16); /* Get L3/L2 cache size */ - ret = get_cache_size(cpu_no, cache_type, &cache_size); + ret = get_cache_size(uparams->cpu, test->resource, &cache_total_size); if (ret) return ret; - ksft_print_msg("Cache size :%lu\n", cache_size); + ksft_print_msg("Cache size :%lu\n", cache_total_size); - /* Get max number of bits from default-cabm mask */ - count_of_bits = count_bits(long_mask); + count_of_bits = count_contiguous_bits(long_mask, &start); if (!n) n = count_of_bits / 2; @@ -123,89 +269,124 @@ int cat_perf_miss_val(int cpu_no, int n, char *cache_type) count_of_bits - 1); return -1; } - - /* Get core id from same socket for running another thread */ - sibling_cpu_no = get_core_sibling(cpu_no); - if (sibling_cpu_no < 0) - return -1; + start_mask = 
create_bit_mask(start, n); struct resctrl_val_param param = { .resctrl_val = CAT_STR, - .cpu_no = cpu_no, - .setup = cat_setup, + .ctrlgrp = "c1", + .filename = RESULT_FILE_NAME, + .num_of_runs = 0, }; + param.mask = long_mask; + span = cache_portion_size(cache_total_size, start_mask, full_cache_mask); - l_mask = long_mask >> n; - l_mask_1 = ~l_mask & long_mask; + remove(param.filename); - /* Set param values for parent thread which will be allocated bitmask - * with (max_bits - n) bits - */ - span = cache_size * (count_of_bits - n) / count_of_bits; - strcpy(param.ctrlgrp, "c2"); - strcpy(param.mongrp, "m2"); - strcpy(param.filename, RESULT_FILE_NAME2); - param.mask = l_mask; - param.num_of_runs = 0; - - if (pipe(pipefd)) { - perror("# Unable to create pipe"); - return errno; - } + ret = cat_test(test, uparams, ¶m, span, start_mask); + if (ret) + goto out; - fflush(stdout); - bm_pid = fork(); + ret = check_results(¶m, test->resource, + cache_total_size, full_cache_mask, start_mask); +out: + cat_test_cleanup(); - /* Set param values for child thread which will be allocated bitmask - * with n bits - */ - if (bm_pid == 0) { - param.mask = l_mask_1; - strcpy(param.ctrlgrp, "c1"); - strcpy(param.mongrp, "m1"); - span = cache_size * n / count_of_bits; - strcpy(param.filename, RESULT_FILE_NAME1); - param.num_of_runs = 0; - param.cpu_no = sibling_cpu_no; + return ret; +} + +static int noncont_cat_run_test(const struct resctrl_test *test, + const struct user_params *uparams) +{ + unsigned long full_cache_mask, cont_mask, noncont_mask; + unsigned int eax, ebx, ecx, edx, sparse_masks; + int bit_center, ret; + char schemata[64]; + + /* Check to compare sparse_masks content to CPUID output. */ + ret = resource_info_unsigned_get(test->resource, "sparse_masks", &sparse_masks); + if (ret) + return ret; + + if (!strcmp(test->resource, "L3")) + __cpuid_count(0x10, 1, eax, ebx, ecx, edx); + else if (!strcmp(test->resource, "L2")) + __cpuid_count(0x10, 2, eax, ebx, ecx, edx); + else + return -EINVAL; + + if (sparse_masks != ((ecx >> 3) & 1)) { + ksft_print_msg("CPUID output doesn't match 'sparse_masks' file content!\n"); + return 1; } - remove(param.filename); + /* Write checks initialization. */ + ret = get_full_cbm(test->resource, &full_cache_mask); + if (ret < 0) + return ret; + bit_center = count_bits(full_cache_mask) / 2; - ret = cat_val(¶m, span); - if (ret == 0) - ret = check_results(¶m, span); - - if (bm_pid == 0) { - /* Tell parent that child is ready */ - close(pipefd[0]); - pipe_message = 1; - if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) < - sizeof(pipe_message)) - /* - * Just print the error message. - * Let while(1) run and wait for itself to be killed. - */ - perror("# failed signaling parent process"); - - close(pipefd[1]); - while (1) - ; - } else { - /* Parent waits for child to be ready. */ - close(pipefd[1]); - pipe_message = 0; - while (pipe_message != 1) { - if (read(pipefd[0], &pipe_message, - sizeof(pipe_message)) < sizeof(pipe_message)) { - perror("# failed reading from child process"); - break; - } - } - close(pipefd[0]); - kill(bm_pid, SIGKILL); + /* + * The bit_center needs to be at least 3 to properly calculate the CBM + * hole in the noncont_mask. If it's smaller return an error since the + * cache mask is too short and that shouldn't happen. + */ + if (bit_center < 3) + return -EINVAL; + cont_mask = full_cache_mask >> bit_center; + + /* Contiguous mask write check. 
*/ + snprintf(schemata, sizeof(schemata), "%lx", cont_mask); + ret = write_schemata("", schemata, uparams->cpu, test->resource); + if (ret) { + ksft_print_msg("Write of contiguous CBM failed\n"); + return 1; } - cat_test_cleanup(); + /* + * Non-contiguous mask write check. CBM has a 0xf hole approximately in the middle. + * Output is compared with support information to catch any edge case errors. + */ + noncont_mask = ~(0xfUL << (bit_center - 2)) & full_cache_mask; + snprintf(schemata, sizeof(schemata), "%lx", noncont_mask); + ret = write_schemata("", schemata, uparams->cpu, test->resource); + if (ret && sparse_masks) + ksft_print_msg("Non-contiguous CBMs supported but write of non-contiguous CBM failed\n"); + else if (ret && !sparse_masks) + ksft_print_msg("Non-contiguous CBMs not supported and write of non-contiguous CBM failed as expected\n"); + else if (!ret && !sparse_masks) + ksft_print_msg("Non-contiguous CBMs not supported but write of non-contiguous CBM succeeded\n"); + + return !ret == !sparse_masks; +} - return ret; +static bool noncont_cat_feature_check(const struct resctrl_test *test) +{ + if (!resctrl_resource_exists(test->resource)) + return false; + + return resource_info_file_exists(test->resource, "sparse_masks"); } + +struct resctrl_test l3_cat_test = { + .name = "L3_CAT", + .group = "CAT", + .resource = "L3", + .feature_check = test_resource_feature_check, + .run_test = cat_run_test, +}; + +struct resctrl_test l3_noncont_cat_test = { + .name = "L3_NONCONT_CAT", + .group = "CAT", + .resource = "L3", + .feature_check = noncont_cat_feature_check, + .run_test = noncont_cat_run_test, +}; + +struct resctrl_test l2_noncont_cat_test = { + .name = "L2_NONCONT_CAT", + .group = "CAT", + .resource = "L2", + .feature_check = noncont_cat_feature_check, + .run_test = noncont_cat_run_test, +}; diff --git a/tools/testing/selftests/resctrl/cmt_test.c b/tools/testing/selftests/resctrl/cmt_test.c index 50bdbce9fb..a81f91222a 100644 --- a/tools/testing/selftests/resctrl/cmt_test.c +++ b/tools/testing/selftests/resctrl/cmt_test.c @@ -16,7 +16,9 @@ #define MAX_DIFF 2000000 #define MAX_DIFF_PERCENT 15 -static int cmt_setup(struct resctrl_val_param *p) +static int cmt_setup(const struct resctrl_test *test, + const struct user_params *uparams, + struct resctrl_val_param *p) { /* Run NUM_OF_RUNS times */ if (p->num_of_runs >= NUM_OF_RUNS) @@ -27,6 +29,33 @@ static int cmt_setup(struct resctrl_val_param *p) return 0; } +static int show_results_info(unsigned long sum_llc_val, int no_of_bits, + unsigned long cache_span, unsigned long max_diff, + unsigned long max_diff_percent, unsigned long num_of_runs, + bool platform) +{ + unsigned long avg_llc_val = 0; + float diff_percent; + long avg_diff = 0; + int ret; + + avg_llc_val = sum_llc_val / num_of_runs; + avg_diff = (long)abs(cache_span - avg_llc_val); + diff_percent = ((float)cache_span - avg_llc_val) / cache_span * 100; + + ret = platform && abs((int)diff_percent) > max_diff_percent && + abs(avg_diff) > max_diff; + + ksft_print_msg("%s Check cache miss rate within %lu%%\n", + ret ? 
"Fail:" : "Pass:", max_diff_percent); + + ksft_print_msg("Percent diff=%d\n", abs((int)diff_percent)); + + show_cache_info(no_of_bits, avg_llc_val, cache_span, false); + + return ret; +} + static int check_results(struct resctrl_val_param *param, size_t span, int no_of_bits) { char *token_array[8], temp[512]; @@ -37,9 +66,9 @@ static int check_results(struct resctrl_val_param *param, size_t span, int no_of ksft_print_msg("Checking for pass/fail\n"); fp = fopen(param->filename, "r"); if (!fp) { - perror("# Error in opening file\n"); + ksft_perror("Error in opening file"); - return errno; + return -1; } while (fgets(temp, sizeof(temp), fp)) { @@ -58,9 +87,8 @@ static int check_results(struct resctrl_val_param *param, size_t span, int no_of } fclose(fp); - return show_cache_info(sum_llc_occu_resc, no_of_bits, span, - MAX_DIFF, MAX_DIFF_PERCENT, runs - 1, - true, true); + return show_results_info(sum_llc_occu_resc, no_of_bits, span, + MAX_DIFF, MAX_DIFF_PERCENT, runs - 1, true); } void cmt_test_cleanup(void) @@ -68,28 +96,26 @@ void cmt_test_cleanup(void) remove(RESULT_FILE_NAME); } -int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd) +static int cmt_run_test(const struct resctrl_test *test, const struct user_params *uparams) { - const char * const *cmd = benchmark_cmd; + const char * const *cmd = uparams->benchmark_cmd; const char *new_cmd[BENCHMARK_ARGS]; - unsigned long cache_size = 0; + unsigned long cache_total_size = 0; + int n = uparams->bits ? : 5; unsigned long long_mask; char *span_str = NULL; - char cbm_mask[256]; int count_of_bits; size_t span; int ret, i; - ret = get_cbm_mask("L3", cbm_mask); + ret = get_full_cbm("L3", &long_mask); if (ret) return ret; - long_mask = strtoul(cbm_mask, NULL, 16); - - ret = get_cache_size(cpu_no, "L3", &cache_size); + ret = get_cache_size(uparams->cpu, "L3", &cache_total_size); if (ret) return ret; - ksft_print_msg("Cache size :%lu\n", cache_size); + ksft_print_msg("Cache size :%lu\n", cache_total_size); count_of_bits = count_bits(long_mask); @@ -103,19 +129,18 @@ int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd) .resctrl_val = CMT_STR, .ctrlgrp = "c1", .mongrp = "m1", - .cpu_no = cpu_no, .filename = RESULT_FILE_NAME, .mask = ~(long_mask << n) & long_mask, .num_of_runs = 0, .setup = cmt_setup, }; - span = cache_size * n / count_of_bits; + span = cache_portion_size(cache_total_size, param.mask, long_mask); if (strcmp(cmd[0], "fill_buf") == 0) { /* Duplicate the command to be able to replace span in it */ - for (i = 0; benchmark_cmd[i]; i++) - new_cmd[i] = benchmark_cmd[i]; + for (i = 0; uparams->benchmark_cmd[i]; i++) + new_cmd[i] = uparams->benchmark_cmd[i]; new_cmd[i] = NULL; ret = asprintf(&span_str, "%zu", span); @@ -127,11 +152,13 @@ int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd) remove(RESULT_FILE_NAME); - ret = resctrl_val(cmd, ¶m); + ret = resctrl_val(test, uparams, cmd, ¶m); if (ret) goto out; ret = check_results(¶m, span, n); + if (ret && (get_vendor() == ARCH_INTEL)) + ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. 
Check BIOS configuration.\n"); out: cmt_test_cleanup(); @@ -139,3 +166,16 @@ out: return ret; } + +static bool cmt_feature_check(const struct resctrl_test *test) +{ + return test_resource_feature_check(test) && + resctrl_mon_feature_exists("L3_MON", "llc_occupancy"); +} + +struct resctrl_test cmt_test = { + .name = "CMT", + .resource = "L3", + .feature_check = cmt_feature_check, + .run_test = cmt_run_test, +}; diff --git a/tools/testing/selftests/resctrl/fill_buf.c b/tools/testing/selftests/resctrl/fill_buf.c index 0d425f2658..ae120f1735 100644 --- a/tools/testing/selftests/resctrl/fill_buf.c +++ b/tools/testing/selftests/resctrl/fill_buf.c @@ -38,7 +38,7 @@ static void cl_flush(void *p) #endif } -static void mem_flush(unsigned char *buf, size_t buf_size) +void mem_flush(unsigned char *buf, size_t buf_size) { unsigned char *cp = buf; size_t i = 0; @@ -51,39 +51,38 @@ static void mem_flush(unsigned char *buf, size_t buf_size) sb(); } -static void *malloc_and_init_memory(size_t buf_size) -{ - void *p = NULL; - uint64_t *p64; - size_t s64; - int ret; - - ret = posix_memalign(&p, PAGE_SIZE, buf_size); - if (ret < 0) - return NULL; - - p64 = (uint64_t *)p; - s64 = buf_size / sizeof(uint64_t); - - while (s64 > 0) { - *p64 = (uint64_t)rand(); - p64 += (CL_SIZE / sizeof(uint64_t)); - s64 -= (CL_SIZE / sizeof(uint64_t)); - } - - return p; -} +/* + * Buffer index step advance to workaround HW prefetching interfering with + * the measurements. + * + * Must be a prime to step through all indexes of the buffer. + * + * Some primes work better than others on some architectures (from MBA/MBM + * result stability point of view). + */ +#define FILL_IDX_MULT 23 static int fill_one_span_read(unsigned char *buf, size_t buf_size) { - unsigned char *end_ptr = buf + buf_size; - unsigned char sum, *p; - - sum = 0; - p = buf; - while (p < end_ptr) { - sum += *p; - p += (CL_SIZE / 2); + unsigned int size = buf_size / (CL_SIZE / 2); + unsigned int i, idx = 0; + unsigned char sum = 0; + + /* + * Read the buffer in an order that is unexpected by HW prefetching + * optimizations to prevent them interfering with the caching pattern. + * + * The read order is (in terms of halves of cachelines): + * i * FILL_IDX_MULT % size + * The formula is open-coded below to avoiding modulo inside the loop + * as it improves MBA/MBM result stability on some architectures. + */ + for (i = 0; i < size; i++) { + sum += buf[idx * (CL_SIZE / 2)]; + + idx += FILL_IDX_MULT; + while (idx >= size) + idx -= size; } return sum; @@ -101,10 +100,9 @@ static void fill_one_span_write(unsigned char *buf, size_t buf_size) } } -static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once) +void fill_cache_read(unsigned char *buf, size_t buf_size, bool once) { int ret = 0; - FILE *fp; while (1) { ret = fill_one_span_read(buf, buf_size); @@ -113,67 +111,59 @@ static int fill_cache_read(unsigned char *buf, size_t buf_size, bool once) } /* Consume read result so that reading memory is not optimized out. 
*/ - fp = fopen("/dev/null", "w"); - if (!fp) { - perror("Unable to write to /dev/null"); - return -1; - } - fprintf(fp, "Sum: %d ", ret); - fclose(fp); - - return 0; + *value_sink = ret; } -static int fill_cache_write(unsigned char *buf, size_t buf_size, bool once) +static void fill_cache_write(unsigned char *buf, size_t buf_size, bool once) { while (1) { fill_one_span_write(buf, buf_size); if (once) break; } - - return 0; } -static int fill_cache(size_t buf_size, int memflush, int op, bool once) +unsigned char *alloc_buffer(size_t buf_size, int memflush) { - unsigned char *buf; + void *buf = NULL; + uint64_t *p64; + size_t s64; int ret; - buf = malloc_and_init_memory(buf_size); - if (!buf) - return -1; - - /* Flush the memory before using to avoid "cache hot pages" effect */ - if (memflush) - mem_flush(buf, buf_size); - - if (op == 0) - ret = fill_cache_read(buf, buf_size, once); - else - ret = fill_cache_write(buf, buf_size, once); + ret = posix_memalign(&buf, PAGE_SIZE, buf_size); + if (ret < 0) + return NULL; - free(buf); + /* Initialize the buffer */ + p64 = buf; + s64 = buf_size / sizeof(uint64_t); - if (ret) { - printf("\n Error in fill cache read/write...\n"); - return -1; + while (s64 > 0) { + *p64 = (uint64_t)rand(); + p64 += (CL_SIZE / sizeof(uint64_t)); + s64 -= (CL_SIZE / sizeof(uint64_t)); } + /* Flush the memory before using to avoid "cache hot pages" effect */ + if (memflush) + mem_flush(buf, buf_size); - return 0; + return buf; } -int run_fill_buf(size_t span, int memflush, int op, bool once) +int run_fill_buf(size_t buf_size, int memflush, int op, bool once) { - size_t cache_size = span; - int ret; + unsigned char *buf; - ret = fill_cache(cache_size, memflush, op, once); - if (ret) { - printf("\n Error in fill cache\n"); + buf = alloc_buffer(buf_size, memflush); + if (!buf) return -1; - } + + if (op == 0) + fill_cache_read(buf, buf_size, once); + else + fill_cache_write(buf, buf_size, once); + free(buf); return 0; } diff --git a/tools/testing/selftests/resctrl/mba_test.c b/tools/testing/selftests/resctrl/mba_test.c index d3bf436834..7946e32e85 100644 --- a/tools/testing/selftests/resctrl/mba_test.c +++ b/tools/testing/selftests/resctrl/mba_test.c @@ -22,7 +22,9 @@ * con_mon grp, mon_grp in resctrl FS. * For each allocation, run 5 times in order to get average values. 
*/ -static int mba_setup(struct resctrl_val_param *p) +static int mba_setup(const struct resctrl_test *test, + const struct user_params *uparams, + struct resctrl_val_param *p) { static int runs_per_allocation, allocation = 100; char allocation_str[64]; @@ -40,8 +42,7 @@ static int mba_setup(struct resctrl_val_param *p) sprintf(allocation_str, "%d", allocation); - ret = write_schemata(p->ctrlgrp, allocation_str, p->cpu_no, - p->resctrl_val); + ret = write_schemata(p->ctrlgrp, allocation_str, uparams->cpu, test->resource); if (ret < 0) return ret; @@ -109,9 +110,9 @@ static int check_results(void) fp = fopen(output, "r"); if (!fp) { - perror(output); + ksft_perror(output); - return errno; + return -1; } runs = 0; @@ -141,13 +142,12 @@ void mba_test_cleanup(void) remove(RESULT_FILE_NAME); } -int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd) +static int mba_run_test(const struct resctrl_test *test, const struct user_params *uparams) { struct resctrl_val_param param = { .resctrl_val = MBA_STR, .ctrlgrp = "c1", .mongrp = "m1", - .cpu_no = cpu_no, .filename = RESULT_FILE_NAME, .bw_report = "reads", .setup = mba_setup @@ -156,7 +156,7 @@ int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd) remove(RESULT_FILE_NAME); - ret = resctrl_val(benchmark_cmd, ¶m); + ret = resctrl_val(test, uparams, uparams->benchmark_cmd, ¶m); if (ret) goto out; @@ -167,3 +167,17 @@ out: return ret; } + +static bool mba_feature_check(const struct resctrl_test *test) +{ + return test_resource_feature_check(test) && + resctrl_mon_feature_exists("L3_MON", "mbm_local_bytes"); +} + +struct resctrl_test mba_test = { + .name = "MBA", + .resource = "MB", + .vendor_specific = ARCH_INTEL, + .feature_check = mba_feature_check, + .run_test = mba_run_test, +}; diff --git a/tools/testing/selftests/resctrl/mbm_test.c b/tools/testing/selftests/resctrl/mbm_test.c index 741533f2b0..d67ffa3ec6 100644 --- a/tools/testing/selftests/resctrl/mbm_test.c +++ b/tools/testing/selftests/resctrl/mbm_test.c @@ -59,9 +59,9 @@ static int check_results(size_t span) fp = fopen(output, "r"); if (!fp) { - perror(output); + ksft_perror(output); - return errno; + return -1; } runs = 0; @@ -86,7 +86,9 @@ static int check_results(size_t span) return ret; } -static int mbm_setup(struct resctrl_val_param *p) +static int mbm_setup(const struct resctrl_test *test, + const struct user_params *uparams, + struct resctrl_val_param *p) { int ret = 0; @@ -95,9 +97,8 @@ static int mbm_setup(struct resctrl_val_param *p) return END_OF_TESTS; /* Set up shemata with 100% allocation on the first run. 
*/ - if (p->num_of_runs == 0 && validate_resctrl_feature_request("MB", NULL)) - ret = write_schemata(p->ctrlgrp, "100", p->cpu_no, - p->resctrl_val); + if (p->num_of_runs == 0 && resctrl_resource_exists("MB")) + ret = write_schemata(p->ctrlgrp, "100", uparams->cpu, test->resource); p->num_of_runs++; @@ -109,13 +110,12 @@ void mbm_test_cleanup(void) remove(RESULT_FILE_NAME); } -int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd) +static int mbm_run_test(const struct resctrl_test *test, const struct user_params *uparams) { struct resctrl_val_param param = { .resctrl_val = MBM_STR, .ctrlgrp = "c1", .mongrp = "m1", - .cpu_no = cpu_no, .filename = RESULT_FILE_NAME, .bw_report = "reads", .setup = mbm_setup @@ -124,14 +124,30 @@ int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd) remove(RESULT_FILE_NAME); - ret = resctrl_val(benchmark_cmd, ¶m); + ret = resctrl_val(test, uparams, uparams->benchmark_cmd, ¶m); if (ret) goto out; ret = check_results(DEFAULT_SPAN); + if (ret && (get_vendor() == ARCH_INTEL)) + ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n"); out: mbm_test_cleanup(); return ret; } + +static bool mbm_feature_check(const struct resctrl_test *test) +{ + return resctrl_mon_feature_exists("L3_MON", "mbm_total_bytes") && + resctrl_mon_feature_exists("L3_MON", "mbm_local_bytes"); +} + +struct resctrl_test mbm_test = { + .name = "MBM", + .resource = "MB", + .vendor_specific = ARCH_INTEL, + .feature_check = mbm_feature_check, + .run_test = mbm_run_test, +}; diff --git a/tools/testing/selftests/resctrl/resctrl.h b/tools/testing/selftests/resctrl/resctrl.h index a33f414f60..2051bd135e 100644 --- a/tools/testing/selftests/resctrl/resctrl.h +++ b/tools/testing/selftests/resctrl/resctrl.h @@ -28,6 +28,12 @@ #define PHYS_ID_PATH "/sys/devices/system/cpu/cpu" #define INFO_PATH "/sys/fs/resctrl/info" +/* + * CPU vendor IDs + * + * Define as bits because they're used for vendor_specific bitmask in + * the struct resctrl_test. + */ #define ARCH_INTEL 1 #define ARCH_AMD 2 @@ -37,20 +43,52 @@ #define DEFAULT_SPAN (250 * MB) -#define PARENT_EXIT(err_msg) \ +#define PARENT_EXIT() \ do { \ - perror(err_msg); \ kill(ppid, SIGKILL); \ umount_resctrlfs(); \ exit(EXIT_FAILURE); \ } while (0) /* + * user_params: User supplied parameters + * @cpu: CPU number to which the benchmark will be bound to + * @bits: Number of bits used for cache allocation size + * @benchmark_cmd: Benchmark command to run during (some of the) tests + */ +struct user_params { + int cpu; + int bits; + const char *benchmark_cmd[BENCHMARK_ARGS]; +}; + +/* + * resctrl_test: resctrl test definition + * @name: Test name + * @group: Test group - a common name for tests that share some characteristic + * (e.g., L3 CAT test belongs to the CAT group). Can be NULL + * @resource: Resource to test (e.g., MB, L3, L2, etc.) + * @vendor_specific: Bitmask for vendor-specific tests (can be 0 for universal tests) + * @disabled: Test is disabled + * @feature_check: Callback to check required resctrl features + * @run_test: Callback to run the test + */ +struct resctrl_test { + const char *name; + const char *group; + const char *resource; + unsigned int vendor_specific; + bool disabled; + bool (*feature_check)(const struct resctrl_test *test); + int (*run_test)(const struct resctrl_test *test, + const struct user_params *uparams); +}; + +/* * resctrl_val_param: resctrl test parameters * @resctrl_val: Resctrl feature (Eg: mbm, mba.. 
etc) * @ctrlgrp: Name of the control monitor group (con_mon grp) * @mongrp: Name of the monitor group (mon grp) - * @cpu_no: CPU number to which the benchmark would be binded * @filename: Name of file to which the o/p should be written * @bw_report: Bandwidth report type (reads vs writes) * @setup: Call back function to setup test environment @@ -59,12 +97,20 @@ struct resctrl_val_param { char *resctrl_val; char ctrlgrp[64]; char mongrp[64]; - int cpu_no; char filename[64]; char *bw_report; unsigned long mask; int num_of_runs; - int (*setup)(struct resctrl_val_param *param); + int (*setup)(const struct resctrl_test *test, + const struct user_params *uparams, + struct resctrl_val_param *param); +}; + +struct perf_event_read { + __u64 nr; /* The number of events */ + struct { + __u64 value; /* The value of the event */ + } values[2]; }; #define MBM_STR "mbm" @@ -72,6 +118,13 @@ struct resctrl_val_param { #define CMT_STR "cmt" #define CAT_STR "cat" +/* + * Memory location that consumes values compiler must not optimize away. + * Volatile ensures writes to this location cannot be optimized away by + * compiler. + */ +extern volatile int *value_sink; + extern pid_t bm_pid, ppid; extern char llc_occup_path[1024]; @@ -79,42 +132,84 @@ extern char llc_occup_path[1024]; int get_vendor(void); bool check_resctrlfs_support(void); int filter_dmesg(void); -int get_resource_id(int cpu_no, int *resource_id); +int get_domain_id(const char *resource, int cpu_no, int *domain_id); int mount_resctrlfs(void); int umount_resctrlfs(void); int validate_bw_report_request(char *bw_report); -bool validate_resctrl_feature_request(const char *resource, const char *feature); +bool resctrl_resource_exists(const char *resource); +bool resctrl_mon_feature_exists(const char *resource, const char *feature); +bool resource_info_file_exists(const char *resource, const char *file); +bool test_resource_feature_check(const struct resctrl_test *test); char *fgrep(FILE *inf, const char *str); -int taskset_benchmark(pid_t bm_pid, int cpu_no); -int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, - char *resctrl_val); +int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity); +int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity); +int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, const char *resource); int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp, char *resctrl_val); int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsigned long flags); -int run_fill_buf(size_t span, int memflush, int op, bool once); -int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param); -int mbm_bw_change(int cpu_no, const char * const *benchmark_cmd); +unsigned char *alloc_buffer(size_t buf_size, int memflush); +void mem_flush(unsigned char *buf, size_t buf_size); +void fill_cache_read(unsigned char *buf, size_t buf_size, bool once); +int run_fill_buf(size_t buf_size, int memflush, int op, bool once); +int resctrl_val(const struct resctrl_test *test, + const struct user_params *uparams, + const char * const *benchmark_cmd, + struct resctrl_val_param *param); void tests_cleanup(void); void mbm_test_cleanup(void); -int mba_schemata_change(int cpu_no, const char * const *benchmark_cmd); void mba_test_cleanup(void); -int get_cbm_mask(char *cache_type, char *cbm_mask); -int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size); +unsigned long create_bit_mask(unsigned int start, unsigned int len); +unsigned int 
count_contiguous_bits(unsigned long val, unsigned int *start); +int get_full_cbm(const char *cache_type, unsigned long *mask); +int get_mask_no_shareable(const char *cache_type, unsigned long *mask); +int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size); +int resource_info_unsigned_get(const char *resource, const char *filename, unsigned int *val); void ctrlc_handler(int signum, siginfo_t *info, void *ptr); int signal_handler_register(void); void signal_handler_unregister(void); -int cat_val(struct resctrl_val_param *param, size_t span); void cat_test_cleanup(void); -int cat_perf_miss_val(int cpu_no, int no_of_bits, char *cache_type); -int cmt_resctrl_val(int cpu_no, int n, const char * const *benchmark_cmd); unsigned int count_bits(unsigned long n); void cmt_test_cleanup(void); -int get_core_sibling(int cpu_no); -int measure_cache_vals(struct resctrl_val_param *param, int bm_pid); -int show_cache_info(unsigned long sum_llc_val, int no_of_bits, - size_t cache_span, unsigned long max_diff, - unsigned long max_diff_percent, unsigned long num_of_runs, - bool platform, bool cmt); + +void perf_event_attr_initialize(struct perf_event_attr *pea, __u64 config); +void perf_event_initialize_read_format(struct perf_event_read *pe_read); +int perf_open(struct perf_event_attr *pea, pid_t pid, int cpu_no); +int perf_event_reset_enable(int pe_fd); +int perf_event_measure(int pe_fd, struct perf_event_read *pe_read, + const char *filename, int bm_pid); +int measure_llc_resctrl(const char *filename, int bm_pid); +void show_cache_info(int no_of_bits, __u64 avg_llc_val, size_t cache_span, bool lines); + +/* + * cache_portion_size - Calculate the size of a cache portion + * @cache_size: Total cache size in bytes + * @portion_mask: Cache portion mask + * @full_cache_mask: Full Cache Bit Mask (CBM) for the cache + * + * Return: The size of the cache portion in bytes. + */ +static inline unsigned long cache_portion_size(unsigned long cache_size, + unsigned long portion_mask, + unsigned long full_cache_mask) +{ + unsigned int bits = count_bits(full_cache_mask); + + /* + * With no bits the full CBM, assume cache cannot be split into + * smaller portions. To avoid divide by zero, return cache_size. 
+ */ + if (!bits) + return cache_size; + + return cache_size * count_bits(portion_mask) / bits; +} + +extern struct resctrl_test mbm_test; +extern struct resctrl_test mba_test; +extern struct resctrl_test cmt_test; +extern struct resctrl_test l3_cat_test; +extern struct resctrl_test l3_noncont_cat_test; +extern struct resctrl_test l2_noncont_cat_test; #endif /* RESCTRL_H */ diff --git a/tools/testing/selftests/resctrl/resctrl_tests.c b/tools/testing/selftests/resctrl/resctrl_tests.c index 2bbe3045a0..f3dc1b9696 100644 --- a/tools/testing/selftests/resctrl/resctrl_tests.c +++ b/tools/testing/selftests/resctrl/resctrl_tests.c @@ -10,6 +10,19 @@ */ #include "resctrl.h" +/* Volatile memory sink to prevent compiler optimizations */ +static volatile int sink_target; +volatile int *value_sink = &sink_target; + +static struct resctrl_test *resctrl_tests[] = { + &mbm_test, + &mba_test, + &cmt_test, + &l3_cat_test, + &l3_noncont_cat_test, + &l2_noncont_cat_test, +}; + static int detect_vendor(void) { FILE *inf = fopen("/proc/cpuinfo", "r"); @@ -49,11 +62,20 @@ int get_vendor(void) static void cmd_help(void) { + int i; + printf("usage: resctrl_tests [-h] [-t test list] [-n no_of_bits] [-b benchmark_cmd [option]...]\n"); printf("\t-b benchmark_cmd [option]...: run specified benchmark for MBM, MBA and CMT\n"); printf("\t default benchmark is builtin fill_buf\n"); - printf("\t-t test list: run tests specified in the test list, "); + printf("\t-t test list: run tests/groups specified by the list, "); printf("e.g. -t mbm,mba,cmt,cat\n"); + printf("\t\tSupported tests (group):\n"); + for (i = 0; i < ARRAY_SIZE(resctrl_tests); i++) { + if (resctrl_tests[i]->group) + printf("\t\t\t%s (%s)\n", resctrl_tests[i]->name, resctrl_tests[i]->group); + else + printf("\t\t\t%s\n", resctrl_tests[i]->name); + } printf("\t-n no_of_bits: run cache tests using specified no of bits in cache bit mask\n"); printf("\t-p cpu_no: specify CPU number to run the test. 1 is default\n"); printf("\t-h: help\n"); @@ -92,116 +114,63 @@ static void test_cleanup(void) signal_handler_unregister(); } -static void run_mbm_test(const char * const *benchmark_cmd, int cpu_no) +static bool test_vendor_specific_check(const struct resctrl_test *test) { - int res; - - ksft_print_msg("Starting MBM BW change ...\n"); - - if (test_prepare()) { - ksft_exit_fail_msg("Abnormal failure when preparing for the test\n"); - return; - } - - if (!validate_resctrl_feature_request("L3_MON", "mbm_total_bytes") || - !validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") || - (get_vendor() != ARCH_INTEL)) { - ksft_test_result_skip("Hardware does not support MBM or MBM is disabled\n"); - goto cleanup; - } - - res = mbm_bw_change(cpu_no, benchmark_cmd); - ksft_test_result(!res, "MBM: bw change\n"); - if ((get_vendor() == ARCH_INTEL) && res) - ksft_print_msg("Intel MBM may be inaccurate when Sub-NUMA Clustering is enabled. 
Check BIOS configuration.\n"); + if (!test->vendor_specific) + return true; -cleanup: - test_cleanup(); + return get_vendor() & test->vendor_specific; } -static void run_mba_test(const char * const *benchmark_cmd, int cpu_no) +static void run_single_test(const struct resctrl_test *test, const struct user_params *uparams) { - int res; - - ksft_print_msg("Starting MBA Schemata change ...\n"); + int ret; - if (test_prepare()) { - ksft_exit_fail_msg("Abnormal failure when preparing for the test\n"); + if (test->disabled) return; - } - if (!validate_resctrl_feature_request("MB", NULL) || - !validate_resctrl_feature_request("L3_MON", "mbm_local_bytes") || - (get_vendor() != ARCH_INTEL)) { - ksft_test_result_skip("Hardware does not support MBA or MBA is disabled\n"); - goto cleanup; + if (!test_vendor_specific_check(test)) { + ksft_test_result_skip("Hardware does not support %s\n", test->name); + return; } - res = mba_schemata_change(cpu_no, benchmark_cmd); - ksft_test_result(!res, "MBA: schemata change\n"); - -cleanup: - test_cleanup(); -} - -static void run_cmt_test(const char * const *benchmark_cmd, int cpu_no) -{ - int res; - - ksft_print_msg("Starting CMT test ...\n"); + ksft_print_msg("Starting %s test ...\n", test->name); if (test_prepare()) { ksft_exit_fail_msg("Abnormal failure when preparing for the test\n"); return; } - if (!validate_resctrl_feature_request("L3_MON", "llc_occupancy") || - !validate_resctrl_feature_request("L3", NULL)) { - ksft_test_result_skip("Hardware does not support CMT or CMT is disabled\n"); + if (!test->feature_check(test)) { + ksft_test_result_skip("Hardware does not support %s or %s is disabled\n", + test->name, test->name); goto cleanup; } - res = cmt_resctrl_val(cpu_no, 5, benchmark_cmd); - ksft_test_result(!res, "CMT: test\n"); - if ((get_vendor() == ARCH_INTEL) && res) - ksft_print_msg("Intel CMT may be inaccurate when Sub-NUMA Clustering is enabled. Check BIOS configuration.\n"); + ret = test->run_test(test, uparams); + ksft_test_result(!ret, "%s: test\n", test->name); cleanup: test_cleanup(); } -static void run_cat_test(int cpu_no, int no_of_bits) +static void init_user_params(struct user_params *uparams) { - int res; - - ksft_print_msg("Starting CAT test ...\n"); - - if (test_prepare()) { - ksft_exit_fail_msg("Abnormal failure when preparing for the test\n"); - return; - } - - if (!validate_resctrl_feature_request("L3", NULL)) { - ksft_test_result_skip("Hardware does not support CAT or CAT is disabled\n"); - goto cleanup; - } + memset(uparams, 0, sizeof(*uparams)); - res = cat_perf_miss_val(cpu_no, no_of_bits, "L3"); - ksft_test_result(!res, "CAT: test\n"); - -cleanup: - test_cleanup(); + uparams->cpu = 1; + uparams->bits = 0; } int main(int argc, char **argv) { - bool mbm_test = true, mba_test = true, cmt_test = true; - const char *benchmark_cmd[BENCHMARK_ARGS] = {}; - int c, cpu_no = 1, i, no_of_bits = 0; + int tests = ARRAY_SIZE(resctrl_tests); + bool test_param_seen = false; + struct user_params uparams; char *span_str = NULL; - bool cat_test = true; - int tests = 0; - int ret; + int ret, c, i; + + init_user_params(&uparams); while ((c = getopt(argc, argv, "ht:b:n:p:")) != -1) { char *token; @@ -219,32 +188,35 @@ int main(int argc, char **argv) /* Extract benchmark command from command line. 
*/ for (i = 0; i < argc - optind; i++) - benchmark_cmd[i] = argv[i + optind]; - benchmark_cmd[i] = NULL; + uparams.benchmark_cmd[i] = argv[i + optind]; + uparams.benchmark_cmd[i] = NULL; goto last_arg; case 't': token = strtok(optarg, ","); - mbm_test = false; - mba_test = false; - cmt_test = false; - cat_test = false; + if (!test_param_seen) { + for (i = 0; i < ARRAY_SIZE(resctrl_tests); i++) + resctrl_tests[i]->disabled = true; + tests = 0; + test_param_seen = true; + } while (token) { - if (!strncmp(token, MBM_STR, sizeof(MBM_STR))) { - mbm_test = true; - tests++; - } else if (!strncmp(token, MBA_STR, sizeof(MBA_STR))) { - mba_test = true; - tests++; - } else if (!strncmp(token, CMT_STR, sizeof(CMT_STR))) { - cmt_test = true; - tests++; - } else if (!strncmp(token, CAT_STR, sizeof(CAT_STR))) { - cat_test = true; - tests++; - } else { - printf("invalid argument\n"); + bool found = false; + + for (i = 0; i < ARRAY_SIZE(resctrl_tests); i++) { + if (!strcasecmp(token, resctrl_tests[i]->name) || + (resctrl_tests[i]->group && + !strcasecmp(token, resctrl_tests[i]->group))) { + if (resctrl_tests[i]->disabled) + tests++; + resctrl_tests[i]->disabled = false; + found = true; + } + } + + if (!found) { + printf("invalid test: %s\n", token); return -1; } @@ -252,11 +224,11 @@ int main(int argc, char **argv) } break; case 'p': - cpu_no = atoi(optarg); + uparams.cpu = atoi(optarg); break; case 'n': - no_of_bits = atoi(optarg); - if (no_of_bits <= 0) { + uparams.bits = atoi(optarg); + if (uparams.bits <= 0) { printf("Bail out! invalid argument for no_of_bits\n"); return -1; } @@ -291,32 +263,23 @@ last_arg: filter_dmesg(); - if (!benchmark_cmd[0]) { + if (!uparams.benchmark_cmd[0]) { /* If no benchmark is given by "-b" argument, use fill_buf. */ - benchmark_cmd[0] = "fill_buf"; + uparams.benchmark_cmd[0] = "fill_buf"; ret = asprintf(&span_str, "%u", DEFAULT_SPAN); if (ret < 0) ksft_exit_fail_msg("Out of memory!\n"); - benchmark_cmd[1] = span_str; - benchmark_cmd[2] = "1"; - benchmark_cmd[3] = "0"; - benchmark_cmd[4] = "false"; - benchmark_cmd[5] = NULL; + uparams.benchmark_cmd[1] = span_str; + uparams.benchmark_cmd[2] = "1"; + uparams.benchmark_cmd[3] = "0"; + uparams.benchmark_cmd[4] = "false"; + uparams.benchmark_cmd[5] = NULL; } - ksft_set_plan(tests ? 
: 4); - - if (mbm_test) - run_mbm_test(benchmark_cmd, cpu_no); - - if (mba_test) - run_mba_test(benchmark_cmd, cpu_no); - - if (cmt_test) - run_cmt_test(benchmark_cmd, cpu_no); + ksft_set_plan(tests); - if (cat_test) - run_cat_test(cpu_no, no_of_bits); + for (i = 0; i < ARRAY_SIZE(resctrl_tests); i++) + run_single_test(resctrl_tests[i], &uparams); free(span_str); ksft_finished(); diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c index 8878967891..5a49f07a6c 100644 --- a/tools/testing/selftests/resctrl/resctrl_val.c +++ b/tools/testing/selftests/resctrl/resctrl_val.c @@ -156,12 +156,12 @@ static int read_from_imc_dir(char *imc_dir, int count) sprintf(imc_counter_type, "%s%s", imc_dir, "type"); fp = fopen(imc_counter_type, "r"); if (!fp) { - perror("Failed to open imc counter type file"); + ksft_perror("Failed to open iMC counter type file"); return -1; } if (fscanf(fp, "%u", &imc_counters_config[count][READ].type) <= 0) { - perror("Could not get imc type"); + ksft_perror("Could not get iMC type"); fclose(fp); return -1; @@ -175,12 +175,12 @@ static int read_from_imc_dir(char *imc_dir, int count) sprintf(imc_counter_cfg, "%s%s", imc_dir, READ_FILE_NAME); fp = fopen(imc_counter_cfg, "r"); if (!fp) { - perror("Failed to open imc config file"); + ksft_perror("Failed to open iMC config file"); return -1; } if (fscanf(fp, "%s", cas_count_cfg) <= 0) { - perror("Could not get imc cas count read"); + ksft_perror("Could not get iMC cas count read"); fclose(fp); return -1; @@ -193,12 +193,12 @@ static int read_from_imc_dir(char *imc_dir, int count) sprintf(imc_counter_cfg, "%s%s", imc_dir, WRITE_FILE_NAME); fp = fopen(imc_counter_cfg, "r"); if (!fp) { - perror("Failed to open imc config file"); + ksft_perror("Failed to open iMC config file"); return -1; } if (fscanf(fp, "%s", cas_count_cfg) <= 0) { - perror("Could not get imc cas count write"); + ksft_perror("Could not get iMC cas count write"); fclose(fp); return -1; @@ -262,12 +262,12 @@ static int num_of_imcs(void) } closedir(dp); if (count == 0) { - perror("Unable find iMC counters!\n"); + ksft_print_msg("Unable to find iMC counters\n"); return -1; } } else { - perror("Unable to open PMU directory!\n"); + ksft_perror("Unable to open PMU directory"); return -1; } @@ -339,14 +339,14 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc) if (read(r->fd, &r->return_value, sizeof(struct membw_read_format)) == -1) { - perror("Couldn't get read b/w through iMC"); + ksft_perror("Couldn't get read b/w through iMC"); return -1; } if (read(w->fd, &w->return_value, sizeof(struct membw_read_format)) == -1) { - perror("Couldn't get write bw through iMC"); + ksft_perror("Couldn't get write bw through iMC"); return -1; } @@ -387,20 +387,20 @@ static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc) return 0; } -void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id) +void set_mbm_path(const char *ctrlgrp, const char *mongrp, int domain_id) { if (ctrlgrp && mongrp) sprintf(mbm_total_path, CON_MON_MBM_LOCAL_BYTES_PATH, - RESCTRL_PATH, ctrlgrp, mongrp, resource_id); + RESCTRL_PATH, ctrlgrp, mongrp, domain_id); else if (!ctrlgrp && mongrp) sprintf(mbm_total_path, MON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH, - mongrp, resource_id); + mongrp, domain_id); else if (ctrlgrp && !mongrp) sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH, - ctrlgrp, resource_id); + ctrlgrp, domain_id); else if (!ctrlgrp && !mongrp) sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH, 
RESCTRL_PATH, - resource_id); + domain_id); } /* @@ -413,23 +413,23 @@ void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id) static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp, int cpu_no, char *resctrl_val) { - int resource_id; + int domain_id; - if (get_resource_id(cpu_no, &resource_id) < 0) { - perror("Could not get resource_id"); + if (get_domain_id("MB", cpu_no, &domain_id) < 0) { + ksft_print_msg("Could not get domain ID\n"); return; } if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) - set_mbm_path(ctrlgrp, mongrp, resource_id); + set_mbm_path(ctrlgrp, mongrp, domain_id); if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) { if (ctrlgrp) sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, - RESCTRL_PATH, ctrlgrp, resource_id); + RESCTRL_PATH, ctrlgrp, domain_id); else sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH, - RESCTRL_PATH, resource_id); + RESCTRL_PATH, domain_id); } } @@ -449,12 +449,12 @@ static int get_mem_bw_resctrl(unsigned long *mbm_total) fp = fopen(mbm_total_path, "r"); if (!fp) { - perror("Failed to open total bw file"); + ksft_perror("Failed to open total bw file"); return -1; } if (fscanf(fp, "%lu", mbm_total) <= 0) { - perror("Could not get mbm local bytes"); + ksft_perror("Could not get mbm local bytes"); fclose(fp); return -1; @@ -495,7 +495,7 @@ int signal_handler_register(void) if (sigaction(SIGINT, &sigact, NULL) || sigaction(SIGTERM, &sigact, NULL) || sigaction(SIGHUP, &sigact, NULL)) { - perror("# sigaction"); + ksft_perror("sigaction"); ret = -1; } return ret; @@ -515,7 +515,7 @@ void signal_handler_unregister(void) if (sigaction(SIGINT, &sigact, NULL) || sigaction(SIGTERM, &sigact, NULL) || sigaction(SIGHUP, &sigact, NULL)) { - perror("# sigaction"); + ksft_perror("sigaction"); } } @@ -526,7 +526,7 @@ void signal_handler_unregister(void) * @bw_imc: perf imc counter value * @bw_resc: memory bandwidth value * - * Return: 0 on success. non-zero on failure. + * Return: 0 on success, < 0 on error. 
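For reference, a minimal sketch of how the resctrl-side counter behind mbm_total_path can be sampled once initialize_mem_bw_resctrl() has built the path; the one-second window and the MB divisor (bytes per megabyte) are assumptions mirroring the measurement loop further down in this file, not part of the patch.

        unsigned long bytes_start, bytes_end, bw_mbps;

        if (get_mem_bw_resctrl(&bytes_start) < 0)
                return -1;
        sleep(1);
        if (get_mem_bw_resctrl(&bytes_end) < 0)
                return -1;
        bw_mbps = (bytes_end - bytes_start) / MB;       /* MB/s over the window */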
*/ static int print_results_bw(char *filename, int bm_pid, float bw_imc, unsigned long bw_resc) @@ -540,16 +540,16 @@ static int print_results_bw(char *filename, int bm_pid, float bw_imc, } else { fp = fopen(filename, "a"); if (!fp) { - perror("Cannot open results file"); + ksft_perror("Cannot open results file"); - return errno; + return -1; } if (fprintf(fp, "Pid: %d \t Mem_BW_iMC: %f \t Mem_BW_resc: %lu \t Difference: %lu\n", bm_pid, bw_imc, bw_resc, diff) <= 0) { + ksft_print_msg("Could not log results\n"); fclose(fp); - perror("Could not log results."); - return errno; + return -1; } fclose(fp); } @@ -582,19 +582,20 @@ static void set_cmt_path(const char *ctrlgrp, const char *mongrp, char sock_num) static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp, int cpu_no, char *resctrl_val) { - int resource_id; + int domain_id; - if (get_resource_id(cpu_no, &resource_id) < 0) { - perror("# Unable to resource_id"); + if (get_domain_id("L3", cpu_no, &domain_id) < 0) { + ksft_print_msg("Could not get domain ID\n"); return; } if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) - set_cmt_path(ctrlgrp, mongrp, resource_id); + set_cmt_path(ctrlgrp, mongrp, domain_id); } -static int -measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start) +static int measure_vals(const struct user_params *uparams, + struct resctrl_val_param *param, + unsigned long *bw_resc_start) { unsigned long bw_resc, bw_resc_end; float bw_imc; @@ -607,7 +608,7 @@ measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start) * Compare the two values to validate resctrl value. * It takes 1sec to measure the data. */ - ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc); + ret = get_mem_bw_imc(uparams->cpu, param->bw_report, &bw_imc); if (ret < 0) return ret; @@ -647,20 +648,24 @@ static void run_benchmark(int signum, siginfo_t *info, void *ucontext) * stdio (console) */ fp = freopen("/dev/null", "w", stdout); - if (!fp) - PARENT_EXIT("Unable to direct benchmark status to /dev/null"); + if (!fp) { + ksft_perror("Unable to direct benchmark status to /dev/null"); + PARENT_EXIT(); + } if (strcmp(benchmark_cmd[0], "fill_buf") == 0) { /* Execute default fill_buf benchmark */ span = strtoul(benchmark_cmd[1], NULL, 10); memflush = atoi(benchmark_cmd[2]); operation = atoi(benchmark_cmd[3]); - if (!strcmp(benchmark_cmd[4], "true")) + if (!strcmp(benchmark_cmd[4], "true")) { once = true; - else if (!strcmp(benchmark_cmd[4], "false")) + } else if (!strcmp(benchmark_cmd[4], "false")) { once = false; - else - PARENT_EXIT("Invalid once parameter"); + } else { + ksft_print_msg("Invalid once parameter\n"); + PARENT_EXIT(); + } if (run_fill_buf(span, memflush, operation, once)) fprintf(stderr, "Error in running fill buffer\n"); @@ -668,22 +673,28 @@ static void run_benchmark(int signum, siginfo_t *info, void *ucontext) /* Execute specified benchmark */ ret = execvp(benchmark_cmd[0], benchmark_cmd); if (ret) - perror("wrong\n"); + ksft_perror("execvp"); } fclose(stdout); - PARENT_EXIT("Unable to run specified benchmark"); + ksft_print_msg("Unable to run specified benchmark\n"); + PARENT_EXIT(); } /* * resctrl_val: execute benchmark and measure memory bandwidth on * the benchmark + * @test: test information structure + * @uparams: user supplied parameters * @benchmark_cmd: benchmark command and its arguments * @param: parameters passed to resctrl_val() * - * Return: 0 on success. non-zero on failure. + * Return: 0 when the test was run, < 0 on error. 
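As context for the reworked prototype below, a hedged sketch of how a test might fill struct resctrl_val_param and call resctrl_val(); the field names follow the accesses visible in this file, while RESULT_FILE_NAME, mbm_setup() and the benchmark_cmd variable are placeholders.

        struct resctrl_val_param param = {
                .resctrl_val    = MBM_STR,
                .ctrlgrp        = "c1",
                .mongrp         = "m1",
                .filename       = RESULT_FILE_NAME,     /* placeholder */
                .bw_report      = "reads",
                .setup          = mbm_setup,            /* per-test callback */
        };

        ret = resctrl_val(test, uparams, benchmark_cmd, &param);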
*/ -int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param) +int resctrl_val(const struct resctrl_test *test, + const struct user_params *uparams, + const char * const *benchmark_cmd, + struct resctrl_val_param *param) { char *resctrl_val = param->resctrl_val; unsigned long bw_resc_start = 0; @@ -709,7 +720,7 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par ppid = getpid(); if (pipe(pipefd)) { - perror("# Unable to create pipe"); + ksft_perror("Unable to create pipe"); return -1; } @@ -721,7 +732,7 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par fflush(stdout); bm_pid = fork(); if (bm_pid == -1) { - perror("# Unable to fork"); + ksft_perror("Unable to fork"); return -1; } @@ -738,15 +749,17 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par sigact.sa_flags = SA_SIGINFO; /* Register for "SIGUSR1" signal from parent */ - if (sigaction(SIGUSR1, &sigact, NULL)) - PARENT_EXIT("Can't register child for signal"); + if (sigaction(SIGUSR1, &sigact, NULL)) { + ksft_perror("Can't register child for signal"); + PARENT_EXIT(); + } /* Tell parent that child is ready */ close(pipefd[0]); pipe_message = 1; if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) < sizeof(pipe_message)) { - perror("# failed signaling parent process"); + ksft_perror("Failed signaling parent process"); close(pipefd[1]); return -1; } @@ -755,7 +768,8 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par /* Suspend child until delivery of "SIGUSR1" from parent */ sigsuspend(&sigact.sa_mask); - PARENT_EXIT("Child is done"); + ksft_perror("Child is done"); + PARENT_EXIT(); } ksft_print_msg("Benchmark PID: %d\n", bm_pid); @@ -769,7 +783,7 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par value.sival_ptr = (void *)benchmark_cmd; /* Taskset benchmark to specified cpu */ - ret = taskset_benchmark(bm_pid, param->cpu_no); + ret = taskset_benchmark(bm_pid, uparams->cpu, NULL); if (ret) goto out; @@ -786,17 +800,17 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par goto out; initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp, - param->cpu_no, resctrl_val); + uparams->cpu, resctrl_val); } else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp, - param->cpu_no, resctrl_val); + uparams->cpu, resctrl_val); /* Parent waits for child to be ready. */ close(pipefd[1]); while (pipe_message != 1) { if (read(pipefd[0], &pipe_message, sizeof(pipe_message)) < sizeof(pipe_message)) { - perror("# failed reading message from child process"); + ksft_perror("Failed reading message from child process"); close(pipefd[0]); goto out; } @@ -805,8 +819,8 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par /* Signal child to start benchmark */ if (sigqueue(bm_pid, SIGUSR1, value) == -1) { - perror("# sigqueue SIGUSR1 to child"); - ret = errno; + ksft_perror("sigqueue SIGUSR1 to child"); + ret = -1; goto out; } @@ -815,7 +829,7 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par /* Test runs until the callback setup() tells the test to stop. 
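The setup() callback drives the measurement loop below: it runs once per pass and ends the test by returning END_OF_TESTS. A minimal sketch, assuming a num_of_runs counter in struct resctrl_val_param and a per-test NUM_OF_RUNS limit:

static int example_setup(const struct resctrl_test *test,
                         const struct user_params *uparams,
                         struct resctrl_val_param *p)
{
        if (p->num_of_runs >= NUM_OF_RUNS)      /* assumed per-test limit */
                return END_OF_TESTS;

        p->num_of_runs++;
        return 0;
}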
*/ while (1) { - ret = param->setup(param); + ret = param->setup(test, uparams, param); if (ret == END_OF_TESTS) { ret = 0; break; @@ -825,12 +839,12 @@ int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *par if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) || !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) { - ret = measure_vals(param, &bw_resc_start); + ret = measure_vals(uparams, param, &bw_resc_start); if (ret) break; } else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) { sleep(1); - ret = measure_cache_vals(param, bm_pid); + ret = measure_llc_resctrl(param->filename, bm_pid); if (ret) break; } diff --git a/tools/testing/selftests/resctrl/resctrlfs.c b/tools/testing/selftests/resctrl/resctrlfs.c index 5ebd436838..1cade75176 100644 --- a/tools/testing/selftests/resctrl/resctrlfs.c +++ b/tools/testing/selftests/resctrl/resctrlfs.c @@ -20,7 +20,7 @@ static int find_resctrl_mount(char *buffer) mounts = fopen("/proc/mounts", "r"); if (!mounts) { - perror("/proc/mounts"); + ksft_perror("/proc/mounts"); return -ENXIO; } while (!feof(mounts)) { @@ -56,7 +56,7 @@ static int find_resctrl_mount(char *buffer) * Mounts resctrl FS. Fails if resctrl FS is already mounted to avoid * pre-existing settings interfering with the test results. * - * Return: 0 on success, non-zero on failure + * Return: 0 on success, < 0 on error. */ int mount_resctrlfs(void) { @@ -69,7 +69,7 @@ int mount_resctrlfs(void) ksft_print_msg("Mounting resctrl to \"%s\"\n", RESCTRL_PATH); ret = mount("resctrl", RESCTRL_PATH, "resctrl", 0, NULL); if (ret) - perror("# mount"); + ksft_perror("mount"); return ret; } @@ -86,41 +86,67 @@ int umount_resctrlfs(void) return ret; if (umount(mountpoint)) { - perror("# Unable to umount resctrl"); + ksft_perror("Unable to umount resctrl"); - return errno; + return -1; } return 0; } /* - * get_resource_id - Get socket number/l3 id for a specified CPU + * get_cache_level - Convert cache level from string to integer + * @cache_type: Cache level as string + * + * Return: cache level as integer or -1 if @cache_type is invalid. + */ +static int get_cache_level(const char *cache_type) +{ + if (!strcmp(cache_type, "L3")) + return 3; + if (!strcmp(cache_type, "L2")) + return 2; + + ksft_print_msg("Invalid cache level\n"); + return -1; +} + +static int get_resource_cache_level(const char *resource) +{ + /* "MB" use L3 (LLC) as resource */ + if (!strcmp(resource, "MB")) + return 3; + return get_cache_level(resource); +} + +/* + * get_domain_id - Get resctrl domain ID for a specified CPU + * @resource: resource name * @cpu_no: CPU number - * @resource_id: Socket number or l3_id + * @domain_id: domain ID (cache ID; for MB, L3 cache ID) * * Return: >= 0 on success, < 0 on failure. 
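A short usage sketch for the new get_domain_id() helper defined below: the returned value is the sysfs cache ID for the CPU's cache level (L3 is used for the MB resource) and is what callers put after the resource name in a schemata line.

        int dom_id;

        if (get_domain_id("L3", uparams->cpu, &dom_id) < 0)
                return -1;
        /* e.g. feeds into a schemata line of the form "L3:<dom_id>=<mask>" */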
*/ -int get_resource_id(int cpu_no, int *resource_id) +int get_domain_id(const char *resource, int cpu_no, int *domain_id) { char phys_pkg_path[1024]; + int cache_num; FILE *fp; - if (get_vendor() == ARCH_AMD) - sprintf(phys_pkg_path, "%s%d/cache/index3/id", - PHYS_ID_PATH, cpu_no); - else - sprintf(phys_pkg_path, "%s%d/topology/physical_package_id", - PHYS_ID_PATH, cpu_no); + cache_num = get_resource_cache_level(resource); + if (cache_num < 0) + return cache_num; + + sprintf(phys_pkg_path, "%s%d/cache/index%d/id", PHYS_ID_PATH, cpu_no, cache_num); fp = fopen(phys_pkg_path, "r"); if (!fp) { - perror("Failed to open physical_package_id"); + ksft_perror("Failed to open cache id file"); return -1; } - if (fscanf(fp, "%d", resource_id) <= 0) { - perror("Could not get socket number or l3 id"); + if (fscanf(fp, "%d", domain_id) <= 0) { + ksft_perror("Could not get domain ID"); fclose(fp); return -1; @@ -138,31 +164,26 @@ int get_resource_id(int cpu_no, int *resource_id) * * Return: = 0 on success, < 0 on failure. */ -int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size) +int get_cache_size(int cpu_no, const char *cache_type, unsigned long *cache_size) { char cache_path[1024], cache_str[64]; int length, i, cache_num; FILE *fp; - if (!strcmp(cache_type, "L3")) { - cache_num = 3; - } else if (!strcmp(cache_type, "L2")) { - cache_num = 2; - } else { - perror("Invalid cache level"); - return -1; - } + cache_num = get_cache_level(cache_type); + if (cache_num < 0) + return cache_num; sprintf(cache_path, "/sys/bus/cpu/devices/cpu%d/cache/index%d/size", cpu_no, cache_num); fp = fopen(cache_path, "r"); if (!fp) { - perror("Failed to open cache size"); + ksft_perror("Failed to open cache size"); return -1; } if (fscanf(fp, "%s", cache_str) <= 0) { - perror("Could not get cache_size"); + ksft_perror("Could not get cache_size"); fclose(fp); return -1; @@ -196,30 +217,29 @@ int get_cache_size(int cpu_no, char *cache_type, unsigned long *cache_size) #define CORE_SIBLINGS_PATH "/sys/bus/cpu/devices/cpu" /* - * get_cbm_mask - Get cbm mask for given cache - * @cache_type: Cache level L2/L3 - * @cbm_mask: cbm_mask returned as a string + * get_bit_mask - Get bit mask from given file + * @filename: File containing the mask + * @mask: The bit mask returned as unsigned long * * Return: = 0 on success, < 0 on failure. 
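For context, a hedged sketch of get_cache_size() in use; sizing a buffer from the cache size is how the allocation tests typically consume it, with allocated_bits and full_cbm_len as assumed inputs.

        unsigned long cache_total_size = 0, span;

        if (get_cache_size(uparams->cpu, "L3", &cache_total_size) < 0)
                return -1;
        /* bytes covered by allocated_bits out of a full_cbm_len-bit CBM */
        span = cache_total_size * allocated_bits / full_cbm_len;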
*/ -int get_cbm_mask(char *cache_type, char *cbm_mask) +static int get_bit_mask(const char *filename, unsigned long *mask) { - char cbm_mask_path[1024]; FILE *fp; - if (!cbm_mask) + if (!filename || !mask) return -1; - sprintf(cbm_mask_path, "%s/%s/cbm_mask", INFO_PATH, cache_type); - - fp = fopen(cbm_mask_path, "r"); + fp = fopen(filename, "r"); if (!fp) { - perror("Failed to open cache level"); - + ksft_print_msg("Failed to open bit mask file '%s': %s\n", + filename, strerror(errno)); return -1; } - if (fscanf(fp, "%s", cbm_mask) <= 0) { - perror("Could not get max cbm_mask"); + + if (fscanf(fp, "%lx", mask) <= 0) { + ksft_print_msg("Could not read bit mask file '%s': %s\n", + filename, strerror(errno)); fclose(fp); return -1; @@ -230,64 +250,200 @@ int get_cbm_mask(char *cache_type, char *cbm_mask) } /* - * get_core_sibling - Get sibling core id from the same socket for given CPU - * @cpu_no: CPU number + * resource_info_unsigned_get - Read an unsigned value from + * /sys/fs/resctrl/info/@resource/@filename + * @resource: Resource name that matches directory name in + * /sys/fs/resctrl/info + * @filename: File in /sys/fs/resctrl/info/@resource + * @val: Contains read value on success. * - * Return: > 0 on success, < 0 on failure. + * Return: = 0 on success, < 0 on failure. On success the read + * value is saved into @val. */ -int get_core_sibling(int cpu_no) +int resource_info_unsigned_get(const char *resource, const char *filename, + unsigned int *val) { - char core_siblings_path[1024], cpu_list_str[64]; - int sibling_cpu_no = -1; + char file_path[PATH_MAX]; FILE *fp; - sprintf(core_siblings_path, "%s%d/topology/core_siblings_list", - CORE_SIBLINGS_PATH, cpu_no); + snprintf(file_path, sizeof(file_path), "%s/%s/%s", INFO_PATH, resource, + filename); - fp = fopen(core_siblings_path, "r"); + fp = fopen(file_path, "r"); if (!fp) { - perror("Failed to open core siblings path"); - + ksft_print_msg("Error opening %s: %m\n", file_path); return -1; } - if (fscanf(fp, "%s", cpu_list_str) <= 0) { - perror("Could not get core_siblings list"); - fclose(fp); + if (fscanf(fp, "%u", val) <= 0) { + ksft_print_msg("Could not get contents of %s: %m\n", file_path); + fclose(fp); return -1; } + fclose(fp); + return 0; +} - char *token = strtok(cpu_list_str, "-,"); +/* + * create_bit_mask- Create bit mask from start, len pair + * @start: LSB of the mask + * @len Number of bits in the mask + */ +unsigned long create_bit_mask(unsigned int start, unsigned int len) +{ + return ((1UL << len) - 1UL) << start; +} - while (token) { - sibling_cpu_no = atoi(token); - /* Skipping core 0 as we don't want to run test on core 0 */ - if (sibling_cpu_no != 0 && sibling_cpu_no != cpu_no) - break; - token = strtok(NULL, "-,"); +/* + * count_contiguous_bits - Returns the longest train of bits in a bit mask + * @val A bit mask + * @start The location of the least-significant bit of the longest train + * + * Return: The length of the contiguous bits in the longest train of bits + */ +unsigned int count_contiguous_bits(unsigned long val, unsigned int *start) +{ + unsigned long last_val; + unsigned int count = 0; + + while (val) { + last_val = val; + val &= (val >> 1); + count++; + } + + if (start) { + if (count) + *start = ffsl(last_val) - 1; + else + *start = 0; } - return sibling_cpu_no; + return count; +} + +/* + * get_full_cbm - Get full Cache Bit Mask (CBM) + * @cache_type: Cache type as "L2" or "L3" + * @mask: Full cache bit mask representing the maximal portion of cache + * available for allocation, returned as unsigned 
long. + * + * Return: = 0 on success, < 0 on failure. + */ +int get_full_cbm(const char *cache_type, unsigned long *mask) +{ + char cbm_path[PATH_MAX]; + int ret; + + if (!cache_type) + return -1; + + snprintf(cbm_path, sizeof(cbm_path), "%s/%s/cbm_mask", + INFO_PATH, cache_type); + + ret = get_bit_mask(cbm_path, mask); + if (ret || !*mask) + return -1; + + return 0; +} + +/* + * get_shareable_mask - Get shareable mask from shareable_bits + * @cache_type: Cache type as "L2" or "L3" + * @shareable_mask: Shareable mask returned as unsigned long + * + * Return: = 0 on success, < 0 on failure. + */ +static int get_shareable_mask(const char *cache_type, unsigned long *shareable_mask) +{ + char mask_path[PATH_MAX]; + + if (!cache_type) + return -1; + + snprintf(mask_path, sizeof(mask_path), "%s/%s/shareable_bits", + INFO_PATH, cache_type); + + return get_bit_mask(mask_path, shareable_mask); +} + +/* + * get_mask_no_shareable - Get Cache Bit Mask (CBM) without shareable bits + * @cache_type: Cache type as "L2" or "L3" + * @mask: The largest exclusive portion of the cache out of the + * full CBM, returned as unsigned long + * + * Parts of a cache may be shared with other devices such as GPU. This function + * calculates the largest exclusive portion of the cache where no other devices + * besides CPU have access to the cache portion. + * + * Return: = 0 on success, < 0 on failure. + */ +int get_mask_no_shareable(const char *cache_type, unsigned long *mask) +{ + unsigned long full_mask, shareable_mask; + unsigned int start, len; + + if (get_full_cbm(cache_type, &full_mask) < 0) + return -1; + if (get_shareable_mask(cache_type, &shareable_mask) < 0) + return -1; + + len = count_contiguous_bits(full_mask & ~shareable_mask, &start); + if (!len) + return -1; + + *mask = create_bit_mask(start, len); + + return 0; } /* * taskset_benchmark - Taskset PID (i.e. benchmark) to a specified cpu - * @bm_pid: PID that should be binded - * @cpu_no: CPU number at which the PID would be binded + * @bm_pid: PID that should be binded + * @cpu_no: CPU number at which the PID would be binded + * @old_affinity: When not NULL, set to old CPU affinity * - * Return: 0 on success, non-zero on failure + * Return: 0 on success, < 0 on error. */ -int taskset_benchmark(pid_t bm_pid, int cpu_no) +int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity) { cpu_set_t my_set; + if (old_affinity) { + CPU_ZERO(old_affinity); + if (sched_getaffinity(bm_pid, sizeof(*old_affinity), + old_affinity)) { + ksft_perror("Unable to read CPU affinity"); + return -1; + } + } + CPU_ZERO(&my_set); CPU_SET(cpu_no, &my_set); if (sched_setaffinity(bm_pid, sizeof(cpu_set_t), &my_set)) { - perror("Unable to taskset benchmark"); + ksft_perror("Unable to taskset benchmark"); + + return -1; + } + + return 0; +} +/* + * taskset_restore - Taskset PID to the earlier CPU affinity + * @bm_pid: PID that should be reset + * @old_affinity: The old CPU affinity to restore + * + * Return: 0 on success, < 0 on error. + */ +int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity) +{ + if (sched_setaffinity(bm_pid, sizeof(*old_affinity), old_affinity)) { + ksft_perror("Unable to restore CPU affinity"); return -1; } @@ -300,7 +456,7 @@ int taskset_benchmark(pid_t bm_pid, int cpu_no) * @grp: Full path and name of the group * @parent_grp: Full path and name of the parent group * - * Return: 0 on success, non-zero on failure + * Return: 0 on success, < 0 on error. 
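The new bit-mask helpers above compose naturally; a minimal sketch that derives a contiguous half-size mask from the full CBM (variable names are illustrative only):

        unsigned long full_cbm, half_mask;
        unsigned int start, len;

        if (get_full_cbm("L3", &full_cbm) < 0)
                return -1;

        len = count_contiguous_bits(full_cbm, &start);
        half_mask = create_bit_mask(start, len / 2);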
*/ static int create_grp(const char *grp_name, char *grp, const char *parent_grp) { @@ -325,7 +481,7 @@ static int create_grp(const char *grp_name, char *grp, const char *parent_grp) } closedir(dp); } else { - perror("Unable to open resctrl for group"); + ksft_perror("Unable to open resctrl for group"); return -1; } @@ -333,7 +489,7 @@ static int create_grp(const char *grp_name, char *grp, const char *parent_grp) /* Requested grp doesn't exist, hence create it */ if (found_grp == 0) { if (mkdir(grp, 0) == -1) { - perror("Unable to create group"); + ksft_perror("Unable to create group"); return -1; } @@ -348,12 +504,12 @@ static int write_pid_to_tasks(char *tasks, pid_t pid) fp = fopen(tasks, "w"); if (!fp) { - perror("Failed to open tasks file"); + ksft_perror("Failed to open tasks file"); return -1; } if (fprintf(fp, "%d\n", pid) < 0) { - perror("Failed to wr pid to tasks file"); + ksft_print_msg("Failed to write pid to tasks file\n"); fclose(fp); return -1; @@ -376,7 +532,7 @@ static int write_pid_to_tasks(char *tasks, pid_t pid) * pid is not written, this means that pid is in con_mon grp and hence * should consult con_mon grp's mon_data directory for results. * - * Return: 0 on success, non-zero on failure + * Return: 0 on success, < 0 on error. */ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp, char *resctrl_val) @@ -420,7 +576,7 @@ int write_bm_pid_to_resctrl(pid_t bm_pid, char *ctrlgrp, char *mongrp, out: ksft_print_msg("Writing benchmark parameters to resctrl FS\n"); if (ret) - perror("# writing to resctrlfs"); + ksft_print_msg("Failed writing to resctrlfs\n"); return ret; } @@ -430,23 +586,17 @@ out: * @ctrlgrp: Name of the con_mon grp * @schemata: Schemata that should be updated to * @cpu_no: CPU number that the benchmark PID is binded to - * @resctrl_val: Resctrl feature (Eg: mbm, mba.. etc) + * @resource: Resctrl resource (Eg: MB, L3, L2, etc.) * - * Update schemata of a con_mon grp *only* if requested resctrl feature is + * Update schemata of a con_mon grp *only* if requested resctrl resource is * allocation type * - * Return: 0 on success, non-zero on failure + * Return: 0 on success, < 0 on error. 
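With the resource name now passed in directly, write_schemata() below serves both cache allocation and memory bandwidth tests; a hedged usage sketch (group names and values are illustrative):

        unsigned long half_mask = 0x3f;         /* illustrative CBM */
        char schema_val[64], mba_val[] = "50";

        snprintf(schema_val, sizeof(schema_val), "%lx", half_mask);
        if (write_schemata("c1", schema_val, uparams->cpu, "L3") < 0)
                return -1;

        /* Memory bandwidth allocation takes a percentage instead of a CBM */
        if (write_schemata(NULL, mba_val, uparams->cpu, "MB") < 0)
                return -1;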
*/ -int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val) +int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, const char *resource) { char controlgroup[1024], reason[128], schema[1024] = {}; - int resource_id, fd, schema_len = -1, ret = 0; - - if (strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) && - strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) && - strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) && - strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) - return -ENOENT; + int domain_id, fd, schema_len, ret = 0; if (!schemata) { ksft_print_msg("Skipping empty schemata update\n"); @@ -454,8 +604,8 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val) return -1; } - if (get_resource_id(cpu_no, &resource_id) < 0) { - sprintf(reason, "Failed to get resource id"); + if (get_domain_id(resource, cpu_no, &domain_id) < 0) { + sprintf(reason, "Failed to get domain ID"); ret = -1; goto out; @@ -466,14 +616,8 @@ int write_schemata(char *ctrlgrp, char *schemata, int cpu_no, char *resctrl_val) else sprintf(controlgroup, "%s/schemata", RESCTRL_PATH); - if (!strncmp(resctrl_val, CAT_STR, sizeof(CAT_STR)) || - !strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) - schema_len = snprintf(schema, sizeof(schema), "%s%d%c%s\n", - "L3:", resource_id, '=', schemata); - if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) || - !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) - schema_len = snprintf(schema, sizeof(schema), "%s%d%c%s\n", - "MB:", resource_id, '=', schemata); + schema_len = snprintf(schema, sizeof(schema), "%s:%d=%s\n", + resource, domain_id, schemata); if (schema_len < 0 || schema_len >= sizeof(schema)) { snprintf(reason, sizeof(reason), "snprintf() failed with return value : %d", schema_len); @@ -564,20 +708,16 @@ char *fgrep(FILE *inf, const char *str) } /* - * validate_resctrl_feature_request - Check if requested feature is valid. - * @resource: Required resource (e.g., MB, L3, L2, L3_MON, etc.) - * @feature: Required monitor feature (in mon_features file). Can only be - * set for L3_MON. Must be NULL for all other resources. + * resctrl_resource_exists - Check if a resource is supported. + * @resource: Resctrl resource (e.g., MB, L3, L2, L3_MON, etc.) * - * Return: True if the resource/feature is supported, else false. False is + * Return: True if the resource is supported, else false. False is * also returned if resctrl FS is not mounted. */ -bool validate_resctrl_feature_request(const char *resource, const char *feature) +bool resctrl_resource_exists(const char *resource) { char res_path[PATH_MAX]; struct stat statbuf; - char *res; - FILE *inf; int ret; if (!resource) @@ -592,8 +732,25 @@ bool validate_resctrl_feature_request(const char *resource, const char *feature) if (stat(res_path, &statbuf)) return false; - if (!feature) - return true; + return true; +} + +/* + * resctrl_mon_feature_exists - Check if requested monitoring feature is valid. + * @resource: Resource that uses the mon_features file. Currently only L3_MON + * is valid. + * @feature: Required monitor feature (in mon_features file). + * + * Return: True if the feature is supported, else false. 
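resctrl_resource_exists() takes over the resource half of the old combined check; a small sketch of an allocation-only probe in that style (the function name is illustrative):

static bool example_cat_check(void)
{
        if (!resctrl_resource_exists("L3")) {
                ksft_print_msg("L3 allocation is not supported\n");
                return false;
        }
        return true;
}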
+ */ +bool resctrl_mon_feature_exists(const char *resource, const char *feature) +{ + char res_path[PATH_MAX]; + char *res; + FILE *inf; + + if (!feature || !resource) + return false; snprintf(res_path, sizeof(res_path), "%s/%s/mon_features", INFO_PATH, resource); inf = fopen(res_path, "r"); @@ -607,6 +764,36 @@ bool validate_resctrl_feature_request(const char *resource, const char *feature) return !!res; } +/* + * resource_info_file_exists - Check if a file is present inside + * /sys/fs/resctrl/info/@resource. + * @resource: Required resource (Eg: MB, L3, L2, etc.) + * @file: Required file. + * + * Return: True if the /sys/fs/resctrl/info/@resource/@file exists, else false. + */ +bool resource_info_file_exists(const char *resource, const char *file) +{ + char res_path[PATH_MAX]; + struct stat statbuf; + + if (!file || !resource) + return false; + + snprintf(res_path, sizeof(res_path), "%s/%s/%s", INFO_PATH, resource, + file); + + if (stat(res_path, &statbuf)) + return false; + + return true; +} + +bool test_resource_feature_check(const struct resctrl_test *test) +{ + return resctrl_resource_exists(test->resource); +} + int filter_dmesg(void) { char line[1024]; @@ -617,7 +804,7 @@ int filter_dmesg(void) ret = pipe(pipefds); if (ret) { - perror("pipe"); + ksft_perror("pipe"); return ret; } fflush(stdout); @@ -626,13 +813,13 @@ int filter_dmesg(void) close(pipefds[0]); dup2(pipefds[1], STDOUT_FILENO); execlp("dmesg", "dmesg", NULL); - perror("executing dmesg"); + ksft_perror("Executing dmesg"); exit(1); } close(pipefds[1]); fp = fdopen(pipefds[0], "r"); if (!fp) { - perror("fdopen(pipe)"); + ksft_perror("fdopen(pipe)"); kill(pid, SIGTERM); return -1; |
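Tying the new existence helpers together, a hedged sketch of a per-test feature check built on test_resource_feature_check(); the mbm_local_bytes event name and the function name are assumptions, not taken from the patch.

static bool example_mbm_feature_check(const struct resctrl_test *test)
{
        return test_resource_feature_check(test) &&
               resctrl_mon_feature_exists("L3_MON", "mbm_local_bytes");
}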