author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit    01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree      b406c5242a088c4f59c6e4b719b783f43aca6ae9 /tools/perf/arch
parent    Adding upstream version 6.7.12. (diff)
Adding upstream version 6.8.9. (tag: upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/perf/arch')
-rw-r--r--  tools/perf/arch/arm/util/cs-etm.c                    |  16
-rw-r--r--  tools/perf/arch/arm64/util/arm-spe.c                 |   4
-rw-r--r--  tools/perf/arch/arm64/util/header.c                  |   2
-rw-r--r--  tools/perf/arch/loongarch/annotate/instructions.c    |   6
-rw-r--r--  tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl  |   5
-rw-r--r--  tools/perf/arch/powerpc/entry/syscalls/syscall.tbl   |   5
-rw-r--r--  tools/perf/arch/s390/entry/syscalls/syscall.tbl      |   5
-rw-r--r--  tools/perf/arch/x86/entry/syscalls/syscall_64.tbl    |   5
-rw-r--r--  tools/perf/arch/x86/tests/hybrid.c                   |  37
-rw-r--r--  tools/perf/arch/x86/util/dwarf-regs.c                |  38
-rw-r--r--  tools/perf/arch/x86/util/event.c                     | 103
-rw-r--r--  tools/perf/arch/x86/util/intel-bts.c                 |   4
-rw-r--r--  tools/perf/arch/x86/util/intel-pt.c                  |  10
13 files changed, 167 insertions(+), 73 deletions(-)
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 2cf873d71d..77e6663c17 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -199,7 +199,7 @@ static int cs_etm_validate_config(struct auxtrace_record *itr,
{
int i, err = -EINVAL;
struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
- struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+ struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
/* Set option of each CPU we have */
for (i = 0; i < cpu__max_cpu().cpu; i++) {
@@ -211,7 +211,7 @@ static int cs_etm_validate_config(struct auxtrace_record *itr,
* program can run on any CPUs in this case, thus don't skip
* validation.
*/
- if (!perf_cpu_map__empty(event_cpus) &&
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus) &&
!perf_cpu_map__has(event_cpus, cpu))
continue;
@@ -435,7 +435,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
* Also the case of per-cpu mmaps, need the contextID in order to be notified
* when a context switch happened.
*/
- if (!perf_cpu_map__empty(cpus)) {
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
"timestamp", 1);
evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
@@ -461,7 +461,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
evsel->core.attr.sample_period = 1;
/* In per-cpu case, always need the time of mmap events etc */
- if (!perf_cpu_map__empty(cpus))
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
evsel__set_sample_bit(evsel, TIME);
err = cs_etm_validate_config(itr, cs_etm_evsel);
@@ -536,10 +536,10 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
int i;
int etmv3 = 0, etmv4 = 0, ete = 0;
struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
- struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+ struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
/* cpu map is not empty, we have specific CPUs to work with */
- if (!perf_cpu_map__empty(event_cpus)) {
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
for (i = 0; i < cpu__max_cpu().cpu; i++) {
struct perf_cpu cpu = { .cpu = i, };
@@ -802,7 +802,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
u64 nr_cpu, type;
struct perf_cpu_map *cpu_map;
struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
- struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
+ struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
@@ -814,7 +814,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
return -EINVAL;
/* If the cpu_map is empty all online CPUs are involved */
- if (perf_cpu_map__empty(event_cpus)) {
+ if (perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
cpu_map = online_cpus;
} else {
/* Make sure all specified CPUs are online */
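The recurring substitution in this patch renames two libperf cpumap helpers: perf_cpu_map__new(NULL) becomes perf_cpu_map__new_online_cpus(), and the perf_cpu_map__empty() predicate becomes perf_cpu_map__has_any_cpu_or_is_empty(). A minimal sketch of the call pattern, assuming only the libperf API visible in this diff (the surrounding function is illustrative):

#include <perf/cpumap.h>

/* Illustrative caller, mirroring the per-cpu checks above. */
static void configure_sampling(struct perf_cpu_map *user_requested_cpus)
{
	/* Same behaviour as the old perf_cpu_map__new(NULL), but the name
	 * now states that the map is built from the online CPUs. */
	struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();

	/* True for a NULL/empty map and for the "any CPU" map, i.e. the
	 * cases where recording is not pinned to specific CPUs. */
	if (!perf_cpu_map__has_any_cpu_or_is_empty(user_requested_cpus)) {
		/* per-cpu mmaps: sample TIME/CPU, enable timestamps, ... */
	}

	perf_cpu_map__put(online_cpus);
}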
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index e3acc739bd..51ccbfd3d2 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -232,7 +232,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
* In the case of per-cpu mmaps, sample CPU for AUX event;
* also enable the timestamp tracing for samples correlation.
*/
- if (!perf_cpu_map__empty(cpus)) {
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
evsel__set_sample_bit(arm_spe_evsel, CPU);
evsel__set_config_if_unset(arm_spe_pmu, arm_spe_evsel,
"ts_enable", 1);
@@ -265,7 +265,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
tracking_evsel->core.attr.sample_period = 1;
/* In per-cpu case, always need the time of mmap events etc */
- if (!perf_cpu_map__empty(cpus)) {
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
evsel__set_sample_bit(tracking_evsel, TIME);
evsel__set_sample_bit(tracking_evsel, CPU);
diff --git a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c
index a2eef9ec54..9703749915 100644
--- a/tools/perf/arch/arm64/util/header.c
+++ b/tools/perf/arch/arm64/util/header.c
@@ -57,7 +57,7 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
int get_cpuid(char *buf, size_t sz)
{
- struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
+ struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
int ret;
if (!cpus)
diff --git a/tools/perf/arch/loongarch/annotate/instructions.c b/tools/perf/arch/loongarch/annotate/instructions.c
index 98e19c5366..21cc7e4149 100644
--- a/tools/perf/arch/loongarch/annotate/instructions.c
+++ b/tools/perf/arch/loongarch/annotate/instructions.c
@@ -61,10 +61,10 @@ static int loongarch_jump__parse(struct arch *arch, struct ins_operands *ops, st
const char *c = strchr(ops->raw, '#');
u64 start, end;
- ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
- ops->raw_func_start = strchr(ops->raw, '<');
+ ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char);
+ ops->jump.raw_func_start = strchr(ops->raw, '<');
- if (ops->raw_func_start && c > ops->raw_func_start)
+ if (ops->jump.raw_func_start && c > ops->jump.raw_func_start)
c = NULL;
if (c++ != NULL)
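This hunk tracks a rename in the shared annotate code: the comment and function-start pointers used by jump parsing moved from the top level of struct ins_operands into its jump member. A sketch of the assumed layout, showing only the fields the hunk touches (the rest of the struct is elided):

struct ins_operands {
	char *raw;
	struct {
		const char *raw_comment;	/* was ops->raw_comment */
		const char *raw_func_start;	/* was ops->raw_func_start */
		/* ... other jump-parsing state ... */
	} jump;
	/* ... */
};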
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 116ff501bf..532b855df5 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -371,3 +371,8 @@
454 n64 futex_wake sys_futex_wake
455 n64 futex_wait sys_futex_wait
456 n64 futex_requeue sys_futex_requeue
+457 n64 statmount sys_statmount
+458 n64 listmount sys_listmount
+459 n64 lsm_get_self_attr sys_lsm_get_self_attr
+460 n64 lsm_set_self_attr sys_lsm_set_self_attr
+461 n64 lsm_list_modules sys_lsm_list_modules
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 7fab411378..17173b82ca 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -543,3 +543,8 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index 86fec9b080..095bb86339 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -459,3 +459,8 @@
454 common futex_wake sys_futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue sys_futex_requeue
+457 common statmount sys_statmount sys_statmount
+458 common listmount sys_listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 8cb8bf6872..7e8d46f414 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -378,6 +378,11 @@
454 common futex_wake sys_futex_wake
455 common futex_wait sys_futex_wait
456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
#
# Due to a historical design error, certain syscalls are numbered differently
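All four syscall tables gain the same five v6.8 system calls at numbers 457-461. As a hedged usage sketch for one of them, the snippet below lists the mount IDs directly below the root mount via listmount(); it assumes v6.8-or-later uapi headers for struct mnt_id_req, MNT_ID_REQ_SIZE_VER0 and LSMT_ROOT, and falls back to the x86_64 number from the table above if __NR_listmount is undefined:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/types.h>
#include <linux/mount.h>	/* struct mnt_id_req, LSMT_ROOT (v6.8+ uapi) */

#ifndef __NR_listmount
#define __NR_listmount 458	/* x86_64 number from the table above */
#endif

int main(void)
{
	struct mnt_id_req req = {
		.size	= MNT_ID_REQ_SIZE_VER0,
		.mnt_id	= LSMT_ROOT,	/* children of the root mount */
	};
	__u64 ids[64];
	long n = syscall(__NR_listmount, &req, ids, 64, 0);

	if (n < 0) {
		perror("listmount");
		return 1;
	}
	for (long i = 0; i < n; i++)
		printf("mount id: %llu\n", (unsigned long long)ids[i]);
	return 0;
}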
diff --git a/tools/perf/arch/x86/tests/hybrid.c b/tools/perf/arch/x86/tests/hybrid.c
index eb152770f1..40f5d17fed 100644
--- a/tools/perf/arch/x86/tests/hybrid.c
+++ b/tools/perf/arch/x86/tests/hybrid.c
@@ -47,7 +47,7 @@ static int test__hybrid_hw_group_event(struct evlist *evlist)
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW));
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+ TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_INSTRUCTIONS));
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
return TEST_OK;
}
@@ -102,7 +102,7 @@ static int test__hybrid_group_modifier1(struct evlist *evlist)
evsel = evsel__next(evsel);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW));
- TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS));
+ TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_INSTRUCTIONS));
TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
@@ -163,6 +163,24 @@ static int test__checkevent_pmu(struct evlist *evlist)
return TEST_OK;
}
+static int test__hybrid_hw_group_event_2(struct evlist *evlist)
+{
+ struct evsel *evsel, *leader;
+
+ evsel = leader = evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW));
+ TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES));
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+
+ evsel = evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type);
+ TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == 0x3c);
+ TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader));
+ return TEST_OK;
+}
+
struct evlist_test {
const char *name;
bool (*valid)(void);
@@ -171,27 +189,27 @@ struct evlist_test {
static const struct evlist_test test__hybrid_events[] = {
{
- .name = "cpu_core/cpu-cycles/",
+ .name = "cpu_core/cycles/",
.check = test__hybrid_hw_event_with_pmu,
/* 0 */
},
{
- .name = "{cpu_core/cpu-cycles/,cpu_core/instructions/}",
+ .name = "{cpu_core/cycles/,cpu_core/branches/}",
.check = test__hybrid_hw_group_event,
/* 1 */
},
{
- .name = "{cpu-clock,cpu_core/cpu-cycles/}",
+ .name = "{cpu-clock,cpu_core/cycles/}",
.check = test__hybrid_sw_hw_group_event,
/* 2 */
},
{
- .name = "{cpu_core/cpu-cycles/,cpu-clock}",
+ .name = "{cpu_core/cycles/,cpu-clock}",
.check = test__hybrid_hw_sw_group_event,
/* 3 */
},
{
- .name = "{cpu_core/cpu-cycles/k,cpu_core/instructions/u}",
+ .name = "{cpu_core/cycles/k,cpu_core/branches/u}",
.check = test__hybrid_group_modifier1,
/* 4 */
},
@@ -215,6 +233,11 @@ static const struct evlist_test test__hybrid_events[] = {
.check = test__hybrid_cache_event,
/* 8 */
},
+ {
+ .name = "{cpu_core/cycles/,cpu_core/cpu-cycles/}",
+ .check = test__hybrid_hw_group_event_2,
+ /* 9 */
+ },
};
static int test_event(const struct evlist_test *e)
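The new test pairs the legacy event name (cpu_core/cycles/) with the sysfs event of the same meaning (cpu_core/cpu-cycles/): the former is expected to parse as an extended legacy hardware event, the latter as the PMU's raw encoding 0x3c. A sketch of the extended-type encoding that test_hybrid_type() checks, assuming the PERF_PMU_TYPE_SHIFT and PERF_HW_EVENT_MASK definitions from a recent uapi perf_event.h:

#include <linux/perf_event.h>

/* Illustrative helper: encode a legacy hardware event for one hybrid
 * PMU. The high 32 bits of attr.config select the PMU (e.g. cpu_core's
 * type, checked as PERF_TYPE_RAW above), the low 32 bits the event id. */
static void encode_hybrid_hw_event(struct perf_event_attr *attr, __u32 pmu_type)
{
	attr->type   = PERF_TYPE_HARDWARE;
	attr->config = ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
		       (PERF_COUNT_HW_CPU_CYCLES & PERF_HW_EVENT_MASK);
}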
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
index 5309348057..399c4a0a29 100644
--- a/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -113,3 +113,41 @@ int regs_query_register_offset(const char *name)
return roff->offset;
return -EINVAL;
}
+
+struct dwarf_regs_idx {
+ const char *name;
+ int idx;
+};
+
+static const struct dwarf_regs_idx x86_regidx_table[] = {
+ { "rax", 0 }, { "eax", 0 }, { "ax", 0 }, { "al", 0 },
+ { "rdx", 1 }, { "edx", 1 }, { "dx", 1 }, { "dl", 1 },
+ { "rcx", 2 }, { "ecx", 2 }, { "cx", 2 }, { "cl", 2 },
+ { "rbx", 3 }, { "edx", 3 }, { "bx", 3 }, { "bl", 3 },
+ { "rsi", 4 }, { "esi", 4 }, { "si", 4 }, { "sil", 4 },
+ { "rdi", 5 }, { "edi", 5 }, { "di", 5 }, { "dil", 5 },
+ { "rbp", 6 }, { "ebp", 6 }, { "bp", 6 }, { "bpl", 6 },
+ { "rsp", 7 }, { "esp", 7 }, { "sp", 7 }, { "spl", 7 },
+ { "r8", 8 }, { "r8d", 8 }, { "r8w", 8 }, { "r8b", 8 },
+ { "r9", 9 }, { "r9d", 9 }, { "r9w", 9 }, { "r9b", 9 },
+ { "r10", 10 }, { "r10d", 10 }, { "r10w", 10 }, { "r10b", 10 },
+ { "r11", 11 }, { "r11d", 11 }, { "r11w", 11 }, { "r11b", 11 },
+ { "r12", 12 }, { "r12d", 12 }, { "r12w", 12 }, { "r12b", 12 },
+ { "r13", 13 }, { "r13d", 13 }, { "r13w", 13 }, { "r13b", 13 },
+ { "r14", 14 }, { "r14d", 14 }, { "r14w", 14 }, { "r14b", 14 },
+ { "r15", 15 }, { "r15d", 15 }, { "r15w", 15 }, { "r15b", 15 },
+ { "rip", DWARF_REG_PC },
+};
+
+int get_arch_regnum(const char *name)
+{
+ unsigned int i;
+
+ if (*name != '%')
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(x86_regidx_table); i++)
+ if (!strcmp(x86_regidx_table[i].name, name + 1))
+ return x86_regidx_table[i].idx;
+ return -ENOENT;
+}
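get_arch_regnum() maps a register name as printed in x86 disassembly, including the '%' prefix, to its DWARF register number; every sub-register width aliases to the same number. A hypothetical caller (the function itself is the one added above):

#include <stdio.h>

int get_arch_regnum(const char *name);	/* from util/dwarf-regs.c above */

int main(void)
{
	printf("%d\n", get_arch_regnum("%rdi"));	/* 5: rdi/edi/di/dil */
	printf("%d\n", get_arch_regnum("%r10b"));	/* 10 */
	printf("%d\n", get_arch_regnum("rax"));		/* -EINVAL: '%' required */
	printf("%d\n", get_arch_regnum("%xmm0"));	/* -ENOENT: not in table */
	return 0;
}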
diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c
index 5741ffe473..e65b7dbe27 100644
--- a/tools/perf/arch/x86/util/event.c
+++ b/tools/perf/arch/x86/util/event.c
@@ -14,66 +14,79 @@
#if defined(__x86_64__)
-int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine)
+struct perf_event__synthesize_extra_kmaps_cb_args {
+ struct perf_tool *tool;
+ perf_event__handler_t process;
+ struct machine *machine;
+ union perf_event *event;
+};
+
+static int perf_event__synthesize_extra_kmaps_cb(struct map *map, void *data)
{
- int rc = 0;
- struct map_rb_node *pos;
- struct maps *kmaps = machine__kernel_maps(machine);
- union perf_event *event = zalloc(sizeof(event->mmap) +
- machine->id_hdr_size);
+ struct perf_event__synthesize_extra_kmaps_cb_args *args = data;
+ union perf_event *event = args->event;
+ struct kmap *kmap;
+ size_t size;
- if (!event) {
- pr_debug("Not enough memory synthesizing mmap event "
- "for extra kernel maps\n");
- return -1;
- }
+ if (!__map__is_extra_kernel_map(map))
+ return 0;
- maps__for_each_entry(kmaps, pos) {
- struct kmap *kmap;
- size_t size;
- struct map *map = pos->map;
+ kmap = map__kmap(map);
- if (!__map__is_extra_kernel_map(map))
- continue;
+ size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
+ PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
+ args->machine->id_hdr_size;
- kmap = map__kmap(map);
+ memset(event, 0, size);
- size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
- PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
- machine->id_hdr_size;
+ event->mmap.header.type = PERF_RECORD_MMAP;
- memset(event, 0, size);
+ /*
+ * kernel uses 0 for user space maps, see kernel/perf_event.c
+ * __perf_event_mmap
+ */
+ if (machine__is_host(args->machine))
+ event->header.misc = PERF_RECORD_MISC_KERNEL;
+ else
+ event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
- event->mmap.header.type = PERF_RECORD_MMAP;
+ event->mmap.header.size = size;
- /*
- * kernel uses 0 for user space maps, see kernel/perf_event.c
- * __perf_event_mmap
- */
- if (machine__is_host(machine))
- event->header.misc = PERF_RECORD_MISC_KERNEL;
- else
- event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
+ event->mmap.start = map__start(map);
+ event->mmap.len = map__size(map);
+ event->mmap.pgoff = map__pgoff(map);
+ event->mmap.pid = args->machine->pid;
- event->mmap.header.size = size;
+ strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
- event->mmap.start = map__start(map);
- event->mmap.len = map__size(map);
- event->mmap.pgoff = map__pgoff(map);
- event->mmap.pid = machine->pid;
+ if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0)
+ return -1;
- strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
+ return 0;
+}
- if (perf_tool__process_synth_event(tool, event, machine,
- process) != 0) {
- rc = -1;
- break;
- }
+int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
+ perf_event__handler_t process,
+ struct machine *machine)
+{
+ int rc;
+ struct maps *kmaps = machine__kernel_maps(machine);
+ struct perf_event__synthesize_extra_kmaps_cb_args args = {
+ .tool = tool,
+ .process = process,
+ .machine = machine,
+ .event = zalloc(sizeof(args.event->mmap) + machine->id_hdr_size),
+ };
+
+ if (!args.event) {
+ pr_debug("Not enough memory synthesizing mmap event "
+ "for extra kernel maps\n");
+ return -1;
}
- free(event);
+ rc = maps__for_each_map(kmaps, perf_event__synthesize_extra_kmaps_cb, &args);
+
+ free(args.event);
return rc;
}
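The event.c rewrite converts an open-coded maps__for_each_entry() loop into the callback-based maps__for_each_map() iterator: the loop's captured locals move into an args struct, 'continue' becomes 'return 0', and a non-zero return stops the walk. A self-contained illustration of the same refactoring shape on a plain linked list (all names here are illustrative, not perf API):

#include <stdio.h>

struct node { int value; struct node *next; };

struct walk_args { int threshold; int count; };	/* captured loop state */

static int count_cb(struct node *n, void *data)
{
	struct walk_args *args = data;

	if (n->value < args->threshold)
		return 0;		/* 'continue' becomes 'return 0' */
	args->count++;
	return 0;			/* non-zero would stop the walk */
}

static int list_for_each(struct node *head,
			 int (*cb)(struct node *, void *), void *data)
{
	for (struct node *n = head; n; n = n->next) {
		int rc = cb(n, data);

		if (rc)
			return rc;	/* early exit, like the -1 above */
	}
	return 0;
}

int main(void)
{
	struct node c = { 9, NULL }, b = { 2, &c }, a = { 5, &b };
	struct walk_args args = { .threshold = 4, .count = 0 };

	list_for_each(&a, count_cb, &args);
	printf("%d values >= %d\n", args.count, args.threshold);	/* 2 */
	return 0;
}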
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index d2c8cac114..af8ae46475 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -143,7 +143,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
if (!opts->full_auxtrace)
return 0;
- if (opts->full_auxtrace && !perf_cpu_map__empty(cpus)) {
+ if (opts->full_auxtrace && !perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
return -EINVAL;
}
@@ -224,7 +224,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
* In the case of per-cpu mmaps, we need the CPU on the
* AUX event.
*/
- if (!perf_cpu_map__empty(cpus))
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
evsel__set_sample_bit(intel_bts_evsel, CPU);
}
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index fa0c718b9e..d199619df3 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -369,7 +369,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
ui__warning("Intel Processor Trace: TSC not available\n");
}
- per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);
+ per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);
auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -774,7 +774,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* Per-cpu recording needs sched_switch events to distinguish different
* threads.
*/
- if (have_timing_info && !perf_cpu_map__empty(cpus) &&
+ if (have_timing_info && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
!record_opts__no_switch_events(opts)) {
if (perf_can_record_switch_events()) {
bool cpu_wide = !target__none(&opts->target) &&
@@ -832,7 +832,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* In the case of per-cpu mmaps, we need the CPU on the
* AUX event.
*/
- if (!perf_cpu_map__empty(cpus))
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
evsel__set_sample_bit(intel_pt_evsel, CPU);
}
@@ -858,7 +858,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
tracking_evsel->immediate = true;
/* In per-cpu case, always need the time of mmap events etc */
- if (!perf_cpu_map__empty(cpus)) {
+ if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
evsel__set_sample_bit(tracking_evsel, TIME);
/* And the CPU for switch events */
evsel__set_sample_bit(tracking_evsel, CPU);
@@ -870,7 +870,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* Warn the user when we do not have enough information to decode i.e.
* per-cpu with no sched_switch (except workload-only).
*/
- if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
+ if (!ptr->have_sched_switch && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
!target__none(&opts->target) &&
!intel_pt_evsel->core.attr.exclude_user)
ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");