author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
commit:    7f3a4257159dea8e7ef66d1a539dc6df708b8ed3
tree:      bcc69b5f4609f348fac49e2f59e210b29eaea783 /tools/perf/arch
parent:    Adding upstream version 6.9.12.

Adding upstream version 6.10.3. (tag: upstream/6.10.3)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/perf/arch')

 -rw-r--r--  tools/perf/arch/arm/util/cs-etm.c                    381
 -rw-r--r--  tools/perf/arch/arm64/util/arm-spe.c                   4
 -rw-r--r--  tools/perf/arch/arm64/util/header.c                   13
 -rw-r--r--  tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl    1
 -rw-r--r--  tools/perf/arch/powerpc/entry/syscalls/syscall.tbl     1
 -rw-r--r--  tools/perf/arch/powerpc/util/skip-callchain-idx.c      8
 -rw-r--r--  tools/perf/arch/s390/entry/syscalls/syscall.tbl        1
 -rw-r--r--  tools/perf/arch/x86/Build                             14
 -rw-r--r--  tools/perf/arch/x86/entry/syscalls/syscall_64.tbl      3
 -rw-r--r--  tools/perf/arch/x86/tests/Build                       14
 -rwxr-xr-x  tools/perf/arch/x86/tests/gen-insn-x86-dat.sh          2
 -rw-r--r--  tools/perf/arch/x86/util/intel-bts.c                   4
 -rw-r--r--  tools/perf/arch/x86/util/intel-pt.c                   25

13 files changed, 243 insertions, 228 deletions
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 77e6663c17..da62313679 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -66,18 +66,30 @@ static const char * const metadata_ete_ro[] = {
     [CS_ETE_TS_SOURCE]  = "ts_source",
 };
 
-static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
-static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu);
+enum cs_etm_version { CS_NOT_PRESENT, CS_ETMV3, CS_ETMV4, CS_ETE };
 
-static int cs_etm_validate_context_id(struct auxtrace_record *itr,
-                                      struct evsel *evsel, int cpu)
+static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu);
+static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val);
+static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path);
+
+static enum cs_etm_version cs_etm_get_version(struct perf_pmu *cs_etm_pmu,
+                                              struct perf_cpu cpu)
+{
+    if (cs_etm_is_ete(cs_etm_pmu, cpu))
+        return CS_ETE;
+    else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]))
+        return CS_ETMV4;
+    else if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER]))
+        return CS_ETMV3;
+
+    return CS_NOT_PRESENT;
+}
+
+static int cs_etm_validate_context_id(struct perf_pmu *cs_etm_pmu, struct evsel *evsel,
+                                      struct perf_cpu cpu)
 {
-    struct cs_etm_recording *ptr =
-        container_of(itr, struct cs_etm_recording, itr);
-    struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
-    char path[PATH_MAX];
     int err;
-    u32 val;
+    __u64 val;
     u64 contextid = evsel->core.attr.config &
         (perf_pmu__format_bits(cs_etm_pmu, "contextid") |
          perf_pmu__format_bits(cs_etm_pmu, "contextid1") |
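Where the old code answered two separate yes/no questions (cs_etm_is_etmv4(), cs_etm_is_ete()), the new cs_etm_get_version() classifies a CPU's tracer once, by probing which read-only sysfs file its cs_etm PMU exposes. A standalone sketch of that probe-by-file-existence idea; the paths below are illustrative stand-ins for the entries named by the metadata_*_ro tables:

    #include <stdio.h>
    #include <unistd.h>

    enum cs_etm_version { CS_NOT_PRESENT, CS_ETMV3, CS_ETMV4, CS_ETE };

    /* Illustrative sysfs root; real names come from the metadata_*_ro tables. */
    #define CS_ETM_SYSFS "/sys/bus/event_source/devices/cs_etm"

    static int pmu_file_exists(int cpu, const char *name)
    {
        char path[256];

        snprintf(path, sizeof(path), CS_ETM_SYSFS "/cpu%d/%s", cpu, name);
        return access(path, R_OK) == 0;
    }

    static enum cs_etm_version get_version(int cpu)
    {
        /* ETE also exposes the ETMv4 files, so test for it first (the real
         * cs_etm_is_ete() additionally checks TRCDEVARCH.ARCHVER == 5). */
        if (pmu_file_exists(cpu, "mgmt/trcdevarch"))
            return CS_ETE;
        if (pmu_file_exists(cpu, "trcidr/trcidr0"))
            return CS_ETMV4;
        if (pmu_file_exists(cpu, "mgmt/etmccer"))
            return CS_ETMV3;
        return CS_NOT_PRESENT;
    }
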
@@ -87,23 +99,16 @@ static int cs_etm_validate_context_id(struct auxtrace_record *itr,
         return 0;
 
     /* Not supported in etmv3 */
-    if (!cs_etm_is_etmv4(itr, cpu)) {
+    if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_ETMV3) {
         pr_err("%s: contextid not supported in ETMv3, disable with %s/contextid=0/\n",
                CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
         return -EINVAL;
     }
 
     /* Get a handle on TRCIDR2 */
-    snprintf(path, PATH_MAX, "cpu%d/%s",
-             cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
-    err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
-
-    /* There was a problem reading the file, bailing out */
-    if (err != 1) {
-        pr_err("%s: can't read file %s\n", CORESIGHT_ETM_PMU_NAME,
-               path);
+    err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2], &val);
+    if (err)
         return err;
-    }
 
     if (contextid &
         perf_pmu__format_bits(cs_etm_pmu, "contextid1")) {
@@ -140,37 +145,26 @@ static int cs_etm_validate_context_id(struct auxtrace_record *itr,
     return 0;
 }
 
-static int cs_etm_validate_timestamp(struct auxtrace_record *itr,
-                                     struct evsel *evsel, int cpu)
+static int cs_etm_validate_timestamp(struct perf_pmu *cs_etm_pmu, struct evsel *evsel,
+                                     struct perf_cpu cpu)
 {
-    struct cs_etm_recording *ptr =
-        container_of(itr, struct cs_etm_recording, itr);
-    struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
-    char path[PATH_MAX];
     int err;
-    u32 val;
+    __u64 val;
 
     if (!(evsel->core.attr.config &
           perf_pmu__format_bits(cs_etm_pmu, "timestamp")))
         return 0;
 
-    if (!cs_etm_is_etmv4(itr, cpu)) {
+    if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_ETMV3) {
         pr_err("%s: timestamp not supported in ETMv3, disable with %s/timestamp=0/\n",
                CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
         return -EINVAL;
     }
 
     /* Get a handle on TRCIRD0 */
-    snprintf(path, PATH_MAX, "cpu%d/%s",
-             cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
-    err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
-
-    /* There was a problem reading the file, bailing out */
-    if (err != 1) {
-        pr_err("%s: can't read file %s\n",
-               CORESIGHT_ETM_PMU_NAME, path);
+    err = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0], &val);
+    if (err)
         return err;
-    }
 
     /*
      * TRCIDR0.TSSIZE, bit [28-24], indicates whether global timestamping
@@ -187,6 +181,13 @@ static int cs_etm_validate_timestamp(struct auxtrace_record *itr,
     return 0;
 }
 
+static struct perf_pmu *cs_etm_get_pmu(struct auxtrace_record *itr)
+{
+    struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
+
+    return ptr->cs_etm_pmu;
+}
+
 /*
  * Check whether the requested timestamp and contextid options should be
  * available on all requested CPUs and if not, tell the user how to override.
@@ -194,41 +195,45 @@ static int cs_etm_validate_timestamp(struct auxtrace_record *itr,
  * first is better. In theory the kernel could still disable the option for
  * some other reason so this is best effort only.
  */
-static int cs_etm_validate_config(struct auxtrace_record *itr,
+static int cs_etm_validate_config(struct perf_pmu *cs_etm_pmu,
                                   struct evsel *evsel)
 {
-    int i, err = -EINVAL;
+    int idx, err = 0;
     struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
-    struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
+    struct perf_cpu_map *intersect_cpus;
+    struct perf_cpu cpu;
 
-    /* Set option of each CPU we have */
-    for (i = 0; i < cpu__max_cpu().cpu; i++) {
-        struct perf_cpu cpu = { .cpu = i, };
-
-        /*
-         * In per-cpu case, do the validation for CPUs to work with.
-         * In per-thread case, the CPU map is empty. Since the traced
-         * program can run on any CPUs in this case, thus don't skip
-         * validation.
-         */
-        if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus) &&
-            !perf_cpu_map__has(event_cpus, cpu))
-            continue;
+    /*
+     * Set option of each CPU we have. In per-cpu case, do the validation
+     * for CPUs to work with. In per-thread case, the CPU map has the "any"
+     * CPU value. Since the traced program can run on any CPUs in this case,
+     * thus don't skip validation.
+     */
+    if (!perf_cpu_map__has_any_cpu(event_cpus)) {
+        struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
 
-        if (!perf_cpu_map__has(online_cpus, cpu))
-            continue;
+        intersect_cpus = perf_cpu_map__intersect(event_cpus, online_cpus);
+        perf_cpu_map__put(online_cpus);
+    } else {
+        intersect_cpus = perf_cpu_map__new_online_cpus();
+    }
 
-        err = cs_etm_validate_context_id(itr, evsel, i);
+    perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
+        if (cs_etm_get_version(cs_etm_pmu, cpu) == CS_NOT_PRESENT) {
+            pr_err("%s: Not found on CPU %d. Check hardware and firmware support and that all Coresight drivers are loaded\n",
+                   CORESIGHT_ETM_PMU_NAME, cpu.cpu);
+            return -EINVAL;
+        }
+        err = cs_etm_validate_context_id(cs_etm_pmu, evsel, cpu);
         if (err)
-            goto out;
-        err = cs_etm_validate_timestamp(itr, evsel, i);
+            break;
+
+        err = cs_etm_validate_timestamp(cs_etm_pmu, evsel, cpu);
         if (err)
-            goto out;
+            break;
     }
 
-    err = 0;
-out:
-    perf_cpu_map__put(online_cpus);
+    perf_cpu_map__put(intersect_cpus);
     return err;
 }
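cs_etm_validate_config() now validates exactly the intersection of the user-requested CPU map and the online map, instead of iterating 0..max_cpu and filtering inside the loop. The merge itself is the classic sorted-list intersection; a minimal sketch over plain int arrays (perf_cpu_map__intersect() is the libperf equivalent, and cpu maps are kept sorted):

    #include <stddef.h>

    /* Sorted-list intersection: keep requested CPUs that are also online.
     * Returns the number of CPUs written to out. Both inputs are sorted,
     * mirroring how libperf keeps perf_cpu_map contents ordered. */
    static size_t intersect_cpus(const int *requested, size_t n_req,
                                 const int *online, size_t n_on, int *out)
    {
        size_t i = 0, j = 0, k = 0;

        while (i < n_req && j < n_on) {
            if (requested[i] < online[j]) {
                i++;
            } else if (requested[i] > online[j]) {
                j++;
            } else {
                out[k++] = requested[i];
                i++;
                j++;
            }
        }
        return k;
    }
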
@@ -435,7 +440,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
      * Also the case of per-cpu mmaps, need the contextID in order to be notified
      * when a context switch happened.
      */
-    if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
+    if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
         evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
                                    "timestamp", 1);
         evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
@@ -461,10 +466,10 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
         evsel->core.attr.sample_period = 1;
 
         /* In per-cpu case, always need the time of mmap events etc */
-        if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
+        if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus))
             evsel__set_sample_bit(evsel, TIME);
 
-    err = cs_etm_validate_config(itr, cs_etm_evsel);
+    err = cs_etm_validate_config(cs_etm_pmu, cs_etm_evsel);
 out:
     return err;
 }
@@ -530,48 +535,35 @@ static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
 }
 
 static size_t
-cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
-                      struct evlist *evlist __maybe_unused)
+cs_etm_info_priv_size(struct auxtrace_record *itr,
+                      struct evlist *evlist)
 {
-    int i;
+    int idx;
     int etmv3 = 0, etmv4 = 0, ete = 0;
     struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
-    struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
-
-    /* cpu map is not empty, we have specific CPUs to work with */
-    if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
-        for (i = 0; i < cpu__max_cpu().cpu; i++) {
-            struct perf_cpu cpu = { .cpu = i, };
+    struct perf_cpu_map *intersect_cpus;
+    struct perf_cpu cpu;
+    struct perf_pmu *cs_etm_pmu = cs_etm_get_pmu(itr);
 
-            if (!perf_cpu_map__has(event_cpus, cpu) ||
-                !perf_cpu_map__has(online_cpus, cpu))
-                continue;
+    if (!perf_cpu_map__has_any_cpu(event_cpus)) {
+        /* cpu map is not "any" CPU, we have specific CPUs to work with */
+        struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
 
-            if (cs_etm_is_ete(itr, i))
-                ete++;
-            else if (cs_etm_is_etmv4(itr, i))
-                etmv4++;
-            else
-                etmv3++;
-        }
+        intersect_cpus = perf_cpu_map__intersect(event_cpus, online_cpus);
+        perf_cpu_map__put(online_cpus);
     } else {
-        /* get configuration for all CPUs in the system */
-        for (i = 0; i < cpu__max_cpu().cpu; i++) {
-            struct perf_cpu cpu = { .cpu = i, };
-
-            if (!perf_cpu_map__has(online_cpus, cpu))
-                continue;
-
-            if (cs_etm_is_ete(itr, i))
-                ete++;
-            else if (cs_etm_is_etmv4(itr, i))
-                etmv4++;
-            else
-                etmv3++;
-        }
+        /* Event can be "any" CPU so count all online CPUs. */
+        intersect_cpus = perf_cpu_map__new_online_cpus();
     }
+    /*
+     * Count number of each type of ETM. Don't count if that CPU has
+     * CS_NOT_PRESENT.
+     */
+    perf_cpu_map__for_each_cpu_skip_any(cpu, idx, intersect_cpus) {
+        enum cs_etm_version v = cs_etm_get_version(cs_etm_pmu, cpu);
 
-    perf_cpu_map__put(online_cpus);
+        ete += v == CS_ETE;
+        etmv4 += v == CS_ETMV4;
+        etmv3 += v == CS_ETMV3;
+    }
+    perf_cpu_map__put(intersect_cpus);
 
     return (CS_ETM_HEADER_SIZE +
            (ete   * CS_ETE_PRIV_SIZE) +
@@ -579,66 +571,49 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
            (etmv3 * CS_ETMV3_PRIV_SIZE));
 }
 
-static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
-{
-    bool ret = false;
-    char path[PATH_MAX];
-    int scan;
-    unsigned int val;
-    struct cs_etm_recording *ptr =
-        container_of(itr, struct cs_etm_recording, itr);
-    struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
-
-    /* Take any of the RO files for ETMv4 and see if it present */
-    snprintf(path, PATH_MAX, "cpu%d/%s",
-             cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
-    scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
-
-    /* The file was read successfully, we have a winner */
-    if (scan == 1)
-        ret = true;
-
-    return ret;
-}
-
-static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
+static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val)
 {
     char pmu_path[PATH_MAX];
     int scan;
-    unsigned int val = 0;
 
     /* Get RO metadata from sysfs */
-    snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
+    snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);
 
-    scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
-    if (scan != 1)
+    scan = perf_pmu__scan_file(pmu, pmu_path, "%llx", val);
+    if (scan != 1) {
         pr_err("%s: error reading: %s\n", __func__, pmu_path);
+        return -EINVAL;
+    }
 
-    return val;
+    return 0;
 }
 
-static int cs_etm_get_ro_signed(struct perf_pmu *pmu, int cpu, const char *path)
+static int cs_etm_get_ro_signed(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path,
+                                __u64 *out_val)
 {
     char pmu_path[PATH_MAX];
     int scan;
     int val = 0;
 
     /* Get RO metadata from sysfs */
-    snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
+    snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);
 
     scan = perf_pmu__scan_file(pmu, pmu_path, "%d", &val);
-    if (scan != 1)
+    if (scan != 1) {
         pr_err("%s: error reading: %s\n", __func__, pmu_path);
+        return -EINVAL;
+    }
 
-    return val;
+    *out_val = (__u64) val;
+    return 0;
 }
 
-static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, int cpu, const char *path)
+static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path)
 {
     char pmu_path[PATH_MAX];
 
     /* Get RO metadata from sysfs */
-    snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
+    snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu.cpu, path);
 
     return perf_pmu__file_exists(pmu, pmu_path);
 }
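cs_etm_get_ro() previously returned the parsed value directly, so a read failure was indistinguishable from a real 0; it now returns an error code and delivers the value through an out parameter, and callers such as cs_etm_validate_context_id() propagate the failure. The caller-side shape, sketched with a hypothetical read_u64() helper in place of the perf-internal perf_pmu__scan_file():

    #include <stdio.h>

    typedef unsigned long long u64;

    /* Hypothetical stand-in for cs_etm_get_ro(): 0 on success, -1 on failure,
     * value delivered through the out parameter. */
    static int read_u64(const char *path, u64 *val)
    {
        FILE *f = fopen(path, "r");
        int ok;

        if (!f)
            return -1;
        ok = fscanf(f, "%llx", val) == 1;
        fclose(f);
        return ok ? 0 : -1;
    }

    static int validate(const char *path)
    {
        u64 trcidr2;

        /* Propagate the failure instead of consuming a garbage value. */
        if (read_u64(path, &trcidr2))
            return -1;

        /* e.g. check a field such as TRCIDR2.CIDSIZE, bits [9:5] */
        return ((trcidr2 >> 5) & 0x1f) ? 0 : -1;
    }
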
@@ -651,16 +626,14 @@ static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, int cpu, const char *pa
 #define TRCDEVARCH_ARCHVER_MASK  GENMASK(15, 12)
 #define TRCDEVARCH_ARCHVER(x)    (((x) & TRCDEVARCH_ARCHVER_MASK) >> TRCDEVARCH_ARCHVER_SHIFT)
 
-static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu)
+static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu)
 {
-    struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
-    struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
-    int trcdevarch;
+    __u64 trcdevarch;
 
     if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]))
         return false;
 
-    trcdevarch = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH], &trcdevarch);
     /*
      * ETE if ARCHVER is 5 (ARCHVER is 4 for ETM) and ARCHPART is 0xA13.
      * See ETM_DEVARCH_ETE_ARCH in coresight-etm4x.h
@@ -668,7 +641,12 @@ static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu)
     return TRCDEVARCH_ARCHVER(trcdevarch) == 5 && TRCDEVARCH_ARCHPART(trcdevarch) == 0xA13;
 }
 
-static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, int cpu)
+static __u64 cs_etm_get_legacy_trace_id(struct perf_cpu cpu)
+{
+    return CORESIGHT_LEGACY_CPU_TRACE_ID(cpu.cpu);
+}
+
+static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu)
 {
     struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
     struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
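cs_etm_is_ete() decides ETE vs ETMv4 from two TRCDEVARCH fields: ARCHVER in bits [15:12] (5 for ETE, 4 for ETM) and ARCHPART, which must be 0xA13. A worked check of the mask-and-shift arithmetic; the ARCHPART mask below is an assumption for this sketch, the kernel defines it alongside the ARCHVER macros shown above:

    #include <assert.h>

    #define TRCDEVARCH_ARCHVER_SHIFT 12
    #define TRCDEVARCH_ARCHVER_MASK  (0xfULL << TRCDEVARCH_ARCHVER_SHIFT)  /* GENMASK(15, 12) */
    #define TRCDEVARCH_ARCHVER(x)    (((x) & TRCDEVARCH_ARCHVER_MASK) >> TRCDEVARCH_ARCHVER_SHIFT)
    /* ARCHPART mask assumed as bits [11:0] for this sketch */
    #define TRCDEVARCH_ARCHPART(x)   ((x) & 0xfffULL)

    int main(void)
    {
        /* Example TRCDEVARCH value with ARCHVER = 5 (ETE) and ARCHPART = 0xA13 */
        unsigned long long trcdevarch = (5ULL << TRCDEVARCH_ARCHVER_SHIFT) | 0xA13;

        assert(TRCDEVARCH_ARCHVER(trcdevarch) == 5);
        assert(TRCDEVARCH_ARCHPART(trcdevarch) == 0xA13);
        return 0;
    }
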
@@ -676,33 +654,32 @@ static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr,
     /* Get trace configuration register */
     data[CS_ETMV4_TRCCONFIGR] = cs_etmv4_get_config(itr);
     /* traceID set to legacy version, in case new perf running on older system */
-    data[CS_ETMV4_TRCTRACEIDR] =
-        CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
+    data[CS_ETMV4_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) |
+                                 CORESIGHT_TRACE_ID_UNUSED_FLAG;
 
     /* Get read-only information from sysFS */
-    data[CS_ETMV4_TRCIDR0] = cs_etm_get_ro(cs_etm_pmu, cpu,
-                                           metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
-    data[CS_ETMV4_TRCIDR1] = cs_etm_get_ro(cs_etm_pmu, cpu,
-                                           metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
-    data[CS_ETMV4_TRCIDR2] = cs_etm_get_ro(cs_etm_pmu, cpu,
-                                           metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
-    data[CS_ETMV4_TRCIDR8] = cs_etm_get_ro(cs_etm_pmu, cpu,
-                                           metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
-    data[CS_ETMV4_TRCAUTHSTATUS] = cs_etm_get_ro(cs_etm_pmu, cpu,
-                                                 metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0],
+                  &data[CS_ETMV4_TRCIDR0]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR1],
+                  &data[CS_ETMV4_TRCIDR1]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2],
+                  &data[CS_ETMV4_TRCIDR2]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR8],
+                  &data[CS_ETMV4_TRCIDR8]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS],
+                  &data[CS_ETMV4_TRCAUTHSTATUS]);
 
     /* Kernels older than 5.19 may not expose ts_source */
-    if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]))
-        data[CS_ETMV4_TS_SOURCE] = (__u64) cs_etm_get_ro_signed(cs_etm_pmu, cpu,
-                metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]);
-    else {
+    if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]) ||
+        cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE],
+                             &data[CS_ETMV4_TS_SOURCE])) {
         pr_debug3("[%03d] pmu file 'ts_source' not found. Fallback to safe value (-1)\n",
-                  cpu);
+                  cpu.cpu);
         data[CS_ETMV4_TS_SOURCE] = (__u64) -1;
     }
 }
 
-static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, int cpu)
+static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, struct perf_cpu cpu)
 {
     struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
     struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
@@ -710,83 +687,85 @@ static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, in
     /* Get trace configuration register */
     data[CS_ETE_TRCCONFIGR] = cs_etmv4_get_config(itr);
     /* traceID set to legacy version, in case new perf running on older system */
-    data[CS_ETE_TRCTRACEIDR] =
-        CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
+    data[CS_ETE_TRCTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
 
     /* Get read-only information from sysFS */
-    data[CS_ETE_TRCIDR0] = cs_etm_get_ro(cs_etm_pmu, cpu,
-                                         metadata_ete_ro[CS_ETE_TRCIDR0]);
-    data[CS_ETE_TRCIDR1] = cs_etm_get_ro(cs_etm_pmu, cpu,
-                                         metadata_ete_ro[CS_ETE_TRCIDR1]);
-    data[CS_ETE_TRCIDR2] = cs_etm_get_ro(cs_etm_pmu, cpu,
-                                         metadata_ete_ro[CS_ETE_TRCIDR2]);
-    data[CS_ETE_TRCIDR8] = cs_etm_get_ro(cs_etm_pmu, cpu,
-                                         metadata_ete_ro[CS_ETE_TRCIDR8]);
-    data[CS_ETE_TRCAUTHSTATUS] = cs_etm_get_ro(cs_etm_pmu, cpu,
-                                               metadata_ete_ro[CS_ETE_TRCAUTHSTATUS]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR0], &data[CS_ETE_TRCIDR0]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR1], &data[CS_ETE_TRCIDR1]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR2], &data[CS_ETE_TRCIDR2]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCIDR8], &data[CS_ETE_TRCIDR8]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCAUTHSTATUS],
+                  &data[CS_ETE_TRCAUTHSTATUS]);
     /* ETE uses the same registers as ETMv4 plus TRCDEVARCH */
-    data[CS_ETE_TRCDEVARCH] = cs_etm_get_ro(cs_etm_pmu, cpu,
-                                            metadata_ete_ro[CS_ETE_TRCDEVARCH]);
+    cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH],
+                  &data[CS_ETE_TRCDEVARCH]);
 
     /* Kernels older than 5.19 may not expose ts_source */
-    if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE]))
-        data[CS_ETE_TS_SOURCE] = (__u64) cs_etm_get_ro_signed(cs_etm_pmu, cpu,
-                metadata_ete_ro[CS_ETE_TS_SOURCE]);
-    else {
+    if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE]) ||
+        cs_etm_get_ro_signed(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE],
+                             &data[CS_ETE_TS_SOURCE])) {
         pr_debug3("[%03d] pmu file 'ts_source' not found. Fallback to safe value (-1)\n",
-                  cpu);
+                  cpu.cpu);
         data[CS_ETE_TS_SOURCE] = (__u64) -1;
     }
 }
 
-static void cs_etm_get_metadata(int cpu, u32 *offset,
+static void cs_etm_get_metadata(struct perf_cpu cpu, u32 *offset,
                                 struct auxtrace_record *itr,
                                 struct perf_record_auxtrace_info *info)
 {
     u32 increment, nr_trc_params;
     u64 magic;
-    struct cs_etm_recording *ptr =
-        container_of(itr, struct cs_etm_recording, itr);
-    struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
+    struct perf_pmu *cs_etm_pmu = cs_etm_get_pmu(itr);
 
     /* first see what kind of tracer this cpu is affined to */
-    if (cs_etm_is_ete(itr, cpu)) {
+    switch (cs_etm_get_version(cs_etm_pmu, cpu)) {
+    case CS_ETE:
         magic = __perf_cs_ete_magic;
         cs_etm_save_ete_header(&info->priv[*offset], itr, cpu);
 
         /* How much space was used */
         increment = CS_ETE_PRIV_MAX;
         nr_trc_params = CS_ETE_PRIV_MAX - CS_ETM_COMMON_BLK_MAX_V1;
-    } else if (cs_etm_is_etmv4(itr, cpu)) {
+        break;
+
+    case CS_ETMV4:
         magic = __perf_cs_etmv4_magic;
         cs_etm_save_etmv4_header(&info->priv[*offset], itr, cpu);
 
         /* How much space was used */
         increment = CS_ETMV4_PRIV_MAX;
         nr_trc_params = CS_ETMV4_PRIV_MAX - CS_ETMV4_TRCCONFIGR;
-    } else {
+        break;
+
+    case CS_ETMV3:
         magic = __perf_cs_etmv3_magic;
         /* Get configuration register */
         info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
         /* traceID set to legacy value in case new perf running on old system */
-        info->priv[*offset + CS_ETM_ETMTRACEIDR] =
-            CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
+        info->priv[*offset + CS_ETM_ETMTRACEIDR] = cs_etm_get_legacy_trace_id(cpu) |
+                                                   CORESIGHT_TRACE_ID_UNUSED_FLAG;
         /* Get read-only information from sysFS */
-        info->priv[*offset + CS_ETM_ETMCCER] =
-            cs_etm_get_ro(cs_etm_pmu, cpu,
-                          metadata_etmv3_ro[CS_ETM_ETMCCER]);
-        info->priv[*offset + CS_ETM_ETMIDR] =
-            cs_etm_get_ro(cs_etm_pmu, cpu,
-                          metadata_etmv3_ro[CS_ETM_ETMIDR]);
+        cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMCCER],
+                      &info->priv[*offset + CS_ETM_ETMCCER]);
+        cs_etm_get_ro(cs_etm_pmu, cpu, metadata_etmv3_ro[CS_ETM_ETMIDR],
+                      &info->priv[*offset + CS_ETM_ETMIDR]);
 
         /* How much space was used */
         increment = CS_ETM_PRIV_MAX;
         nr_trc_params = CS_ETM_PRIV_MAX - CS_ETM_ETMCR;
+        break;
+
+    default:
+    case CS_NOT_PRESENT:
+        /* Unreachable, CPUs already validated in cs_etm_validate_config() */
+        assert(true);
+        return;
     }
 
     /* Build generic header portion */
     info->priv[*offset + CS_ETM_MAGIC] = magic;
-    info->priv[*offset + CS_ETM_CPU] = cpu;
+    info->priv[*offset + CS_ETM_CPU] = cpu.cpu;
     info->priv[*offset + CS_ETM_NR_TRC_PARAMS] = nr_trc_params;
     /* Where the next CPU entry should start from */
     *offset += increment;
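Each per-CPU metadata block written by cs_etm_get_metadata() starts with the same generic header (magic, CPU number, nr_trc_params) and is followed by a version-specific register dump, so the buffer reserved by cs_etm_info_priv_size() is a simple weighted sum over the per-version CPU counts. A sketch of that arithmetic with illustrative word counts (the real CS_*_PRIV_SIZE constants differ):

    #include <stdio.h>

    /* Illustrative u64-word counts; the kernel's CS_*_PRIV_SIZE values differ. */
    #define HEADER_WORDS 3   /* global header: version, PMU type, snapshot flag */
    #define ETMV3_WORDS  5   /* per-CPU: magic, cpu, nr_trc_params + registers */
    #define ETMV4_WORDS  9
    #define ETE_WORDS    10

    static unsigned int priv_words(int etmv3, int etmv4, int ete)
    {
        /* Mirrors cs_etm_info_priv_size(): header plus one block per CPU. */
        return HEADER_WORDS + ete * ETE_WORDS + etmv4 * ETMV4_WORDS + etmv3 * ETMV3_WORDS;
    }

    int main(void)
    {
        /* e.g. a big.LITTLE part with 4 ETE and 4 ETMv4 CPUs */
        printf("%u u64 words\n", priv_words(0, 4, 4));
        return 0;
    }
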
@@ -806,6 +785,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
     struct cs_etm_recording *ptr =
         container_of(itr, struct cs_etm_recording, itr);
     struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
+    struct perf_cpu cpu;
 
     if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
         return -EINVAL;
@@ -813,16 +793,13 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
     if (!session->evlist->core.nr_mmaps)
         return -EINVAL;
 
-    /* If the cpu_map is empty all online CPUs are involved */
-    if (perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) {
+    /* If the cpu_map has the "any" CPU all online CPUs are involved */
+    if (perf_cpu_map__has_any_cpu(event_cpus)) {
         cpu_map = online_cpus;
     } else {
         /* Make sure all specified CPUs are online */
-        for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) {
-            struct perf_cpu cpu = { .cpu = i, };
-
-            if (perf_cpu_map__has(event_cpus, cpu) &&
-                !perf_cpu_map__has(online_cpus, cpu))
+        perf_cpu_map__for_each_cpu(cpu, i, event_cpus) {
+            if (!perf_cpu_map__has(online_cpus, cpu))
                 return -EINVAL;
         }
 
@@ -842,11 +819,9 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
 
     offset = CS_ETM_SNAPSHOT + 1;
 
-    for (i = 0; i < cpu__max_cpu().cpu && offset < priv_size; i++) {
-        struct perf_cpu cpu = { .cpu = i, };
-
-        if (perf_cpu_map__has(cpu_map, cpu))
-            cs_etm_get_metadata(i, &offset, itr, info);
+    perf_cpu_map__for_each_cpu(cpu, i, cpu_map) {
+        assert(offset < priv_size);
+        cs_etm_get_metadata(cpu, &offset, itr, info);
     }
 
     perf_cpu_map__put(online_cpus);
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 51ccbfd3d2..0b52e67edb 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -232,7 +232,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
      * In the case of per-cpu mmaps, sample CPU for AUX event;
      * also enable the timestamp tracing for samples correlation.
      */
-    if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
+    if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
         evsel__set_sample_bit(arm_spe_evsel, CPU);
         evsel__set_config_if_unset(arm_spe_pmu, arm_spe_evsel,
                                    "ts_enable", 1);
@@ -265,7 +265,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
     tracking_evsel->core.attr.sample_period = 1;
 
     /* In per-cpu case, always need the time of mmap events etc */
-    if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
+    if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
         evsel__set_sample_bit(tracking_evsel, TIME);
         evsel__set_sample_bit(tracking_evsel, CPU);
diff --git a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c
index 9703749915..741df3614a 100644
--- a/tools/perf/arch/arm64/util/header.c
+++ b/tools/perf/arch/arm64/util/header.c
@@ -4,8 +4,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <perf/cpumap.h>
-#include <util/cpumap.h>
-#include <internal/cpumap.h>
 #include <api/fs/fs.h>
 #include <errno.h>
 #include "debug.h"
@@ -19,20 +17,18 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
 {
     const char *sysfs = sysfs__mountpoint();
-    int cpu;
-    int ret = EINVAL;
+    struct perf_cpu cpu;
+    int idx, ret = EINVAL;
 
     if (!sysfs || sz < MIDR_SIZE)
         return EINVAL;
 
-    cpus = perf_cpu_map__get(cpus);
-
-    for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
+    perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
         char path[PATH_MAX];
         FILE *file;
 
         scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d" MIDR,
-                  sysfs, RC_CHK_ACCESS(cpus)->map[cpu].cpu);
+                  sysfs, cpu.cpu);
 
         file = fopen(path, "r");
         if (!file) {
@@ -51,7 +47,6 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
         break;
     }
 
-    perf_cpu_map__put(cpus);
     return ret;
 }
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 532b855df5..1464c6be6e 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -376,3 +376,4 @@
 459	n64	lsm_get_self_attr		sys_lsm_get_self_attr
 460	n64	lsm_set_self_attr		sys_lsm_set_self_attr
 461	n64	lsm_list_modules		sys_lsm_list_modules
+462	n64	mseal				sys_mseal
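This and the powerpc, s390 and x86 table updates below wire up mseal(2), new in 6.10, which seals a memory range against later permission or mapping changes. A hedged usage sketch via raw syscall(2), since libc wrappers were not yet common when this landed; the number 462 is taken from these tables:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_mseal
    #define __NR_mseal 462    /* syscall number from the tables in this patch */
    #endif

    int main(void)
    {
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
            return 1;

        /* Seal the mapping; the flags argument must currently be 0. */
        if (syscall(__NR_mseal, p, len, 0) != 0) {
            perror("mseal");   /* e.g. ENOSYS on kernels before 6.10 */
            return 1;
        }

        /* Further permission changes should now fail with EPERM. */
        if (mprotect(p, len, PROT_READ) != 0)
            perror("mprotect on sealed mapping");
        return 0;
    }
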
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 17173b82ca..3656f1ca7a 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -548,3 +548,4 @@
 459	common	lsm_get_self_attr		sys_lsm_get_self_attr
 460	common	lsm_set_self_attr		sys_lsm_set_self_attr
 461	common	lsm_list_modules		sys_lsm_list_modules
+462	common	mseal				sys_mseal
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 5f3edb3004..356786432f 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -159,9 +159,9 @@ static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc)
     Dwarf_Addr	start = pc;
     Dwarf_Addr	end = pc;
     bool	signalp;
-    const char	*exec_file = dso->long_name;
+    const char	*exec_file = dso__long_name(dso);
 
-    dwfl = dso->dwfl;
+    dwfl = RC_CHK_ACCESS(dso)->dwfl;
 
     if (!dwfl) {
         dwfl = dwfl_begin(&offline_callbacks);
@@ -183,7 +183,7 @@ static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc)
             dwfl_end(dwfl);
             goto out;
         }
-        dso->dwfl = dwfl;
+        RC_CHK_ACCESS(dso)->dwfl = dwfl;
     }
 
     mod = dwfl_addrmodule(dwfl, pc);
@@ -267,7 +267,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
     rc = check_return_addr(dso, map__start(al.map), ip);
 
     pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n",
-             dso->long_name, al.sym->name, ip, rc);
+             dso__long_name(dso), al.sym->name, ip, rc);
 
     if (rc == 0) {
         /*
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index 095bb86339..bd0fee24ad 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -464,3 +464,4 @@
 459  common	lsm_get_self_attr	sys_lsm_get_self_attr		sys_lsm_get_self_attr
 460  common	lsm_set_self_attr	sys_lsm_set_self_attr		sys_lsm_set_self_attr
 461  common	lsm_list_modules	sys_lsm_list_modules		sys_lsm_list_modules
+462  common	mseal			sys_mseal			sys_mseal
diff --git a/tools/perf/arch/x86/Build b/tools/perf/arch/x86/Build
index a7dd46a5b6..ed37013b42 100644
--- a/tools/perf/arch/x86/Build
+++ b/tools/perf/arch/x86/Build
@@ -1,2 +1,16 @@
 perf-y += util/
 perf-y += tests/
+
+ifdef SHELLCHECK
+  SHELL_TESTS := entry/syscalls/syscalltbl.sh
+  TEST_LOGS := $(SHELL_TESTS:%=%.shellcheck_log)
+else
+  SHELL_TESTS :=
+  TEST_LOGS :=
+endif
+
+$(OUTPUT)%.shellcheck_log: %
+	$(call rule_mkdir)
+	$(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
+
+perf-y += $(TEST_LOGS)
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 7e8d46f414..a396f6e6ab 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -374,7 +374,7 @@
 450	common	set_mempolicy_home_node	sys_set_mempolicy_home_node
 451	common	cachestat		sys_cachestat
 452	common	fchmodat2		sys_fchmodat2
-453	64	map_shadow_stack	sys_map_shadow_stack
+453	common	map_shadow_stack	sys_map_shadow_stack
 454	common	futex_wake		sys_futex_wake
 455	common	futex_wait		sys_futex_wait
 456	common	futex_requeue		sys_futex_requeue
@@ -383,6 +383,7 @@
 459	common	lsm_get_self_attr	sys_lsm_get_self_attr
 460	common	lsm_set_self_attr	sys_lsm_set_self_attr
 461	common	lsm_list_modules	sys_lsm_list_modules
+462	common	mseal			sys_mseal
 
 #
 # Due to a historical design error, certain syscalls are numbered differently
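The skip-callchain-idx.c change above is part of perf's reference-count-checked dso conversion: direct field reads become accessors like dso__long_name(), and remaining raw accesses go through RC_CHK_ACCESS(). A generic sketch of that accessor shape; the types and macro body here are simplified stand-ins, not perf's real definitions:

    #include <stdio.h>

    /* Hypothetical refcount-checked wrapper, loosely modelled on the RC_CHK idiom. */
    struct dso_data { const char *long_name; };
    struct dso { struct dso_data *data; };

    #define RC_CHK_ACCESS(x) ((x)->data)  /* real perf validates the reference here */

    static inline const char *dso__long_name(const struct dso *dso)
    {
        return RC_CHK_ACCESS(dso)->long_name;
    }

    int main(void)
    {
        struct dso_data d = { .long_name = "/usr/lib/libc.so.6" };
        struct dso dso = { .data = &d };

        printf("%s\n", dso__long_name(&dso));
        return 0;
    }
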
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index b87f46e5fe..c1e3b7d395 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -10,3 +10,17 @@ perf-$(CONFIG_AUXTRACE) += insn-x86.o
 endif
 perf-$(CONFIG_X86_64) += bp-modify.o
 perf-y += amd-ibs-via-core-pmu.o
+
+ifdef SHELLCHECK
+  SHELL_TESTS := gen-insn-x86-dat.sh
+  TEST_LOGS := $(SHELL_TESTS:%=%.shellcheck_log)
+else
+  SHELL_TESTS :=
+  TEST_LOGS :=
+endif
+
+$(OUTPUT)%.shellcheck_log: %
+	$(call rule_mkdir)
+	$(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
+
+perf-y += $(TEST_LOGS)
diff --git a/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh b/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh
index 0d0a003a9c..89c46532cd 100755
--- a/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh
+++ b/tools/perf/arch/x86/tests/gen-insn-x86-dat.sh
@@ -11,7 +11,7 @@ if [ "$(uname -m)" != "x86_64" ]; then
 	exit 1
 fi
 
-cd $(dirname $0)
+cd "$(dirname $0)"
 
 trap 'echo "Might need a more recent version of binutils"' EXIT
 
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index af8ae46475..34696f3d3d 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -143,7 +143,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
     if (!opts->full_auxtrace)
         return 0;
 
-    if (opts->full_auxtrace && !perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
+    if (opts->full_auxtrace && !perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
         pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
         return -EINVAL;
     }
@@ -224,7 +224,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
          * In the case of per-cpu mmaps, we need the CPU on the
          * AUX event.
          */
-        if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
+        if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus))
             evsel__set_sample_bit(intel_bts_evsel, CPU);
     }
 
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index d199619df3..4b710e8759 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -32,6 +32,7 @@
 #include "../../../util/tsc.h"
 #include <internal/lib.h> // page_size
 #include "../../../util/intel-pt.h"
+#include <api/fs/fs.h>
 
 #define KiB(x) ((x) * 1024)
 #define MiB(x) ((x) * 1024 * 1024)
@@ -369,7 +370,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
             ui__warning("Intel Processor Trace: TSC not available\n");
     }
 
-    per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);
+    per_cpu_mmaps = !perf_cpu_map__is_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus);
 
     auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
     auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -428,6 +429,16 @@ static int intel_pt_track_switches(struct evlist *evlist)
 }
 #endif
 
+static bool intel_pt_exclude_guest(void)
+{
+    int pt_mode;
+
+    if (sysfs__read_int("module/kvm_intel/parameters/pt_mode", &pt_mode))
+        pt_mode = 0;
+
+    return pt_mode == 1;
+}
+
 static void intel_pt_valid_str(char *str, size_t len, u64 valid)
 {
     unsigned int val, last = 0, state = 1;
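intel_pt_exclude_guest() above reads the kvm_intel module's pt_mode parameter: when KVM runs Intel PT in host/guest mode (pt_mode == 1), guest trace is handled separately, so the host event should set exclude_guest. The same sysfs read without perf's sysfs__read_int() helper, treating a missing file (kvm_intel not loaded) as mode 0:

    #include <stdio.h>

    /* Returns 1 when KVM runs Intel PT in host/guest mode (pt_mode == 1),
     * meaning host tracing should set exclude_guest. A missing file, e.g.
     * kvm_intel not loaded, is treated as system mode (0). */
    static int pt_exclude_guest(void)
    {
        FILE *f = fopen("/sys/module/kvm_intel/parameters/pt_mode", "r");
        int pt_mode = 0;

        if (f) {
            if (fscanf(f, "%d", &pt_mode) != 1)
                pt_mode = 0;
            fclose(f);
        }
        return pt_mode == 1;
    }
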
@@ -620,6 +631,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
         }
         evsel->core.attr.freq = 0;
         evsel->core.attr.sample_period = 1;
+        evsel->core.attr.exclude_guest = intel_pt_exclude_guest();
         evsel->no_aux_samples = true;
         evsel->needs_auxtrace_mmap = true;
         intel_pt_evsel = evsel;
@@ -758,7 +770,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
     }
 
     if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
-        u32 aux_watermark = opts->auxtrace_mmap_pages * page_size / 4;
+        size_t aw = opts->auxtrace_mmap_pages * (size_t)page_size / 4;
+        u32 aux_watermark = aw > UINT_MAX ? UINT_MAX : aw;
 
         intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
     }
 
@@ -774,7 +787,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
      * Per-cpu recording needs sched_switch events to distinguish different
      * threads.
      */
-    if (have_timing_info && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
+    if (have_timing_info && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) &&
         !record_opts__no_switch_events(opts)) {
         if (perf_can_record_switch_events()) {
             bool cpu_wide = !target__none(&opts->target) &&
@@ -832,7 +845,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
              * In the case of per-cpu mmaps, we need the CPU on the
              * AUX event.
              */
-            if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus))
+            if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus))
                 evsel__set_sample_bit(intel_pt_evsel, CPU);
         }
 
@@ -858,7 +871,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
         tracking_evsel->immediate = true;
 
         /* In per-cpu case, always need the time of mmap events etc */
-        if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) {
+        if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
             evsel__set_sample_bit(tracking_evsel, TIME);
             /* And the CPU for switch events */
             evsel__set_sample_bit(tracking_evsel, CPU);
@@ -870,7 +883,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
      * Warn the user when we do not have enough information to decode i.e.
      * per-cpu with no sched_switch (except workload-only).
      */
-    if (!ptr->have_sched_switch && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) &&
+    if (!ptr->have_sched_switch && !perf_cpu_map__is_any_cpu_or_is_empty(cpus) &&
         !target__none(&opts->target) &&
         !intel_pt_evsel->core.attr.exclude_user)
         ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
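The aux_watermark change above guards a 32-bit overflow: the watermark is a quarter of the AUX buffer, and auxtrace_mmap_pages * page_size is now computed in size_t and saturated at UINT_MAX before landing in the u32 attribute field. A worked example of why the clamp matters (assuming 64-bit arithmetic):

    #include <assert.h>
    #include <limits.h>

    int main(void)
    {
        unsigned long long page_size = 4096;

        /* 8 GiB AUX buffer: the watermark is 2 GiB, which still fits in a u32. */
        unsigned long long aw = 2ULL * 1024 * 1024 * page_size / 4;
        unsigned int watermark = aw > UINT_MAX ? UINT_MAX : (unsigned int)aw;
        assert(watermark == 2147483648u);

        /* 16 GiB AUX buffer: 4 GiB would wrap a u32 to 0; the clamp saturates. */
        aw = 4ULL * 1024 * 1024 * page_size / 4;
        watermark = aw > UINT_MAX ? UINT_MAX : (unsigned int)aw;
        assert(watermark == UINT_MAX);
        return 0;
    }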