Diffstat
-rw-r--r-- | tools/perf/arch/x86/util/Build | 19
-rw-r--r-- | tools/perf/arch/x86/util/archinsn.c | 26
-rw-r--r-- | tools/perf/arch/x86/util/auxtrace.c | 75
-rw-r--r-- | tools/perf/arch/x86/util/dwarf-regs.c | 115
-rw-r--r-- | tools/perf/arch/x86/util/event.c | 77
-rw-r--r-- | tools/perf/arch/x86/util/header.c | 144
-rw-r--r-- | tools/perf/arch/x86/util/intel-bts.c | 449
-rw-r--r-- | tools/perf/arch/x86/util/intel-pt.c | 1221
-rw-r--r-- | tools/perf/arch/x86/util/kvm-stat.c | 168
-rw-r--r-- | tools/perf/arch/x86/util/machine.c | 105
-rw-r--r-- | tools/perf/arch/x86/util/perf_regs.c | 302
-rw-r--r-- | tools/perf/arch/x86/util/pmu.c | 20
-rw-r--r-- | tools/perf/arch/x86/util/topdown.c | 63
-rw-r--r-- | tools/perf/arch/x86/util/tsc.c | 13
-rw-r--r-- | tools/perf/arch/x86/util/unwind-libdw.c | 53
-rw-r--r-- | tools/perf/arch/x86/util/unwind-libunwind.c | 115
16 files changed, 2965 insertions, 0 deletions
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build new file mode 100644 index 000000000..347c39b96 --- /dev/null +++ b/tools/perf/arch/x86/util/Build @@ -0,0 +1,19 @@ +perf-y += header.o +perf-y += tsc.o +perf-y += pmu.o +perf-y += kvm-stat.o +perf-y += perf_regs.o +perf-y += topdown.o +perf-y += machine.o +perf-y += event.o + +perf-$(CONFIG_DWARF) += dwarf-regs.o +perf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o + +perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o +perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o + +perf-$(CONFIG_AUXTRACE) += auxtrace.o +perf-$(CONFIG_AUXTRACE) += archinsn.o +perf-$(CONFIG_AUXTRACE) += intel-pt.o +perf-$(CONFIG_AUXTRACE) += intel-bts.o diff --git a/tools/perf/arch/x86/util/archinsn.c b/tools/perf/arch/x86/util/archinsn.c new file mode 100644 index 000000000..3e6791531 --- /dev/null +++ b/tools/perf/arch/x86/util/archinsn.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "../../../../arch/x86/include/asm/insn.h" +#include "archinsn.h" +#include "event.h" +#include "machine.h" +#include "thread.h" +#include "symbol.h" + +void arch_fetch_insn(struct perf_sample *sample, + struct thread *thread, + struct machine *machine) +{ + struct insn insn; + int len; + bool is64bit = false; + + if (!sample->ip) + return; + len = thread__memcpy(thread, machine, sample->insn, sample->ip, sizeof(sample->insn), &is64bit); + if (len <= 0) + return; + insn_init(&insn, sample->insn, len, is64bit); + insn_get_length(&insn); + if (insn_complete(&insn) && insn.length <= len) + sample->insn_len = insn.length; +} diff --git a/tools/perf/arch/x86/util/auxtrace.c b/tools/perf/arch/x86/util/auxtrace.c new file mode 100644 index 000000000..3da506e13 --- /dev/null +++ b/tools/perf/arch/x86/util/auxtrace.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * auxtrace.c: AUX area tracing support + * Copyright (c) 2013-2014, Intel Corporation. 
+ */ + +#include <errno.h> +#include <stdbool.h> + +#include "../../../util/header.h" +#include "../../../util/debug.h" +#include "../../../util/pmu.h" +#include "../../../util/auxtrace.h" +#include "../../../util/intel-pt.h" +#include "../../../util/intel-bts.h" +#include "../../../util/evlist.h" + +static +struct auxtrace_record *auxtrace_record__init_intel(struct evlist *evlist, + int *err) +{ + struct perf_pmu *intel_pt_pmu; + struct perf_pmu *intel_bts_pmu; + struct evsel *evsel; + bool found_pt = false; + bool found_bts = false; + + intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME); + if (intel_pt_pmu) + intel_pt_pmu->auxtrace = true; + intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME); + if (intel_bts_pmu) + intel_bts_pmu->auxtrace = true; + + evlist__for_each_entry(evlist, evsel) { + if (intel_pt_pmu && evsel->core.attr.type == intel_pt_pmu->type) + found_pt = true; + if (intel_bts_pmu && evsel->core.attr.type == intel_bts_pmu->type) + found_bts = true; + } + + if (found_pt && found_bts) { + pr_err("intel_pt and intel_bts may not be used together\n"); + *err = -EINVAL; + return NULL; + } + + if (found_pt) + return intel_pt_recording_init(err); + + if (found_bts) + return intel_bts_recording_init(err); + + return NULL; +} + +struct auxtrace_record *auxtrace_record__init(struct evlist *evlist, + int *err) +{ + char buffer[64]; + int ret; + + *err = 0; + + ret = get_cpuid(buffer, sizeof(buffer)); + if (ret) { + *err = ret; + return NULL; + } + + if (!strncmp(buffer, "GenuineIntel,", 13)) + return auxtrace_record__init_intel(evlist, err); + + return NULL; +} diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c new file mode 100644 index 000000000..530934805 --- /dev/null +++ b/tools/perf/arch/x86/util/dwarf-regs.c @@ -0,0 +1,115 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * dwarf-regs.c : Mapping of DWARF debug register numbers into register names. + * Extracted from probe-finder.c + * + * Written by Masami Hiramatsu <mhiramat@redhat.com> + */ + +#include <stddef.h> +#include <errno.h> /* for EINVAL */ +#include <string.h> /* for strcmp */ +#include <linux/ptrace.h> /* for struct pt_regs */ +#include <linux/kernel.h> /* for offsetof */ +#include <dwarf-regs.h> + +/* + * See arch/x86/kernel/ptrace.c. + * Different from it: + * + * - Since struct pt_regs is defined differently for user and kernel, + * but we want to use 'ax, bx' instead of 'rax, rbx' (which is struct + * field name of user's pt_regs), we make REG_OFFSET_NAME to accept + * both string name and reg field name. + * + * - Since accessing x86_32's pt_regs from x86_64 building is difficult + * and vise versa, we simply fill offset with -1, so + * get_arch_regstr() still works but regs_query_register_offset() + * returns error. + * The only inconvenience caused by it now is that we are not allowed + * to generate BPF prologue for a x86_64 kernel if perf is built for + * x86_32. This is really a rare usecase. + * + * - Order is different from kernel's ptrace.c for get_arch_regstr(). Use + * the order defined by dwarf. 
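+ * (For reference, the DWARF numbers fixed by the psABI are
+ * x86-32: ax, cx, dx, bx, sp, bp, si, di and
+ * x86-64: ax, dx, cx, bx, si, di, bp, sp, r8-r15,
+ * which is exactly the order of the tables below.)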
+ */ + +struct pt_regs_offset { + const char *name; + int offset; +}; + +#define REG_OFFSET_END {.name = NULL, .offset = 0} + +#ifdef __x86_64__ +# define REG_OFFSET_NAME_64(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)} +# define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = -1} +#else +# define REG_OFFSET_NAME_64(n, r) {.name = n, .offset = -1} +# define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)} +#endif + +/* TODO: switching by dwarf address size */ +#ifndef __x86_64__ +static const struct pt_regs_offset x86_32_regoffset_table[] = { + REG_OFFSET_NAME_32("%ax", eax), + REG_OFFSET_NAME_32("%cx", ecx), + REG_OFFSET_NAME_32("%dx", edx), + REG_OFFSET_NAME_32("%bx", ebx), + REG_OFFSET_NAME_32("$stack", esp), /* Stack address instead of %sp */ + REG_OFFSET_NAME_32("%bp", ebp), + REG_OFFSET_NAME_32("%si", esi), + REG_OFFSET_NAME_32("%di", edi), + REG_OFFSET_END, +}; + +#define regoffset_table x86_32_regoffset_table +#else +static const struct pt_regs_offset x86_64_regoffset_table[] = { + REG_OFFSET_NAME_64("%ax", rax), + REG_OFFSET_NAME_64("%dx", rdx), + REG_OFFSET_NAME_64("%cx", rcx), + REG_OFFSET_NAME_64("%bx", rbx), + REG_OFFSET_NAME_64("%si", rsi), + REG_OFFSET_NAME_64("%di", rdi), + REG_OFFSET_NAME_64("%bp", rbp), + REG_OFFSET_NAME_64("%sp", rsp), + REG_OFFSET_NAME_64("%r8", r8), + REG_OFFSET_NAME_64("%r9", r9), + REG_OFFSET_NAME_64("%r10", r10), + REG_OFFSET_NAME_64("%r11", r11), + REG_OFFSET_NAME_64("%r12", r12), + REG_OFFSET_NAME_64("%r13", r13), + REG_OFFSET_NAME_64("%r14", r14), + REG_OFFSET_NAME_64("%r15", r15), + REG_OFFSET_END, +}; + +#define regoffset_table x86_64_regoffset_table +#endif + +/* Minus 1 for the ending REG_OFFSET_END */ +#define ARCH_MAX_REGS ((sizeof(regoffset_table) / sizeof(regoffset_table[0])) - 1) + +/* Return architecture dependent register string (for kprobe-tracer) */ +const char *get_arch_regstr(unsigned int n) +{ + return (n < ARCH_MAX_REGS) ? regoffset_table[n].name : NULL; +} + +/* Reuse code from arch/x86/kernel/ptrace.c */ +/** + * regs_query_register_offset() - query register offset from its name + * @name: the name of a register + * + * regs_query_register_offset() returns the offset of a register in struct + * pt_regs from its name. 
If the name is invalid, this returns -EINVAL; + */ +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_offset *roff; + for (roff = regoffset_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->offset; + return -EINVAL; +} diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c new file mode 100644 index 000000000..047dc00ea --- /dev/null +++ b/tools/perf/arch/x86/util/event.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> +#include <linux/string.h> +#include <linux/zalloc.h> + +#include "../../../util/event.h" +#include "../../../util/synthetic-events.h" +#include "../../../util/machine.h" +#include "../../../util/tool.h" +#include "../../../util/map.h" +#include "../../../util/debug.h" + +#if defined(__x86_64__) + +int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, + perf_event__handler_t process, + struct machine *machine) +{ + int rc = 0; + struct map *pos; + struct maps *kmaps = &machine->kmaps; + union perf_event *event = zalloc(sizeof(event->mmap) + + machine->id_hdr_size); + + if (!event) { + pr_debug("Not enough memory synthesizing mmap event " + "for extra kernel maps\n"); + return -1; + } + + maps__for_each_entry(kmaps, pos) { + struct kmap *kmap; + size_t size; + + if (!__map__is_extra_kernel_map(pos)) + continue; + + kmap = map__kmap(pos); + + size = sizeof(event->mmap) - sizeof(event->mmap.filename) + + PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) + + machine->id_hdr_size; + + memset(event, 0, size); + + event->mmap.header.type = PERF_RECORD_MMAP; + + /* + * kernel uses 0 for user space maps, see kernel/perf_event.c + * __perf_event_mmap + */ + if (machine__is_host(machine)) + event->header.misc = PERF_RECORD_MISC_KERNEL; + else + event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; + + event->mmap.header.size = size; + + event->mmap.start = pos->start; + event->mmap.len = pos->end - pos->start; + event->mmap.pgoff = pos->pgoff; + event->mmap.pid = machine->pid; + + strlcpy(event->mmap.filename, kmap->name, PATH_MAX); + + if (perf_tool__process_synth_event(tool, event, machine, + process) != 0) { + rc = -1; + break; + } + } + + free(event); + return rc; +} + +#endif diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c new file mode 100644 index 000000000..578c8c568 --- /dev/null +++ b/tools/perf/arch/x86/util/header.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <sys/types.h> +#include <errno.h> +#include <unistd.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <regex.h> + +#include "../../../util/debug.h" +#include "../../../util/header.h" + +static inline void +cpuid(unsigned int op, unsigned int *a, unsigned int *b, unsigned int *c, + unsigned int *d) +{ + __asm__ __volatile__ (".byte 0x53\n\tcpuid\n\t" + "movl %%ebx, %%esi\n\t.byte 0x5b" + : "=a" (*a), + "=S" (*b), + "=c" (*c), + "=d" (*d) + : "a" (op)); +} + +static int +__get_cpuid(char *buffer, size_t sz, const char *fmt) +{ + unsigned int a, b, c, d, lvl; + int family = -1, model = -1, step = -1; + int nb; + char vendor[16]; + + cpuid(0, &lvl, &b, &c, &d); + strncpy(&vendor[0], (char *)(&b), 4); + strncpy(&vendor[4], (char *)(&d), 4); + strncpy(&vendor[8], (char *)(&c), 4); + vendor[12] = '\0'; + + if (lvl >= 1) { + cpuid(1, &a, &b, &c, &d); + + family = (a >> 8) & 0xf; /* bits 11 - 8 */ + model = (a >> 4) & 0xf; /* Bits 7 - 4 */ + step = a & 0xf; + + /* extended family */ + if (family == 0xf) + family += (a >> 20) & 
0xff; + + /* extended model */ + if (family >= 0x6) + model += ((a >> 16) & 0xf) << 4; + } + nb = scnprintf(buffer, sz, fmt, vendor, family, model, step); + + /* look for end marker to ensure the entire data fit */ + if (strchr(buffer, '$')) { + buffer[nb-1] = '\0'; + return 0; + } + return ENOBUFS; +} + +int +get_cpuid(char *buffer, size_t sz) +{ + return __get_cpuid(buffer, sz, "%s,%u,%u,%u$"); +} + +char * +get_cpuid_str(struct perf_pmu *pmu __maybe_unused) +{ + char *buf = malloc(128); + + if (buf && __get_cpuid(buf, 128, "%s-%u-%X-%X$") < 0) { + free(buf); + return NULL; + } + return buf; +} + +/* Full CPUID format for x86 is vendor-family-model-stepping */ +static bool is_full_cpuid(const char *id) +{ + const char *tmp = id; + int count = 0; + + while ((tmp = strchr(tmp, '-')) != NULL) { + count++; + tmp++; + } + + if (count == 3) + return true; + + return false; +} + +int strcmp_cpuid_str(const char *mapcpuid, const char *id) +{ + regex_t re; + regmatch_t pmatch[1]; + int match; + bool full_mapcpuid = is_full_cpuid(mapcpuid); + bool full_cpuid = is_full_cpuid(id); + + /* + * Full CPUID format is required to identify a platform. + * Error out if the cpuid string is incomplete. + */ + if (full_mapcpuid && !full_cpuid) { + pr_info("Invalid CPUID %s. Full CPUID is required, " + "vendor-family-model-stepping\n", id); + return 1; + } + + if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) { + /* Warn unable to generate match particular string. */ + pr_info("Invalid regular expression %s\n", mapcpuid); + return 1; + } + + match = !regexec(&re, id, 1, pmatch, 0); + regfree(&re); + if (match) { + size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so); + size_t cpuid_len; + + /* If the full CPUID format isn't required, + * ignoring the stepping. + */ + if (!full_mapcpuid && full_cpuid) + cpuid_len = strrchr(id, '-') - id; + else + cpuid_len = strlen(id); + + /* Verify the entire string matched. */ + if (match_len == cpuid_len) + return 0; + } + + return 1; +} diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c new file mode 100644 index 000000000..0dc09b580 --- /dev/null +++ b/tools/perf/arch/x86/util/intel-bts.c @@ -0,0 +1,449 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * intel-bts.c: Intel Processor Trace support + * Copyright (c) 2013-2015, Intel Corporation. 
+ */ + +#include <errno.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/log2.h> +#include <linux/zalloc.h> + +#include "../../../util/cpumap.h" +#include "../../../util/event.h" +#include "../../../util/evsel.h" +#include "../../../util/evlist.h" +#include "../../../util/mmap.h" +#include "../../../util/session.h" +#include "../../../util/pmu.h" +#include "../../../util/debug.h" +#include "../../../util/record.h" +#include "../../../util/tsc.h" +#include "../../../util/auxtrace.h" +#include "../../../util/intel-bts.h" +#include <internal/lib.h> // page_size + +#define KiB(x) ((x) * 1024) +#define MiB(x) ((x) * 1024 * 1024) +#define KiB_MASK(x) (KiB(x) - 1) +#define MiB_MASK(x) (MiB(x) - 1) + +struct intel_bts_snapshot_ref { + void *ref_buf; + size_t ref_offset; + bool wrapped; +}; + +struct intel_bts_recording { + struct auxtrace_record itr; + struct perf_pmu *intel_bts_pmu; + struct evlist *evlist; + bool snapshot_mode; + size_t snapshot_size; + int snapshot_ref_cnt; + struct intel_bts_snapshot_ref *snapshot_refs; +}; + +struct branch { + u64 from; + u64 to; + u64 misc; +}; + +static size_t +intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused, + struct evlist *evlist __maybe_unused) +{ + return INTEL_BTS_AUXTRACE_PRIV_SIZE; +} + +static int intel_bts_info_fill(struct auxtrace_record *itr, + struct perf_session *session, + struct perf_record_auxtrace_info *auxtrace_info, + size_t priv_size) +{ + struct intel_bts_recording *btsr = + container_of(itr, struct intel_bts_recording, itr); + struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu; + struct perf_event_mmap_page *pc; + struct perf_tsc_conversion tc = { .time_mult = 0, }; + bool cap_user_time_zero = false; + int err; + + if (priv_size != INTEL_BTS_AUXTRACE_PRIV_SIZE) + return -EINVAL; + + if (!session->evlist->core.nr_mmaps) + return -EINVAL; + + pc = session->evlist->mmap[0].core.base; + if (pc) { + err = perf_read_tsc_conversion(pc, &tc); + if (err) { + if (err != -EOPNOTSUPP) + return err; + } else { + cap_user_time_zero = tc.time_mult != 0; + } + if (!cap_user_time_zero) + ui__warning("Intel BTS: TSC not available\n"); + } + + auxtrace_info->type = PERF_AUXTRACE_INTEL_BTS; + auxtrace_info->priv[INTEL_BTS_PMU_TYPE] = intel_bts_pmu->type; + auxtrace_info->priv[INTEL_BTS_TIME_SHIFT] = tc.time_shift; + auxtrace_info->priv[INTEL_BTS_TIME_MULT] = tc.time_mult; + auxtrace_info->priv[INTEL_BTS_TIME_ZERO] = tc.time_zero; + auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO] = cap_user_time_zero; + auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE] = btsr->snapshot_mode; + + return 0; +} + +static int intel_bts_recording_options(struct auxtrace_record *itr, + struct evlist *evlist, + struct record_opts *opts) +{ + struct intel_bts_recording *btsr = + container_of(itr, struct intel_bts_recording, itr); + struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu; + struct evsel *evsel, *intel_bts_evsel = NULL; + const struct perf_cpu_map *cpus = evlist->core.cpus; + bool privileged = perf_event_paranoid_check(-1); + + if (opts->auxtrace_sample_mode) { + pr_err("Intel BTS does not support AUX area sampling\n"); + return -EINVAL; + } + + btsr->evlist = evlist; + btsr->snapshot_mode = opts->auxtrace_snapshot_mode; + + evlist__for_each_entry(evlist, evsel) { + if (evsel->core.attr.type == intel_bts_pmu->type) { + if (intel_bts_evsel) { + pr_err("There may be only one " INTEL_BTS_PMU_NAME " event\n"); + return -EINVAL; + } + evsel->core.attr.freq = 0; + evsel->core.attr.sample_period = 1; + 
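/* AUX area events cannot use frequency mode, hence the fixed period of 1 above */
+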
intel_bts_evsel = evsel; + opts->full_auxtrace = true; + } + } + + if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) { + pr_err("Snapshot mode (-S option) requires " INTEL_BTS_PMU_NAME " PMU event (-e " INTEL_BTS_PMU_NAME ")\n"); + return -EINVAL; + } + + if (!opts->full_auxtrace) + return 0; + + if (opts->full_auxtrace && !perf_cpu_map__empty(cpus)) { + pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n"); + return -EINVAL; + } + + /* Set default sizes for snapshot mode */ + if (opts->auxtrace_snapshot_mode) { + if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) { + if (privileged) { + opts->auxtrace_mmap_pages = MiB(4) / page_size; + } else { + opts->auxtrace_mmap_pages = KiB(128) / page_size; + if (opts->mmap_pages == UINT_MAX) + opts->mmap_pages = KiB(256) / page_size; + } + } else if (!opts->auxtrace_mmap_pages && !privileged && + opts->mmap_pages == UINT_MAX) { + opts->mmap_pages = KiB(256) / page_size; + } + if (!opts->auxtrace_snapshot_size) + opts->auxtrace_snapshot_size = + opts->auxtrace_mmap_pages * (size_t)page_size; + if (!opts->auxtrace_mmap_pages) { + size_t sz = opts->auxtrace_snapshot_size; + + sz = round_up(sz, page_size) / page_size; + opts->auxtrace_mmap_pages = roundup_pow_of_two(sz); + } + if (opts->auxtrace_snapshot_size > + opts->auxtrace_mmap_pages * (size_t)page_size) { + pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n", + opts->auxtrace_snapshot_size, + opts->auxtrace_mmap_pages * (size_t)page_size); + return -EINVAL; + } + if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) { + pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n"); + return -EINVAL; + } + pr_debug2("Intel BTS snapshot size: %zu\n", + opts->auxtrace_snapshot_size); + } + + /* Set default sizes for full trace mode */ + if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) { + if (privileged) { + opts->auxtrace_mmap_pages = MiB(4) / page_size; + } else { + opts->auxtrace_mmap_pages = KiB(128) / page_size; + if (opts->mmap_pages == UINT_MAX) + opts->mmap_pages = KiB(256) / page_size; + } + } + + /* Validate auxtrace_mmap_pages */ + if (opts->auxtrace_mmap_pages) { + size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size; + size_t min_sz; + + if (opts->auxtrace_snapshot_mode) + min_sz = KiB(4); + else + min_sz = KiB(8); + + if (sz < min_sz || !is_power_of_2(sz)) { + pr_err("Invalid mmap size for Intel BTS: must be at least %zuKiB and a power of 2\n", + min_sz / 1024); + return -EINVAL; + } + } + + if (intel_bts_evsel) { + /* + * To obtain the auxtrace buffer file descriptor, the auxtrace event + * must come first. + */ + perf_evlist__to_front(evlist, intel_bts_evsel); + /* + * In the case of per-cpu mmaps, we need the CPU on the + * AUX event. 
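+ * Without the CPU sample bit, the decoder could not attribute
+ * the AUX data to the CPU it was collected on.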
+ */ + if (!perf_cpu_map__empty(cpus)) + evsel__set_sample_bit(intel_bts_evsel, CPU); + } + + /* Add dummy event to keep tracking */ + if (opts->full_auxtrace) { + struct evsel *tracking_evsel; + int err; + + err = parse_events(evlist, "dummy:u", NULL); + if (err) + return err; + + tracking_evsel = evlist__last(evlist); + + perf_evlist__set_tracking_event(evlist, tracking_evsel); + + tracking_evsel->core.attr.freq = 0; + tracking_evsel->core.attr.sample_period = 1; + } + + return 0; +} + +static int intel_bts_parse_snapshot_options(struct auxtrace_record *itr, + struct record_opts *opts, + const char *str) +{ + struct intel_bts_recording *btsr = + container_of(itr, struct intel_bts_recording, itr); + unsigned long long snapshot_size = 0; + char *endptr; + + if (str) { + snapshot_size = strtoull(str, &endptr, 0); + if (*endptr || snapshot_size > SIZE_MAX) + return -1; + } + + opts->auxtrace_snapshot_mode = true; + opts->auxtrace_snapshot_size = snapshot_size; + + btsr->snapshot_size = snapshot_size; + + return 0; +} + +static u64 intel_bts_reference(struct auxtrace_record *itr __maybe_unused) +{ + return rdtsc(); +} + +static int intel_bts_alloc_snapshot_refs(struct intel_bts_recording *btsr, + int idx) +{ + const size_t sz = sizeof(struct intel_bts_snapshot_ref); + int cnt = btsr->snapshot_ref_cnt, new_cnt = cnt * 2; + struct intel_bts_snapshot_ref *refs; + + if (!new_cnt) + new_cnt = 16; + + while (new_cnt <= idx) + new_cnt *= 2; + + refs = calloc(new_cnt, sz); + if (!refs) + return -ENOMEM; + + memcpy(refs, btsr->snapshot_refs, cnt * sz); + + btsr->snapshot_refs = refs; + btsr->snapshot_ref_cnt = new_cnt; + + return 0; +} + +static void intel_bts_free_snapshot_refs(struct intel_bts_recording *btsr) +{ + int i; + + for (i = 0; i < btsr->snapshot_ref_cnt; i++) + zfree(&btsr->snapshot_refs[i].ref_buf); + zfree(&btsr->snapshot_refs); +} + +static void intel_bts_recording_free(struct auxtrace_record *itr) +{ + struct intel_bts_recording *btsr = + container_of(itr, struct intel_bts_recording, itr); + + intel_bts_free_snapshot_refs(btsr); + free(btsr); +} + +static int intel_bts_snapshot_start(struct auxtrace_record *itr) +{ + struct intel_bts_recording *btsr = + container_of(itr, struct intel_bts_recording, itr); + struct evsel *evsel; + + evlist__for_each_entry(btsr->evlist, evsel) { + if (evsel->core.attr.type == btsr->intel_bts_pmu->type) + return evsel__disable(evsel); + } + return -EINVAL; +} + +static int intel_bts_snapshot_finish(struct auxtrace_record *itr) +{ + struct intel_bts_recording *btsr = + container_of(itr, struct intel_bts_recording, itr); + struct evsel *evsel; + + evlist__for_each_entry(btsr->evlist, evsel) { + if (evsel->core.attr.type == btsr->intel_bts_pmu->type) + return evsel__enable(evsel); + } + return -EINVAL; +} + +static bool intel_bts_first_wrap(u64 *data, size_t buf_size) +{ + int i, a, b; + + b = buf_size >> 3; + a = b - 512; + if (a < 0) + a = 0; + + for (i = a; i < b; i++) { + if (data[i]) + return true; + } + + return false; +} + +static int intel_bts_find_snapshot(struct auxtrace_record *itr, int idx, + struct auxtrace_mmap *mm, unsigned char *data, + u64 *head, u64 *old) +{ + struct intel_bts_recording *btsr = + container_of(itr, struct intel_bts_recording, itr); + bool wrapped; + int err; + + pr_debug3("%s: mmap index %d old head %zu new head %zu\n", + __func__, idx, (size_t)*old, (size_t)*head); + + if (idx >= btsr->snapshot_ref_cnt) { + err = intel_bts_alloc_snapshot_refs(btsr, idx); + if (err) + goto out_err; + } + + wrapped = 
btsr->snapshot_refs[idx].wrapped; + if (!wrapped && intel_bts_first_wrap((u64 *)data, mm->len)) { + btsr->snapshot_refs[idx].wrapped = true; + wrapped = true; + } + + /* + * In full trace mode 'head' continually increases. However in snapshot + * mode 'head' is an offset within the buffer. Here 'old' and 'head' + * are adjusted to match the full trace case which expects that 'old' is + * always less than 'head'. + */ + if (wrapped) { + *old = *head; + *head += mm->len; + } else { + if (mm->mask) + *old &= mm->mask; + else + *old %= mm->len; + if (*old > *head) + *head += mm->len; + } + + pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n", + __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head); + + return 0; + +out_err: + pr_err("%s: failed, error %d\n", __func__, err); + return err; +} + +struct auxtrace_record *intel_bts_recording_init(int *err) +{ + struct perf_pmu *intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME); + struct intel_bts_recording *btsr; + + if (!intel_bts_pmu) + return NULL; + + if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) { + *err = -errno; + return NULL; + } + + btsr = zalloc(sizeof(struct intel_bts_recording)); + if (!btsr) { + *err = -ENOMEM; + return NULL; + } + + btsr->intel_bts_pmu = intel_bts_pmu; + btsr->itr.pmu = intel_bts_pmu; + btsr->itr.recording_options = intel_bts_recording_options; + btsr->itr.info_priv_size = intel_bts_info_priv_size; + btsr->itr.info_fill = intel_bts_info_fill; + btsr->itr.free = intel_bts_recording_free; + btsr->itr.snapshot_start = intel_bts_snapshot_start; + btsr->itr.snapshot_finish = intel_bts_snapshot_finish; + btsr->itr.find_snapshot = intel_bts_find_snapshot; + btsr->itr.parse_snapshot_options = intel_bts_parse_snapshot_options; + btsr->itr.reference = intel_bts_reference; + btsr->itr.read_finish = auxtrace_record__read_finish; + btsr->itr.alignment = sizeof(struct branch); + return &btsr->itr; +} diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c new file mode 100644 index 000000000..082e5f2a4 --- /dev/null +++ b/tools/perf/arch/x86/util/intel-pt.c @@ -0,0 +1,1221 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * intel_pt.c: Intel Processor Trace support + * Copyright (c) 2013-2015, Intel Corporation. 
+ */ + +#include <errno.h> +#include <stdbool.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/log2.h> +#include <linux/zalloc.h> +#include <cpuid.h> + +#include "../../../util/session.h" +#include "../../../util/event.h" +#include "../../../util/evlist.h" +#include "../../../util/evsel.h" +#include "../../../util/evsel_config.h" +#include "../../../util/cpumap.h" +#include "../../../util/mmap.h" +#include <subcmd/parse-options.h> +#include "../../../util/parse-events.h" +#include "../../../util/pmu.h" +#include "../../../util/debug.h" +#include "../../../util/auxtrace.h" +#include "../../../util/perf_api_probe.h" +#include "../../../util/record.h" +#include "../../../util/target.h" +#include "../../../util/tsc.h" +#include <internal/lib.h> // page_size +#include "../../../util/intel-pt.h" + +#define KiB(x) ((x) * 1024) +#define MiB(x) ((x) * 1024 * 1024) +#define KiB_MASK(x) (KiB(x) - 1) +#define MiB_MASK(x) (MiB(x) - 1) + +#define INTEL_PT_PSB_PERIOD_NEAR 256 + +struct intel_pt_snapshot_ref { + void *ref_buf; + size_t ref_offset; + bool wrapped; +}; + +struct intel_pt_recording { + struct auxtrace_record itr; + struct perf_pmu *intel_pt_pmu; + int have_sched_switch; + struct evlist *evlist; + bool snapshot_mode; + bool snapshot_init_done; + size_t snapshot_size; + size_t snapshot_ref_buf_size; + int snapshot_ref_cnt; + struct intel_pt_snapshot_ref *snapshot_refs; + size_t priv_size; +}; + +static int intel_pt_parse_terms_with_default(const char *pmu_name, + struct list_head *formats, + const char *str, + u64 *config) +{ + struct list_head *terms; + struct perf_event_attr attr = { .size = 0, }; + int err; + + terms = malloc(sizeof(struct list_head)); + if (!terms) + return -ENOMEM; + + INIT_LIST_HEAD(terms); + + err = parse_events_terms(terms, str); + if (err) + goto out_free; + + attr.config = *config; + err = perf_pmu__config_terms(pmu_name, formats, &attr, terms, true, + NULL); + if (err) + goto out_free; + + *config = attr.config; +out_free: + parse_events_terms__delete(terms); + return err; +} + +static int intel_pt_parse_terms(const char *pmu_name, struct list_head *formats, + const char *str, u64 *config) +{ + *config = 0; + return intel_pt_parse_terms_with_default(pmu_name, formats, str, + config); +} + +static u64 intel_pt_masked_bits(u64 mask, u64 bits) +{ + const u64 top_bit = 1ULL << 63; + u64 res = 0; + int i; + + for (i = 0; i < 64; i++) { + if (mask & top_bit) { + res <<= 1; + if (bits & top_bit) + res |= 1; + } + mask <<= 1; + bits <<= 1; + } + + return res; +} + +static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str, + struct evlist *evlist, u64 *res) +{ + struct evsel *evsel; + u64 mask; + + *res = 0; + + mask = perf_pmu__format_bits(&intel_pt_pmu->format, str); + if (!mask) + return -EINVAL; + + evlist__for_each_entry(evlist, evsel) { + if (evsel->core.attr.type == intel_pt_pmu->type) { + *res = intel_pt_masked_bits(mask, evsel->core.attr.config); + return 0; + } + } + + return -EINVAL; +} + +static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu, + struct evlist *evlist) +{ + u64 val; + int err, topa_multiple_entries; + size_t psb_period; + + if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries", + "%d", &topa_multiple_entries) != 1) + topa_multiple_entries = 0; + + /* + * Use caps/topa_multiple_entries to indicate early hardware that had + * extra frequent PSBs. 
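+ * On such hardware, estimate the PSB period as 256 bytes below.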
+ */ + if (!topa_multiple_entries) { + psb_period = 256; + goto out; + } + + err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val); + if (err) + val = 0; + + psb_period = 1 << (val + 11); +out: + pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period); + return psb_period; +} + +static int intel_pt_pick_bit(int bits, int target) +{ + int pos, pick = -1; + + for (pos = 0; bits; bits >>= 1, pos++) { + if (bits & 1) { + if (pos <= target || pick < 0) + pick = pos; + if (pos >= target) + break; + } + } + + return pick; +} + +static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu) +{ + char buf[256]; + int mtc, mtc_periods = 0, mtc_period; + int psb_cyc, psb_periods, psb_period; + int pos = 0; + u64 config; + char c; + + pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc"); + + if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d", + &mtc) != 1) + mtc = 1; + + if (mtc) { + if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x", + &mtc_periods) != 1) + mtc_periods = 0; + if (mtc_periods) { + mtc_period = intel_pt_pick_bit(mtc_periods, 3); + pos += scnprintf(buf + pos, sizeof(buf) - pos, + ",mtc,mtc_period=%d", mtc_period); + } + } + + if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d", + &psb_cyc) != 1) + psb_cyc = 1; + + if (psb_cyc && mtc_periods) { + if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x", + &psb_periods) != 1) + psb_periods = 0; + if (psb_periods) { + psb_period = intel_pt_pick_bit(psb_periods, 3); + pos += scnprintf(buf + pos, sizeof(buf) - pos, + ",psb_period=%d", psb_period); + } + } + + if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 && + perf_pmu__scan_file(intel_pt_pmu, "format/branch", "%c", &c) == 1) + pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch"); + + pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf); + + intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, buf, + &config); + + return config; +} + +static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr, + struct record_opts *opts, + const char *str) +{ + struct intel_pt_recording *ptr = + container_of(itr, struct intel_pt_recording, itr); + unsigned long long snapshot_size = 0; + char *endptr; + + if (str) { + snapshot_size = strtoull(str, &endptr, 0); + if (*endptr || snapshot_size > SIZE_MAX) + return -1; + } + + opts->auxtrace_snapshot_mode = true; + opts->auxtrace_snapshot_size = snapshot_size; + + ptr->snapshot_size = snapshot_size; + + return 0; +} + +struct perf_event_attr * +intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu) +{ + struct perf_event_attr *attr; + + attr = zalloc(sizeof(struct perf_event_attr)); + if (!attr) + return NULL; + + attr->config = intel_pt_default_config(intel_pt_pmu); + + intel_pt_pmu->selectable = true; + + return attr; +} + +static const char *intel_pt_find_filter(struct evlist *evlist, + struct perf_pmu *intel_pt_pmu) +{ + struct evsel *evsel; + + evlist__for_each_entry(evlist, evsel) { + if (evsel->core.attr.type == intel_pt_pmu->type) + return evsel->filter; + } + + return NULL; +} + +static size_t intel_pt_filter_bytes(const char *filter) +{ + size_t len = filter ? strlen(filter) : 0; + + return len ? 
roundup(len + 1, 8) : 0; +} + +static size_t +intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist) +{ + struct intel_pt_recording *ptr = + container_of(itr, struct intel_pt_recording, itr); + const char *filter = intel_pt_find_filter(evlist, ptr->intel_pt_pmu); + + ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) + + intel_pt_filter_bytes(filter); + + return ptr->priv_size; +} + +static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d) +{ + unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0; + + __get_cpuid(0x15, &eax, &ebx, &ecx, &edx); + *n = ebx; + *d = eax; +} + +static int intel_pt_info_fill(struct auxtrace_record *itr, + struct perf_session *session, + struct perf_record_auxtrace_info *auxtrace_info, + size_t priv_size) +{ + struct intel_pt_recording *ptr = + container_of(itr, struct intel_pt_recording, itr); + struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu; + struct perf_event_mmap_page *pc; + struct perf_tsc_conversion tc = { .time_mult = 0, }; + bool cap_user_time_zero = false, per_cpu_mmaps; + u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit; + u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d; + unsigned long max_non_turbo_ratio; + size_t filter_str_len; + const char *filter; + __u64 *info; + int err; + + if (priv_size != ptr->priv_size) + return -EINVAL; + + intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, + "tsc", &tsc_bit); + intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, + "noretcomp", &noretcomp_bit); + intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, + "mtc", &mtc_bit); + mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format, + "mtc_period"); + intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, + "cyc", &cyc_bit); + + intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d); + + if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio", + "%lu", &max_non_turbo_ratio) != 1) + max_non_turbo_ratio = 0; + + filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu); + filter_str_len = filter ? 
strlen(filter) : 0; + + if (!session->evlist->core.nr_mmaps) + return -EINVAL; + + pc = session->evlist->mmap[0].core.base; + if (pc) { + err = perf_read_tsc_conversion(pc, &tc); + if (err) { + if (err != -EOPNOTSUPP) + return err; + } else { + cap_user_time_zero = tc.time_mult != 0; + } + if (!cap_user_time_zero) + ui__warning("Intel Processor Trace: TSC not available\n"); + } + + per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus); + + auxtrace_info->type = PERF_AUXTRACE_INTEL_PT; + auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type; + auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift; + auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult; + auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero; + auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero; + auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit; + auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit; + auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch; + auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode; + auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps; + auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit; + auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits; + auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n; + auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d; + auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit; + auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO] = max_non_turbo_ratio; + auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] = filter_str_len; + + info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1; + + if (filter_str_len) { + size_t len = intel_pt_filter_bytes(filter); + + strncpy((char *)info, filter, len); + info += len >> 3; + } + + return 0; +} + +static int intel_pt_track_switches(struct evlist *evlist) +{ + const char *sched_switch = "sched:sched_switch"; + struct evsel *evsel; + int err; + + if (!perf_evlist__can_select_event(evlist, sched_switch)) + return -EPERM; + + err = parse_events(evlist, sched_switch, NULL); + if (err) { + pr_debug2("%s: failed to parse %s, error %d\n", + __func__, sched_switch, err); + return err; + } + + evsel = evlist__last(evlist); + + evsel__set_sample_bit(evsel, CPU); + evsel__set_sample_bit(evsel, TIME); + + evsel->core.system_wide = true; + evsel->no_aux_samples = true; + evsel->immediate = true; + + return 0; +} + +static void intel_pt_valid_str(char *str, size_t len, u64 valid) +{ + unsigned int val, last = 0, state = 1; + int p = 0; + + str[0] = '\0'; + + for (val = 0; val <= 64; val++, valid >>= 1) { + if (valid & 1) { + last = val; + switch (state) { + case 0: + p += scnprintf(str + p, len - p, ","); + /* Fall through */ + case 1: + p += scnprintf(str + p, len - p, "%u", val); + state = 2; + break; + case 2: + state = 3; + break; + case 3: + state = 4; + break; + default: + break; + } + } else { + switch (state) { + case 3: + p += scnprintf(str + p, len - p, ",%u", last); + state = 0; + break; + case 4: + p += scnprintf(str + p, len - p, "-%u", last); + state = 0; + break; + default: + break; + } + if (state != 1) + state = 0; + } + } +} + +static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu, + const char *caps, const char *name, + const char *supported, u64 config) +{ + char valid_str[256]; + unsigned int shift; + unsigned long long valid; + u64 bits; + int ok; + + if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1) + valid = 0; + + if (supported && + perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok) + 
valid = 0; + + valid |= 1; + + bits = perf_pmu__format_bits(&intel_pt_pmu->format, name); + + config &= bits; + + for (shift = 0; bits && !(bits & 1); shift++) + bits >>= 1; + + config >>= shift; + + if (config > 63) + goto out_err; + + if (valid & (1 << config)) + return 0; +out_err: + intel_pt_valid_str(valid_str, sizeof(valid_str), valid); + pr_err("Invalid %s for %s. Valid values are: %s\n", + name, INTEL_PT_PMU_NAME, valid_str); + return -EINVAL; +} + +static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu, + struct evsel *evsel) +{ + int err; + char c; + + if (!evsel) + return 0; + + /* + * If supported, force pass-through config term (pt=1) even if user + * sets pt=0, which avoids senseless kernel errors. + */ + if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 && + !(evsel->core.attr.config & 1)) { + pr_warning("pt=0 doesn't make sense, forcing pt=1\n"); + evsel->core.attr.config |= 1; + } + + err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds", + "cyc_thresh", "caps/psb_cyc", + evsel->core.attr.config); + if (err) + return err; + + err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods", + "mtc_period", "caps/mtc", + evsel->core.attr.config); + if (err) + return err; + + return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods", + "psb_period", "caps/psb_cyc", + evsel->core.attr.config); +} + +static void intel_pt_config_sample_mode(struct perf_pmu *intel_pt_pmu, + struct evsel *evsel) +{ + u64 user_bits = 0, bits; + struct evsel_config_term *term = evsel__get_config_term(evsel, CFG_CHG); + + if (term) + user_bits = term->val.cfg_chg; + + bits = perf_pmu__format_bits(&intel_pt_pmu->format, "psb_period"); + + /* Did user change psb_period */ + if (bits & user_bits) + return; + + /* Set psb_period to 0 */ + evsel->core.attr.config &= ~bits; +} + +static void intel_pt_min_max_sample_sz(struct evlist *evlist, + size_t *min_sz, size_t *max_sz) +{ + struct evsel *evsel; + + evlist__for_each_entry(evlist, evsel) { + size_t sz = evsel->core.attr.aux_sample_size; + + if (!sz) + continue; + if (min_sz && (sz < *min_sz || !*min_sz)) + *min_sz = sz; + if (max_sz && sz > *max_sz) + *max_sz = sz; + } +} + +/* + * Currently, there is not enough information to disambiguate different PEBS + * events, so only allow one. 
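+ * intel_pt_too_many_aux_output() below counts events with
+ * attr.aux_output set and rejects more than one.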
+ */ +static bool intel_pt_too_many_aux_output(struct evlist *evlist) +{ + struct evsel *evsel; + int aux_output_cnt = 0; + + evlist__for_each_entry(evlist, evsel) + aux_output_cnt += !!evsel->core.attr.aux_output; + + if (aux_output_cnt > 1) { + pr_err(INTEL_PT_PMU_NAME " supports at most one event with aux-output\n"); + return true; + } + + return false; +} + +static int intel_pt_recording_options(struct auxtrace_record *itr, + struct evlist *evlist, + struct record_opts *opts) +{ + struct intel_pt_recording *ptr = + container_of(itr, struct intel_pt_recording, itr); + struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu; + bool have_timing_info, need_immediate = false; + struct evsel *evsel, *intel_pt_evsel = NULL; + const struct perf_cpu_map *cpus = evlist->core.cpus; + bool privileged = perf_event_paranoid_check(-1); + u64 tsc_bit; + int err; + + ptr->evlist = evlist; + ptr->snapshot_mode = opts->auxtrace_snapshot_mode; + + evlist__for_each_entry(evlist, evsel) { + if (evsel->core.attr.type == intel_pt_pmu->type) { + if (intel_pt_evsel) { + pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n"); + return -EINVAL; + } + evsel->core.attr.freq = 0; + evsel->core.attr.sample_period = 1; + evsel->no_aux_samples = true; + intel_pt_evsel = evsel; + opts->full_auxtrace = true; + } + } + + if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) { + pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n"); + return -EINVAL; + } + + if (opts->auxtrace_snapshot_mode && opts->auxtrace_sample_mode) { + pr_err("Snapshot mode (" INTEL_PT_PMU_NAME " PMU) and sample trace cannot be used together\n"); + return -EINVAL; + } + + if (opts->use_clockid) { + pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n"); + return -EINVAL; + } + + if (intel_pt_too_many_aux_output(evlist)) + return -EINVAL; + + if (!opts->full_auxtrace) + return 0; + + if (opts->auxtrace_sample_mode) + intel_pt_config_sample_mode(intel_pt_pmu, intel_pt_evsel); + + err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel); + if (err) + return err; + + /* Set default sizes for snapshot mode */ + if (opts->auxtrace_snapshot_mode) { + size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist); + + if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) { + if (privileged) { + opts->auxtrace_mmap_pages = MiB(4) / page_size; + } else { + opts->auxtrace_mmap_pages = KiB(128) / page_size; + if (opts->mmap_pages == UINT_MAX) + opts->mmap_pages = KiB(256) / page_size; + } + } else if (!opts->auxtrace_mmap_pages && !privileged && + opts->mmap_pages == UINT_MAX) { + opts->mmap_pages = KiB(256) / page_size; + } + if (!opts->auxtrace_snapshot_size) + opts->auxtrace_snapshot_size = + opts->auxtrace_mmap_pages * (size_t)page_size; + if (!opts->auxtrace_mmap_pages) { + size_t sz = opts->auxtrace_snapshot_size; + + sz = round_up(sz, page_size) / page_size; + opts->auxtrace_mmap_pages = roundup_pow_of_two(sz); + } + if (opts->auxtrace_snapshot_size > + opts->auxtrace_mmap_pages * (size_t)page_size) { + pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n", + opts->auxtrace_snapshot_size, + opts->auxtrace_mmap_pages * (size_t)page_size); + return -EINVAL; + } + if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) { + pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n"); + return -EINVAL; + } + pr_debug2("Intel PT snapshot size: %zu\n", + opts->auxtrace_snapshot_size); + if (psb_period && + 
opts->auxtrace_snapshot_size <= psb_period + + INTEL_PT_PSB_PERIOD_NEAR) + ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n", + opts->auxtrace_snapshot_size, psb_period); + } + + /* Set default sizes for sample mode */ + if (opts->auxtrace_sample_mode) { + size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist); + size_t min_sz = 0, max_sz = 0; + + intel_pt_min_max_sample_sz(evlist, &min_sz, &max_sz); + if (!opts->auxtrace_mmap_pages && !privileged && + opts->mmap_pages == UINT_MAX) + opts->mmap_pages = KiB(256) / page_size; + if (!opts->auxtrace_mmap_pages) { + size_t sz = round_up(max_sz, page_size) / page_size; + + opts->auxtrace_mmap_pages = roundup_pow_of_two(sz); + } + if (max_sz > opts->auxtrace_mmap_pages * (size_t)page_size) { + pr_err("Sample size %zu must not be greater than AUX area tracing mmap size %zu\n", + max_sz, + opts->auxtrace_mmap_pages * (size_t)page_size); + return -EINVAL; + } + pr_debug2("Intel PT min. sample size: %zu max. sample size: %zu\n", + min_sz, max_sz); + if (psb_period && + min_sz <= psb_period + INTEL_PT_PSB_PERIOD_NEAR) + ui__warning("Intel PT sample size (%zu) may be too small for PSB period (%zu)\n", + min_sz, psb_period); + } + + /* Set default sizes for full trace mode */ + if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) { + if (privileged) { + opts->auxtrace_mmap_pages = MiB(4) / page_size; + } else { + opts->auxtrace_mmap_pages = KiB(128) / page_size; + if (opts->mmap_pages == UINT_MAX) + opts->mmap_pages = KiB(256) / page_size; + } + } + + /* Validate auxtrace_mmap_pages */ + if (opts->auxtrace_mmap_pages) { + size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size; + size_t min_sz; + + if (opts->auxtrace_snapshot_mode || opts->auxtrace_sample_mode) + min_sz = KiB(4); + else + min_sz = KiB(8); + + if (sz < min_sz || !is_power_of_2(sz)) { + pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n", + min_sz / 1024); + return -EINVAL; + } + } + + intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, + "tsc", &tsc_bit); + + if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit)) + have_timing_info = true; + else + have_timing_info = false; + + /* + * Per-cpu recording needs sched_switch events to distinguish different + * threads. 
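+ * Prefer a system-wide dummy event with attr.context_switch set,
+ * then PERF_RECORD_SWITCH events, then the sched:sched_switch
+ * tracepoint, in that order.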
+ */ + if (have_timing_info && !perf_cpu_map__empty(cpus) && + !record_opts__no_switch_events(opts)) { + if (perf_can_record_switch_events()) { + bool cpu_wide = !target__none(&opts->target) && + !target__has_task(&opts->target); + + if (!cpu_wide && perf_can_record_cpu_wide()) { + struct evsel *switch_evsel; + + err = parse_events(evlist, "dummy:u", NULL); + if (err) + return err; + + switch_evsel = evlist__last(evlist); + + switch_evsel->core.attr.freq = 0; + switch_evsel->core.attr.sample_period = 1; + switch_evsel->core.attr.context_switch = 1; + + switch_evsel->core.system_wide = true; + switch_evsel->no_aux_samples = true; + switch_evsel->immediate = true; + + evsel__set_sample_bit(switch_evsel, TID); + evsel__set_sample_bit(switch_evsel, TIME); + evsel__set_sample_bit(switch_evsel, CPU); + evsel__reset_sample_bit(switch_evsel, BRANCH_STACK); + + opts->record_switch_events = false; + ptr->have_sched_switch = 3; + } else { + opts->record_switch_events = true; + need_immediate = true; + if (cpu_wide) + ptr->have_sched_switch = 3; + else + ptr->have_sched_switch = 2; + } + } else { + err = intel_pt_track_switches(evlist); + if (err == -EPERM) + pr_debug2("Unable to select sched:sched_switch\n"); + else if (err) + return err; + else + ptr->have_sched_switch = 1; + } + } + + if (have_timing_info && !intel_pt_evsel->core.attr.exclude_kernel && + perf_can_record_text_poke_events() && perf_can_record_cpu_wide()) + opts->text_poke = true; + + if (intel_pt_evsel) { + /* + * To obtain the auxtrace buffer file descriptor, the auxtrace + * event must come first. + */ + perf_evlist__to_front(evlist, intel_pt_evsel); + /* + * In the case of per-cpu mmaps, we need the CPU on the + * AUX event. + */ + if (!perf_cpu_map__empty(cpus)) + evsel__set_sample_bit(intel_pt_evsel, CPU); + } + + /* Add dummy event to keep tracking */ + if (opts->full_auxtrace) { + struct evsel *tracking_evsel; + + err = parse_events(evlist, "dummy:u", NULL); + if (err) + return err; + + tracking_evsel = evlist__last(evlist); + + perf_evlist__set_tracking_event(evlist, tracking_evsel); + + tracking_evsel->core.attr.freq = 0; + tracking_evsel->core.attr.sample_period = 1; + + tracking_evsel->no_aux_samples = true; + if (need_immediate) + tracking_evsel->immediate = true; + + /* In per-cpu case, always need the time of mmap events etc */ + if (!perf_cpu_map__empty(cpus)) { + evsel__set_sample_bit(tracking_evsel, TIME); + /* And the CPU for switch events */ + evsel__set_sample_bit(tracking_evsel, CPU); + } + evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK); + } + + /* + * Warn the user when we do not have enough information to decode i.e. + * per-cpu with no sched_switch (except workload-only). 
+ */ + if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) && + !target__none(&opts->target) && + !intel_pt_evsel->core.attr.exclude_user) + ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n"); + + return 0; +} + +static int intel_pt_snapshot_start(struct auxtrace_record *itr) +{ + struct intel_pt_recording *ptr = + container_of(itr, struct intel_pt_recording, itr); + struct evsel *evsel; + + evlist__for_each_entry(ptr->evlist, evsel) { + if (evsel->core.attr.type == ptr->intel_pt_pmu->type) + return evsel__disable(evsel); + } + return -EINVAL; +} + +static int intel_pt_snapshot_finish(struct auxtrace_record *itr) +{ + struct intel_pt_recording *ptr = + container_of(itr, struct intel_pt_recording, itr); + struct evsel *evsel; + + evlist__for_each_entry(ptr->evlist, evsel) { + if (evsel->core.attr.type == ptr->intel_pt_pmu->type) + return evsel__enable(evsel); + } + return -EINVAL; +} + +static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx) +{ + const size_t sz = sizeof(struct intel_pt_snapshot_ref); + int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2; + struct intel_pt_snapshot_ref *refs; + + if (!new_cnt) + new_cnt = 16; + + while (new_cnt <= idx) + new_cnt *= 2; + + refs = calloc(new_cnt, sz); + if (!refs) + return -ENOMEM; + + memcpy(refs, ptr->snapshot_refs, cnt * sz); + + ptr->snapshot_refs = refs; + ptr->snapshot_ref_cnt = new_cnt; + + return 0; +} + +static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr) +{ + int i; + + for (i = 0; i < ptr->snapshot_ref_cnt; i++) + zfree(&ptr->snapshot_refs[i].ref_buf); + zfree(&ptr->snapshot_refs); +} + +static void intel_pt_recording_free(struct auxtrace_record *itr) +{ + struct intel_pt_recording *ptr = + container_of(itr, struct intel_pt_recording, itr); + + intel_pt_free_snapshot_refs(ptr); + free(ptr); +} + +static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx, + size_t snapshot_buf_size) +{ + size_t ref_buf_size = ptr->snapshot_ref_buf_size; + void *ref_buf; + + ref_buf = zalloc(ref_buf_size); + if (!ref_buf) + return -ENOMEM; + + ptr->snapshot_refs[idx].ref_buf = ref_buf; + ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size; + + return 0; +} + +static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr, + size_t snapshot_buf_size) +{ + const size_t max_size = 256 * 1024; + size_t buf_size = 0, psb_period; + + if (ptr->snapshot_size <= 64 * 1024) + return 0; + + psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist); + if (psb_period) + buf_size = psb_period * 2; + + if (!buf_size || buf_size > max_size) + buf_size = max_size; + + if (buf_size >= snapshot_buf_size) + return 0; + + if (buf_size >= ptr->snapshot_size / 2) + return 0; + + return buf_size; +} + +static int intel_pt_snapshot_init(struct intel_pt_recording *ptr, + size_t snapshot_buf_size) +{ + if (ptr->snapshot_init_done) + return 0; + + ptr->snapshot_init_done = true; + + ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr, + snapshot_buf_size); + + return 0; +} + +/** + * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer. + * @buf1: first buffer + * @compare_size: number of bytes to compare + * @buf2: second buffer (a circular buffer) + * @offs2: offset in second buffer + * @buf2_size: size of second buffer + * + * The comparison allows for the possibility that the bytes to compare in the + * circular buffer are not contiguous. It is assumed that @compare_size <= + * @buf2_size. 
This function returns %false if the bytes are identical, %true + * otherwise. + */ +static bool intel_pt_compare_buffers(void *buf1, size_t compare_size, + void *buf2, size_t offs2, size_t buf2_size) +{ + size_t end2 = offs2 + compare_size, part_size; + + if (end2 <= buf2_size) + return memcmp(buf1, buf2 + offs2, compare_size); + + part_size = end2 - buf2_size; + if (memcmp(buf1, buf2 + offs2, part_size)) + return true; + + compare_size -= part_size; + + return memcmp(buf1 + part_size, buf2, compare_size); +} + +static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset, + size_t ref_size, size_t buf_size, + void *data, size_t head) +{ + size_t ref_end = ref_offset + ref_size; + + if (ref_end > buf_size) { + if (head > ref_offset || head < ref_end - buf_size) + return true; + } else if (head > ref_offset && head < ref_end) { + return true; + } + + return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset, + buf_size); +} + +static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size, + void *data, size_t head) +{ + if (head >= ref_size) { + memcpy(ref_buf, data + head - ref_size, ref_size); + } else { + memcpy(ref_buf, data, head); + ref_size -= head; + memcpy(ref_buf + head, data + buf_size - ref_size, ref_size); + } +} + +static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx, + struct auxtrace_mmap *mm, unsigned char *data, + u64 head) +{ + struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx]; + bool wrapped; + + wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset, + ptr->snapshot_ref_buf_size, mm->len, + data, head); + + intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len, + data, head); + + return wrapped; +} + +static bool intel_pt_first_wrap(u64 *data, size_t buf_size) +{ + int i, a, b; + + b = buf_size >> 3; + a = b - 512; + if (a < 0) + a = 0; + + for (i = a; i < b; i++) { + if (data[i]) + return true; + } + + return false; +} + +static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx, + struct auxtrace_mmap *mm, unsigned char *data, + u64 *head, u64 *old) +{ + struct intel_pt_recording *ptr = + container_of(itr, struct intel_pt_recording, itr); + bool wrapped; + int err; + + pr_debug3("%s: mmap index %d old head %zu new head %zu\n", + __func__, idx, (size_t)*old, (size_t)*head); + + err = intel_pt_snapshot_init(ptr, mm->len); + if (err) + goto out_err; + + if (idx >= ptr->snapshot_ref_cnt) { + err = intel_pt_alloc_snapshot_refs(ptr, idx); + if (err) + goto out_err; + } + + if (ptr->snapshot_ref_buf_size) { + if (!ptr->snapshot_refs[idx].ref_buf) { + err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len); + if (err) + goto out_err; + } + wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head); + } else { + wrapped = ptr->snapshot_refs[idx].wrapped; + if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) { + ptr->snapshot_refs[idx].wrapped = true; + wrapped = true; + } + } + + /* + * In full trace mode 'head' continually increases. However in snapshot + * mode 'head' is an offset within the buffer. Here 'old' and 'head' + * are adjusted to match the full trace case which expects that 'old' is + * always less than 'head'. + */ + if (wrapped) { + *old = *head; + *head += mm->len; + } else { + if (mm->mask) + *old &= mm->mask; + else + *old %= mm->len; + if (*old > *head) + *head += mm->len; + } + + pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n", + __func__, wrapped ? 
"" : "not ", (size_t)*old, (size_t)*head); + + return 0; + +out_err: + pr_err("%s: failed, error %d\n", __func__, err); + return err; +} + +static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused) +{ + return rdtsc(); +} + +struct auxtrace_record *intel_pt_recording_init(int *err) +{ + struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME); + struct intel_pt_recording *ptr; + + if (!intel_pt_pmu) + return NULL; + + if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) { + *err = -errno; + return NULL; + } + + ptr = zalloc(sizeof(struct intel_pt_recording)); + if (!ptr) { + *err = -ENOMEM; + return NULL; + } + + ptr->intel_pt_pmu = intel_pt_pmu; + ptr->itr.pmu = intel_pt_pmu; + ptr->itr.recording_options = intel_pt_recording_options; + ptr->itr.info_priv_size = intel_pt_info_priv_size; + ptr->itr.info_fill = intel_pt_info_fill; + ptr->itr.free = intel_pt_recording_free; + ptr->itr.snapshot_start = intel_pt_snapshot_start; + ptr->itr.snapshot_finish = intel_pt_snapshot_finish; + ptr->itr.find_snapshot = intel_pt_find_snapshot; + ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options; + ptr->itr.reference = intel_pt_reference; + ptr->itr.read_finish = auxtrace_record__read_finish; + /* + * Decoding starts at a PSB packet. Minimum PSB period is 2K so 4K + * should give at least 1 PSB per sample. + */ + ptr->itr.default_aux_sample_size = 4096; + return &ptr->itr; +} diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c new file mode 100644 index 000000000..072920475 --- /dev/null +++ b/tools/perf/arch/x86/util/kvm-stat.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <errno.h> +#include <string.h> +#include "../../../util/kvm-stat.h" +#include "../../../util/evsel.h" +#include <asm/svm.h> +#include <asm/vmx.h> +#include <asm/kvm.h> + +define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS); +define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS); + +static struct kvm_events_ops exit_events = { + .is_begin_event = exit_event_begin, + .is_end_event = exit_event_end, + .decode_key = exit_event_decode_key, + .name = "VM-EXIT" +}; + +const char *vcpu_id_str = "vcpu_id"; +const int decode_str_len = 20; +const char *kvm_exit_reason = "exit_reason"; +const char *kvm_entry_trace = "kvm:kvm_entry"; +const char *kvm_exit_trace = "kvm:kvm_exit"; + +/* + * For the mmio events, we treat: + * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry + * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...). + */ +static void mmio_event_get_key(struct evsel *evsel, struct perf_sample *sample, + struct event_key *key) +{ + key->key = evsel__intval(evsel, sample, "gpa"); + key->info = evsel__intval(evsel, sample, "type"); +} + +#define KVM_TRACE_MMIO_READ_UNSATISFIED 0 +#define KVM_TRACE_MMIO_READ 1 +#define KVM_TRACE_MMIO_WRITE 2 + +static bool mmio_event_begin(struct evsel *evsel, + struct perf_sample *sample, struct event_key *key) +{ + /* MMIO read begin event in kernel. */ + if (kvm_exit_event(evsel)) + return true; + + /* MMIO write begin event in kernel. */ + if (!strcmp(evsel->name, "kvm:kvm_mmio") && + evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) { + mmio_event_get_key(evsel, sample, key); + return true; + } + + return false; +} + +static bool mmio_event_end(struct evsel *evsel, struct perf_sample *sample, + struct event_key *key) +{ + /* MMIO write end event in kernel. 
*/ + if (kvm_entry_event(evsel)) + return true; + + /* MMIO read end event in kernel.*/ + if (!strcmp(evsel->name, "kvm:kvm_mmio") && + evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) { + mmio_event_get_key(evsel, sample, key); + return true; + } + + return false; +} + +static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused, + struct event_key *key, + char *decode) +{ + scnprintf(decode, decode_str_len, "%#lx:%s", + (unsigned long)key->key, + key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R"); +} + +static struct kvm_events_ops mmio_events = { + .is_begin_event = mmio_event_begin, + .is_end_event = mmio_event_end, + .decode_key = mmio_event_decode_key, + .name = "MMIO Access" +}; + + /* The time of emulation pio access is from kvm_pio to kvm_entry. */ +static void ioport_event_get_key(struct evsel *evsel, + struct perf_sample *sample, + struct event_key *key) +{ + key->key = evsel__intval(evsel, sample, "port"); + key->info = evsel__intval(evsel, sample, "rw"); +} + +static bool ioport_event_begin(struct evsel *evsel, + struct perf_sample *sample, + struct event_key *key) +{ + if (!strcmp(evsel->name, "kvm:kvm_pio")) { + ioport_event_get_key(evsel, sample, key); + return true; + } + + return false; +} + +static bool ioport_event_end(struct evsel *evsel, + struct perf_sample *sample __maybe_unused, + struct event_key *key __maybe_unused) +{ + return kvm_entry_event(evsel); +} + +static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused, + struct event_key *key, + char *decode) +{ + scnprintf(decode, decode_str_len, "%#llx:%s", + (unsigned long long)key->key, + key->info ? "POUT" : "PIN"); +} + +static struct kvm_events_ops ioport_events = { + .is_begin_event = ioport_event_begin, + .is_end_event = ioport_event_end, + .decode_key = ioport_event_decode_key, + .name = "IO Port Access" +}; + +const char *kvm_events_tp[] = { + "kvm:kvm_entry", + "kvm:kvm_exit", + "kvm:kvm_mmio", + "kvm:kvm_pio", + NULL, +}; + +struct kvm_reg_events_ops kvm_reg_events_ops[] = { + { .name = "vmexit", .ops = &exit_events }, + { .name = "mmio", .ops = &mmio_events }, + { .name = "ioport", .ops = &ioport_events }, + { NULL, NULL }, +}; + +const char * const kvm_skip_events[] = { + "HLT", + NULL, +}; + +int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid) +{ + if (strstr(cpuid, "Intel")) { + kvm->exit_reasons = vmx_exit_reasons; + kvm->exit_reasons_isa = "VMX"; + } else if (strstr(cpuid, "AMD") || strstr(cpuid, "Hygon")) { + kvm->exit_reasons = svm_exit_reasons; + kvm->exit_reasons_isa = "SVM"; + } else + return -ENOTSUP; + + return 0; +} diff --git a/tools/perf/arch/x86/util/machine.c b/tools/perf/arch/x86/util/machine.c new file mode 100644 index 000000000..31679c35d --- /dev/null +++ b/tools/perf/arch/x86/util/machine.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> +#include <linux/string.h> +#include <limits.h> +#include <stdlib.h> + +#include <internal/lib.h> // page_size +#include "../../../util/machine.h" +#include "../../../util/map.h" +#include "../../../util/symbol.h" +#include <linux/ctype.h> + +#include <symbol/kallsyms.h> + +#if defined(__x86_64__) + +struct extra_kernel_map_info { + int cnt; + int max_cnt; + struct extra_kernel_map *maps; + bool get_entry_trampolines; + u64 entry_trampoline; +}; + +static int add_extra_kernel_map(struct extra_kernel_map_info *mi, u64 start, + u64 end, u64 pgoff, const char *name) +{ + if (mi->cnt >= mi->max_cnt) { + void *buf; + size_t sz; + + mi->max_cnt = mi->max_cnt ? 
mi->max_cnt * 2 : 32; + sz = sizeof(struct extra_kernel_map) * mi->max_cnt; + buf = realloc(mi->maps, sz); + if (!buf) + return -1; + mi->maps = buf; + } + + mi->maps[mi->cnt].start = start; + mi->maps[mi->cnt].end = end; + mi->maps[mi->cnt].pgoff = pgoff; + strlcpy(mi->maps[mi->cnt].name, name, KMAP_NAME_LEN); + + mi->cnt += 1; + + return 0; +} + +static int find_extra_kernel_maps(void *arg, const char *name, char type, + u64 start) +{ + struct extra_kernel_map_info *mi = arg; + + if (!mi->entry_trampoline && kallsyms2elf_binding(type) == STB_GLOBAL && + !strcmp(name, "_entry_trampoline")) { + mi->entry_trampoline = start; + return 0; + } + + if (is_entry_trampoline(name)) { + u64 end = start + page_size; + + return add_extra_kernel_map(mi, start, end, 0, name); + } + + return 0; +} + +int machine__create_extra_kernel_maps(struct machine *machine, + struct dso *kernel) +{ + struct extra_kernel_map_info mi = { .cnt = 0, }; + char filename[PATH_MAX]; + int ret; + int i; + + machine__get_kallsyms_filename(machine, filename, PATH_MAX); + + if (symbol__restricted_filename(filename, "/proc/kallsyms")) + return 0; + + ret = kallsyms__parse(filename, &mi, find_extra_kernel_maps); + if (ret) + goto out_free; + + if (!mi.entry_trampoline) + goto out_free; + + for (i = 0; i < mi.cnt; i++) { + struct extra_kernel_map *xm = &mi.maps[i]; + + xm->pgoff = mi.entry_trampoline; + ret = machine__create_extra_kernel_map(machine, kernel, xm); + if (ret) + goto out_free; + } + + machine->trampolines_mapped = mi.cnt; +out_free: + free(mi.maps); + return ret; +} + +#endif diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c new file mode 100644 index 000000000..fca81b39b --- /dev/null +++ b/tools/perf/arch/x86/util/perf_regs.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <errno.h> +#include <string.h> +#include <regex.h> +#include <linux/kernel.h> +#include <linux/zalloc.h> + +#include "../../../perf-sys.h" +#include "../../../util/perf_regs.h" +#include "../../../util/debug.h" +#include "../../../util/event.h" + +const struct sample_reg sample_reg_masks[] = { + SMPL_REG(AX, PERF_REG_X86_AX), + SMPL_REG(BX, PERF_REG_X86_BX), + SMPL_REG(CX, PERF_REG_X86_CX), + SMPL_REG(DX, PERF_REG_X86_DX), + SMPL_REG(SI, PERF_REG_X86_SI), + SMPL_REG(DI, PERF_REG_X86_DI), + SMPL_REG(BP, PERF_REG_X86_BP), + SMPL_REG(SP, PERF_REG_X86_SP), + SMPL_REG(IP, PERF_REG_X86_IP), + SMPL_REG(FLAGS, PERF_REG_X86_FLAGS), + SMPL_REG(CS, PERF_REG_X86_CS), + SMPL_REG(SS, PERF_REG_X86_SS), +#ifdef HAVE_ARCH_X86_64_SUPPORT + SMPL_REG(R8, PERF_REG_X86_R8), + SMPL_REG(R9, PERF_REG_X86_R9), + SMPL_REG(R10, PERF_REG_X86_R10), + SMPL_REG(R11, PERF_REG_X86_R11), + SMPL_REG(R12, PERF_REG_X86_R12), + SMPL_REG(R13, PERF_REG_X86_R13), + SMPL_REG(R14, PERF_REG_X86_R14), + SMPL_REG(R15, PERF_REG_X86_R15), +#endif + SMPL_REG2(XMM0, PERF_REG_X86_XMM0), + SMPL_REG2(XMM1, PERF_REG_X86_XMM1), + SMPL_REG2(XMM2, PERF_REG_X86_XMM2), + SMPL_REG2(XMM3, PERF_REG_X86_XMM3), + SMPL_REG2(XMM4, PERF_REG_X86_XMM4), + SMPL_REG2(XMM5, PERF_REG_X86_XMM5), + SMPL_REG2(XMM6, PERF_REG_X86_XMM6), + SMPL_REG2(XMM7, PERF_REG_X86_XMM7), + SMPL_REG2(XMM8, PERF_REG_X86_XMM8), + SMPL_REG2(XMM9, PERF_REG_X86_XMM9), + SMPL_REG2(XMM10, PERF_REG_X86_XMM10), + SMPL_REG2(XMM11, PERF_REG_X86_XMM11), + SMPL_REG2(XMM12, PERF_REG_X86_XMM12), + SMPL_REG2(XMM13, PERF_REG_X86_XMM13), + SMPL_REG2(XMM14, PERF_REG_X86_XMM14), + SMPL_REG2(XMM15, PERF_REG_X86_XMM15), + SMPL_REG_END +}; + +struct sdt_name_reg { + const char *sdt_name; + const char 
*uprobe_name;
+};
+#define SDT_NAME_REG(n, m) {.sdt_name = "%" #n, .uprobe_name = "%" #m}
+#define SDT_NAME_REG_END {.sdt_name = NULL, .uprobe_name = NULL}
+
+static const struct sdt_name_reg sdt_reg_tbl[] = {
+	SDT_NAME_REG(eax, ax),
+	SDT_NAME_REG(rax, ax),
+	SDT_NAME_REG(al, ax),
+	SDT_NAME_REG(ah, ax),
+	SDT_NAME_REG(ebx, bx),
+	SDT_NAME_REG(rbx, bx),
+	SDT_NAME_REG(bl, bx),
+	SDT_NAME_REG(bh, bx),
+	SDT_NAME_REG(ecx, cx),
+	SDT_NAME_REG(rcx, cx),
+	SDT_NAME_REG(cl, cx),
+	SDT_NAME_REG(ch, cx),
+	SDT_NAME_REG(edx, dx),
+	SDT_NAME_REG(rdx, dx),
+	SDT_NAME_REG(dl, dx),
+	SDT_NAME_REG(dh, dx),
+	SDT_NAME_REG(esi, si),
+	SDT_NAME_REG(rsi, si),
+	SDT_NAME_REG(sil, si),
+	SDT_NAME_REG(edi, di),
+	SDT_NAME_REG(rdi, di),
+	SDT_NAME_REG(dil, di),
+	SDT_NAME_REG(ebp, bp),
+	SDT_NAME_REG(rbp, bp),
+	SDT_NAME_REG(bpl, bp),
+	SDT_NAME_REG(rsp, sp),
+	SDT_NAME_REG(esp, sp),
+	SDT_NAME_REG(spl, sp),
+
+	/* rNN registers */
+	SDT_NAME_REG(r8b, r8),
+	SDT_NAME_REG(r8w, r8),
+	SDT_NAME_REG(r8d, r8),
+	SDT_NAME_REG(r9b, r9),
+	SDT_NAME_REG(r9w, r9),
+	SDT_NAME_REG(r9d, r9),
+	SDT_NAME_REG(r10b, r10),
+	SDT_NAME_REG(r10w, r10),
+	SDT_NAME_REG(r10d, r10),
+	SDT_NAME_REG(r11b, r11),
+	SDT_NAME_REG(r11w, r11),
+	SDT_NAME_REG(r11d, r11),
+	SDT_NAME_REG(r12b, r12),
+	SDT_NAME_REG(r12w, r12),
+	SDT_NAME_REG(r12d, r12),
+	SDT_NAME_REG(r13b, r13),
+	SDT_NAME_REG(r13w, r13),
+	SDT_NAME_REG(r13d, r13),
+	SDT_NAME_REG(r14b, r14),
+	SDT_NAME_REG(r14w, r14),
+	SDT_NAME_REG(r14d, r14),
+	SDT_NAME_REG(r15b, r15),
+	SDT_NAME_REG(r15w, r15),
+	SDT_NAME_REG(r15d, r15),
+	SDT_NAME_REG_END,
+};
+
+/*
+ * Perf only supports an OP in +/-NUM(REG) form. The plus-minus sign,
+ * NUM and parentheses are optional; only REG is mandatory.
+ *
+ * SDT events also support indirect addressing mode with a symbol as
+ * offset, scaled mode and constants in OP, but perf does not support
+ * them yet. Below are a few examples.
+ *
+ * OP with scaled mode:
+ *	(%rax,%rsi,8)
+ *	10(%rax,%rsi,8)
+ *
+ * OP with indirect addressing mode:
+ *	check_action(%rip)
+ *	mp_+52(%rip)
+ *	44+mp_(%rip)
+ *
+ * OP with constant values:
+ *	$0
+ *	$123
+ *	$-1
+ */
+#define SDT_OP_REGEX "^([+\\-]?)([0-9]*)(\\(?)(%[a-z][a-z0-9]+)(\\)?)$"
+
+static regex_t sdt_op_regex;
+
+static int sdt_init_op_regex(void)
+{
+	static int initialized;
+	int ret = 0;
+
+	if (initialized)
+		return 0;
+
+	ret = regcomp(&sdt_op_regex, SDT_OP_REGEX, REG_EXTENDED);
+	if (ret) {
+		/* regcomp() returns a positive error code on failure */
+		pr_debug4("Regex compilation error.\n");
+		return -EINVAL;
+	}
+
+	initialized = 1;
+	return 0;
+}
+
+/*
+ * The maximum x86 register name length is 5 (ex: %r15d), so the 6th
+ * char always contains NULL. This lets us find the register name
+ * length using strlen(), instead of maintaining one more variable.
+ */
+#define SDT_REG_NAME_SIZE 6
+
+/*
+ * The uprobe parser does not support all gas register names, so we
+ * have to replace them (ex. for x86_64: %rax -> %ax).
+ * Note: if a register does not require renaming, it is copied as-is,
+ * but it is never left empty.
+ */
+static void sdt_rename_register(char *sdt_reg, int sdt_len, char *uprobe_reg)
+{
+	int i = 0;
+
+	for (i = 0; sdt_reg_tbl[i].sdt_name != NULL; i++) {
+		if (!strncmp(sdt_reg_tbl[i].sdt_name, sdt_reg, sdt_len)) {
+			strcpy(uprobe_reg, sdt_reg_tbl[i].uprobe_name);
+			return;
+		}
+	}
+
+	strncpy(uprobe_reg, sdt_reg, sdt_len);
+}
+
+int arch_sdt_arg_parse_op(char *old_op, char **new_op)
+{
+	char new_reg[SDT_REG_NAME_SIZE] = {0};
+	int new_len = 0, ret;
+	/*
+	 * rm[0]: +/-NUM(REG)
+	 * rm[1]: +/-
+	 * rm[2]: NUM
+	 * rm[3]: (
+	 * rm[4]: REG
+	 * rm[5]: )
+	 */
+	regmatch_t rm[6];
+	/*
+	 * The maximum prefix length is 2, as it may contain a sign (+/-)
+	 * and the displacement 0 (both the sign and displacement 0 are
+	 * optional, so it may also be empty). One more character holds
+	 * the trailing NULL so that strlen() can be used to find the
+	 * prefix length, instead of maintaining one more variable.
+	 */
+	char prefix[3] = {0};
+
+	ret = sdt_init_op_regex();
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Skip the OP if it is unsupported, does not match the regex,
+	 * or its register name is too long.
+	 */
+	if (strchr(old_op, ',') || strchr(old_op, '$') ||
+	    regexec(&sdt_op_regex, old_op, 6, rm, 0) ||
+	    rm[4].rm_eo - rm[4].rm_so > SDT_REG_NAME_SIZE) {
+		pr_debug4("Skipping unsupported SDT argument: %s\n", old_op);
+		return SDT_ARG_SKIP;
+	}
+
+	/*
+	 * Prepare the prefix.
+	 * If the SDT OP has parentheses but does not provide a
+	 * displacement, add 0 for the displacement.
+	 *	SDT		Uprobe		Prefix
+	 *	-----------------------------
+	 *	+24(%rdi)	+24(%di)	+
+	 *	24(%rdi)	+24(%di)	+
+	 *	%rdi		%di
+	 *	(%rdi)		+0(%di)		+0
+	 *	-80(%rbx)	-80(%bx)	-
+	 */
+	if (rm[3].rm_so != rm[3].rm_eo) {
+		if (rm[1].rm_so != rm[1].rm_eo)
+			prefix[0] = *(old_op + rm[1].rm_so);
+		else if (rm[2].rm_so != rm[2].rm_eo)
+			prefix[0] = '+';
+		else
+			scnprintf(prefix, sizeof(prefix), "+0");
+	}
+
+	/* Rename the register */
+	sdt_rename_register(old_op + rm[4].rm_so, rm[4].rm_eo - rm[4].rm_so,
+			    new_reg);
+
+	/* Prepare the final OP, which should be valid for uprobe_events */
+	new_len = strlen(prefix) +
+		  (rm[2].rm_eo - rm[2].rm_so) +
+		  (rm[3].rm_eo - rm[3].rm_so) +
+		  strlen(new_reg) +
+		  (rm[5].rm_eo - rm[5].rm_so) +
+		  1;					/* NULL */
+
+	*new_op = zalloc(new_len);
+	if (!*new_op)
+		return -ENOMEM;
+
+	scnprintf(*new_op, new_len, "%.*s%.*s%.*s%.*s%.*s",
+		  (int)strlen(prefix), prefix,
+		  (int)(rm[2].rm_eo - rm[2].rm_so), old_op + rm[2].rm_so,
+		  (int)(rm[3].rm_eo - rm[3].rm_so), old_op + rm[3].rm_so,
+		  (int)strlen(new_reg), new_reg,
+		  (int)(rm[5].rm_eo - rm[5].rm_so), old_op + rm[5].rm_so);
+
+	return SDT_ARG_VALID;
+}
+
+uint64_t arch__intr_reg_mask(void)
+{
+	struct perf_event_attr attr = {
+		.type			= PERF_TYPE_HARDWARE,
+		.config			= PERF_COUNT_HW_CPU_CYCLES,
+		.sample_type		= PERF_SAMPLE_REGS_INTR,
+		.sample_regs_intr	= PERF_REG_EXTENDED_MASK,
+		.precise_ip		= 1,
+		.disabled		= 1,
+		.exclude_kernel		= 1,
+	};
+	int fd;
+	/*
+	 * In an unnamed union, init it here to build on older gcc versions
+	 */
+	attr.sample_period = 1;
+
+	event_attr_init(&attr);
+
+	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+	if (fd != -1) {
+		close(fd);
+		return (PERF_REG_EXTENDED_MASK | PERF_REGS_MASK);
+	}
+
+	return PERF_REGS_MASK;
+}
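The operand rewrite implemented by arch_sdt_arg_parse_op() above is easiest to see with concrete inputs. The following is a minimal standalone sketch, not perf code: it reuses the same SDT_OP_REGEX and prefix rules on a few sample operands, with a deliberately reduced rename table and a hypothetical main() harness standing in for perf's sdt_reg_tbl[] and uprobe plumbing.

/*
 * Standalone sketch (not perf code): applies the SDT_OP_REGEX and
 * prefix rules shown above to demo operands. The rename table here
 * is a tiny illustrative subset of sdt_reg_tbl[].
 */
#include <regex.h>
#include <stdio.h>
#include <string.h>

#define SDT_OP_REGEX "^([+\\-]?)([0-9]*)(\\(?)(%[a-z][a-z0-9]+)(\\)?)$"

static void rename_reg(const char *reg, int len, char *out)
{
	static const char * const tbl[][2] = {
		{ "%rax", "%ax" }, { "%rbx", "%bx" },
		{ "%rdi", "%di" }, { "%rbp", "%bp" },
	};
	size_t i;

	for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
		if ((int)strlen(tbl[i][0]) == len &&
		    !strncmp(tbl[i][0], reg, len)) {
			strcpy(out, tbl[i][1]);
			return;
		}
	}
	/* Not in the table: keep the name as-is (cf. sdt_rename_register) */
	memcpy(out, reg, len);
	out[len] = '\0';
}

int main(void)
{
	const char * const ops[] = { "-80(%rbx)", "(%rdi)", "%rax", "$123" };
	regex_t re;
	regmatch_t rm[6];
	size_t i;

	if (regcomp(&re, SDT_OP_REGEX, REG_EXTENDED))
		return 1;

	for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
		const char *op = ops[i];
		char reg[8], prefix[3] = "", out[32];

		/* Same rejects as arch_sdt_arg_parse_op() */
		if (strchr(op, ',') || strchr(op, '$') ||
		    regexec(&re, op, 6, rm, 0)) {
			printf("%-12s -> skipped (unsupported)\n", op);
			continue;
		}

		rename_reg(op + rm[4].rm_so,
			   (int)(rm[4].rm_eo - rm[4].rm_so), reg);

		/* Prefix rules from the table in the comment above */
		if (rm[3].rm_so != rm[3].rm_eo) {
			if (rm[1].rm_so != rm[1].rm_eo)
				prefix[0] = op[rm[1].rm_so];
			else if (rm[2].rm_so != rm[2].rm_eo)
				prefix[0] = '+';
			else
				strcpy(prefix, "+0");
		}

		snprintf(out, sizeof(out), "%s%.*s%.*s%s%.*s", prefix,
			 (int)(rm[2].rm_eo - rm[2].rm_so), op + rm[2].rm_so,
			 (int)(rm[3].rm_eo - rm[3].rm_so), op + rm[3].rm_so,
			 reg,
			 (int)(rm[5].rm_eo - rm[5].rm_so), op + rm[5].rm_so);

		printf("%-12s -> %s\n", op, out);
	}

	regfree(&re);
	return 0;
}

Built with something like 'cc -o sdt-op sdt-op.c', it prints '-80(%rbx) -> -80(%bx)', '(%rdi) -> +0(%di)', '%rax -> %ax' and reports '$123' as skipped, matching the prefix table in the comment above.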
diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
new file mode 100644
index 000000000..d48d60851
--- /dev/null
+++ b/tools/perf/arch/x86/util/pmu.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/perf_event.h>
+
+#include "../../../util/intel-pt.h"
+#include "../../../util/intel-bts.h"
+#include "../../../util/pmu.h"
+
+struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
+{
+#ifdef HAVE_AUXTRACE_SUPPORT
+	if (!strcmp(pmu->name, INTEL_PT_PMU_NAME))
+		return intel_pt_pmu_default_config(pmu);
+	if (!strcmp(pmu->name, INTEL_BTS_PMU_NAME))
+		pmu->selectable = true;
+#endif
+	return NULL;
+}
diff --git a/tools/perf/arch/x86/util/topdown.c b/tools/perf/arch/x86/util/topdown.c
new file mode 100644
index 000000000..2f3d96aa9
--- /dev/null
+++ b/tools/perf/arch/x86/util/topdown.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include "api/fs/fs.h"
+#include "util/pmu.h"
+#include "util/topdown.h"
+
+/*
+ * Check whether we can use a group for topdown.
+ * Without a group we may get bad results due to multiplexing.
+ */
+bool arch_topdown_check_group(bool *warn)
+{
+	int n;
+
+	if (sysctl__read_int("kernel/nmi_watchdog", &n) < 0)
+		return false;
+	if (n > 0) {
+		*warn = true;
+		return false;
+	}
+	return true;
+}
+
+void arch_topdown_group_warn(void)
+{
+	fprintf(stderr,
+		"nmi_watchdog enabled with topdown. May give wrong results.\n"
+		"Disable with echo 0 > /proc/sys/kernel/nmi_watchdog\n");
+}
+
+#define TOPDOWN_SLOTS 0x0400
+
+static bool is_topdown_slots_event(struct evsel *counter)
+{
+	if (!counter->pmu_name)
+		return false;
+
+	if (strcmp(counter->pmu_name, "cpu"))
+		return false;
+
+	if (counter->core.attr.config == TOPDOWN_SLOTS)
+		return true;
+
+	return false;
+}
+
+/*
+ * Check whether a topdown group supports sample-read.
+ *
+ * Only the Topdown metric events support sample-read. The slots
+ * event must be the leader of the topdown group.
+ */
+bool arch_topdown_sample_read(struct evsel *leader)
+{
+	if (!pmu_have_event("cpu", "slots"))
+		return false;
+
+	if (is_topdown_slots_event(leader))
+		return true;
+
+	return false;
+}
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
new file mode 100644
index 000000000..559365f8f
--- /dev/null
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+
+#include "../../../util/tsc.h"
+
+u64 rdtsc(void)
+{
+	unsigned int low, high;
+
+	asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+	return low | ((u64)high) << 32;
+}
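rdtsc() above is the arch timestamp hook: intel_pt_recording_init() earlier in this series installs it as the auxtrace reference clock and sets JITDUMP_USE_ARCH_TIMESTAMP so that jitdump records share the same time base as Intel PT. The following standalone sketch, not perf code, uses the same inline asm to read the counter twice around a busy loop; the loop bound is an arbitrary assumption and the program is x86-only by construction.

/*
 * Standalone sketch (not perf code): measures the TSC delta across
 * a busy loop with the same rdtsc pattern as the file above.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t rdtsc(void)
{
	unsigned int low, high;

	asm volatile("rdtsc" : "=a" (low), "=d" (high));

	return low | ((uint64_t)high) << 32;
}

int main(void)
{
	uint64_t start, end;
	volatile unsigned long spin;

	start = rdtsc();
	for (spin = 0; spin < 10000000UL; spin++)
		;	/* burn cycles */
	end = rdtsc();

	printf("TSC delta across busy loop: %llu cycles\n",
	       (unsigned long long)(end - start));
	return 0;
}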
diff --git a/tools/perf/arch/x86/util/unwind-libdw.c b/tools/perf/arch/x86/util/unwind-libdw.c
new file mode 100644
index 000000000..eea2bf872
--- /dev/null
+++ b/tools/perf/arch/x86/util/unwind-libdw.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <elfutils/libdwfl.h>
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/event.h"
+
+bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
+{
+	struct unwind_info *ui = arg;
+	struct regs_dump *user_regs = &ui->sample->user_regs;
+	Dwarf_Word dwarf_regs[17];
+	unsigned nregs;
+
+#define REG(r) ({						\
+	Dwarf_Word val = 0;					\
+	perf_reg_value(&val, user_regs, PERF_REG_X86_##r);	\
+	val;							\
+})
+
+	if (user_regs->abi == PERF_SAMPLE_REGS_ABI_32) {
+		dwarf_regs[0] = REG(AX);
+		dwarf_regs[1] = REG(CX);
+		dwarf_regs[2] = REG(DX);
+		dwarf_regs[3] = REG(BX);
+		dwarf_regs[4] = REG(SP);
+		dwarf_regs[5] = REG(BP);
+		dwarf_regs[6] = REG(SI);
+		dwarf_regs[7] = REG(DI);
+		dwarf_regs[8] = REG(IP);
+		nregs = 9;
+	} else {
+		dwarf_regs[0] = REG(AX);
+		dwarf_regs[1] = REG(DX);
+		dwarf_regs[2] = REG(CX);
+		dwarf_regs[3] = REG(BX);
+		dwarf_regs[4] = REG(SI);
+		dwarf_regs[5] = REG(DI);
+		dwarf_regs[6] = REG(BP);
+		dwarf_regs[7] = REG(SP);
+		dwarf_regs[8] = REG(R8);
+		dwarf_regs[9] = REG(R9);
+		dwarf_regs[10] = REG(R10);
+		dwarf_regs[11] = REG(R11);
+		dwarf_regs[12] = REG(R12);
+		dwarf_regs[13] = REG(R13);
+		dwarf_regs[14] = REG(R14);
+		dwarf_regs[15] = REG(R15);
+		dwarf_regs[16] = REG(IP);
+		nregs = 17;
+	}
+
+	return dwfl_thread_state_registers(thread, 0, nregs, dwarf_regs);
+}
diff --git a/tools/perf/arch/x86/util/unwind-libunwind.c b/tools/perf/arch/x86/util/unwind-libunwind.c
new file mode 100644
index 000000000..47357973b
--- /dev/null
+++ b/tools/perf/arch/x86/util/unwind-libunwind.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <errno.h>
+#include "../../util/debug.h"
+#ifndef REMOTE_UNWIND_LIBUNWIND
+#include <libunwind.h>
+#include "perf_regs.h"
+#include "../../util/unwind.h"
+#endif
+
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+int LIBUNWIND__ARCH_REG_ID(int regnum)
+{
+	int id;
+
+	switch (regnum) {
+	case UNW_X86_64_RAX:
+		id = PERF_REG_X86_AX;
+		break;
+	case UNW_X86_64_RDX:
+		id = PERF_REG_X86_DX;
+		break;
+	case UNW_X86_64_RCX:
+		id = PERF_REG_X86_CX;
+		break;
+	case UNW_X86_64_RBX:
+		id = PERF_REG_X86_BX;
+		break;
+	case UNW_X86_64_RSI:
+		id = PERF_REG_X86_SI;
+		break;
+	case UNW_X86_64_RDI:
+		id = PERF_REG_X86_DI;
+		break;
+	case UNW_X86_64_RBP:
+		id = PERF_REG_X86_BP;
+		break;
+	case UNW_X86_64_RSP:
+		id = PERF_REG_X86_SP;
+		break;
+	case UNW_X86_64_R8:
+		id = PERF_REG_X86_R8;
+		break;
+	case UNW_X86_64_R9:
+		id = PERF_REG_X86_R9;
+		break;
+	case UNW_X86_64_R10:
+		id = PERF_REG_X86_R10;
+		break;
+	case UNW_X86_64_R11:
+		id = PERF_REG_X86_R11;
+		break;
+	case UNW_X86_64_R12:
+		id = PERF_REG_X86_R12;
+		break;
+	case UNW_X86_64_R13:
+		id = PERF_REG_X86_R13;
+		break;
+	case UNW_X86_64_R14:
+		id = PERF_REG_X86_R14;
+		break;
+	case UNW_X86_64_R15:
+		id = PERF_REG_X86_R15;
+		break;
+	case UNW_X86_64_RIP:
+		id = PERF_REG_X86_IP;
+		break;
+	default:
+		pr_err("unwind: invalid reg id %d\n", regnum);
+		return -EINVAL;
+	}
+
+	return id;
+}
+#else
+int LIBUNWIND__ARCH_REG_ID(int regnum)
+{
+	int id;
+
+	switch (regnum) {
+	case UNW_X86_EAX:
+		id = PERF_REG_X86_AX;
+		break;
+	case UNW_X86_EDX:
+		id = PERF_REG_X86_DX;
+		break;
+	case UNW_X86_ECX:
+		id = PERF_REG_X86_CX;
+		break;
+	case UNW_X86_EBX:
+		id = PERF_REG_X86_BX;
+		break;
+	case UNW_X86_ESI:
+		id = PERF_REG_X86_SI;
+		break;
+	case UNW_X86_EDI:
+		id = PERF_REG_X86_DI;
+		break;
+	case UNW_X86_EBP:
+		id = PERF_REG_X86_BP;
+		break;
+	case UNW_X86_ESP:
+		id = PERF_REG_X86_SP;
+		break;
+	case UNW_X86_EIP:
+		id = PERF_REG_X86_IP;
+		break;
+	default:
+		pr_err("unwind: invalid reg id %d\n", regnum);
+		return -EINVAL;
+	}
+
+	return id;
+}
+#endif /* HAVE_ARCH_X86_64_SUPPORT */
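For reference, the 64-bit branch of libdw__arch_set_initial_registers() above fills dwarf_regs[0..16] in the System V AMD64 DWARF numbering (rax, rdx, rcx, rbx, rsi, rdi, rbp, rsp, r8 through r15, rip), and LIBUNWIND__ARCH_REG_ID() maps libunwind's register enum onto the same perf register ids. A standalone sketch, not perf code, that prints that numbering as a quick crib sheet:

/*
 * Standalone sketch (not perf code): the x86_64 DWARF register
 * numbering implied by the dwarf_regs[] assignments in
 * libdw__arch_set_initial_registers() above.
 */
#include <stdio.h>

static const char * const dwarf_regs_x86_64[] = {
	"rax", "rdx", "rcx", "rbx", "rsi", "rdi", "rbp", "rsp",
	"r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
	"rip",
};

int main(void)
{
	int i;

	for (i = 0; i < (int)(sizeof(dwarf_regs_x86_64) /
			      sizeof(dwarf_regs_x86_64[0])); i++)
		printf("DWARF reg %2d = %%%s\n", i, dwarf_regs_x86_64[i]);

	return 0;
}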