author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /tools/perf/arch/powerpc/util
parent     Initial commit. (diff)
download   linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/perf/arch/powerpc/util')
-rw-r--r--  tools/perf/arch/powerpc/util/Build                 13
-rw-r--r--  tools/perf/arch/powerpc/util/book3s_hcalls.h      124
-rw-r--r--  tools/perf/arch/powerpc/util/book3s_hv_exits.h     33
-rw-r--r--  tools/perf/arch/powerpc/util/dwarf-regs.c         100
-rw-r--r--  tools/perf/arch/powerpc/util/event.c               60
-rw-r--r--  tools/perf/arch/powerpc/util/evsel.c                8
-rw-r--r--  tools/perf/arch/powerpc/util/header.c              50
-rw-r--r--  tools/perf/arch/powerpc/util/kvm-stat.c           219
-rw-r--r--  tools/perf/arch/powerpc/util/mem-events.c          12
-rw-r--r--  tools/perf/arch/powerpc/util/perf_regs.c          234
-rw-r--r--  tools/perf/arch/powerpc/util/skip-callchain-idx.c 287
-rw-r--r--  tools/perf/arch/powerpc/util/sym-handling.c       143
-rw-r--r--  tools/perf/arch/powerpc/util/unwind-libdw.c        76
-rw-r--r--  tools/perf/arch/powerpc/util/unwind-libunwind.c    92
-rw-r--r--  tools/perf/arch/powerpc/util/utils_header.h        15
15 files changed, 1466 insertions, 0 deletions
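The new files fall roughly into three groups: code-to-name tables and glue for perf kvm stat (book3s_hcalls.h, book3s_hv_exits.h, kvm-stat.c), DWARF and unwind support (dwarf-regs.c, skip-callchain-idx.c, unwind-libdw.c, unwind-libunwind.c), and sampling/header helpers (event.c, evsel.c, header.c, perf_regs.c, mem-events.c, sym-handling.c, utils_header.h). As a small illustration of the first group, the standalone sketch below mirrors how kvm-stat.c walks such a table to translate an exit code into a name; struct exit_reason is a stand-in for perf's exit_reasons_table, and the entries are taken from book3s_hv_exits.h in the diff that follows.

/* Minimal sketch, not part of the patch: linear scan of a code->name
 * table, as done by get_hcall_exit_reason() in kvm-stat.c.  The entry
 * values are copied from book3s_hv_exits.h. */
#include <stdio.h>

struct exit_reason {
	unsigned long exit_code;
	const char *reason;
};

static const struct exit_reason hv_exit_reasons[] = {
	{0x100, "SYSTEM_RESET"},
	{0x300, "DATA_STORAGE"},
	{0x900, "DECREMENTER"},
	{0xc00, "SYSCALL"},
	{0, NULL},	/* NULL-terminated, like perf's generated tables */
};

static const char *exit_reason_name(unsigned long code)
{
	const struct exit_reason *tbl;

	for (tbl = hv_exit_reasons; tbl->reason != NULL; tbl++)
		if (tbl->exit_code == code)
			return tbl->reason;
	return "UNKNOWN";
}

int main(void)
{
	printf("0xc00 -> %s\n", exit_reason_name(0xc00));	/* SYSCALL */
	printf("0x123 -> %s\n", exit_reason_name(0x123));	/* UNKNOWN */
	return 0;
}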
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build new file mode 100644 index 0000000000..9889245c55 --- /dev/null +++ b/tools/perf/arch/powerpc/util/Build @@ -0,0 +1,13 @@ +perf-y += header.o +perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o +perf-y += perf_regs.o +perf-y += mem-events.o +perf-y += sym-handling.o +perf-y += evsel.o +perf-y += event.o + +perf-$(CONFIG_DWARF) += dwarf-regs.o +perf-$(CONFIG_DWARF) += skip-callchain-idx.o + +perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o +perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o diff --git a/tools/perf/arch/powerpc/util/book3s_hcalls.h b/tools/perf/arch/powerpc/util/book3s_hcalls.h new file mode 100644 index 0000000000..488f4339b8 --- /dev/null +++ b/tools/perf/arch/powerpc/util/book3s_hcalls.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_PERF_BOOK3S_HV_HCALLS_H +#define ARCH_PERF_BOOK3S_HV_HCALLS_H + +/* + * PowerPC HCALL codes : hcall code to name mapping + */ +#define kvm_trace_symbol_hcall \ + {0x4, "H_REMOVE"}, \ + {0x8, "H_ENTER"}, \ + {0xc, "H_READ"}, \ + {0x10, "H_CLEAR_MOD"}, \ + {0x14, "H_CLEAR_REF"}, \ + {0x18, "H_PROTECT"}, \ + {0x1c, "H_GET_TCE"}, \ + {0x20, "H_PUT_TCE"}, \ + {0x24, "H_SET_SPRG0"}, \ + {0x28, "H_SET_DABR"}, \ + {0x2c, "H_PAGE_INIT"}, \ + {0x30, "H_SET_ASR"}, \ + {0x34, "H_ASR_ON"}, \ + {0x38, "H_ASR_OFF"}, \ + {0x3c, "H_LOGICAL_CI_LOAD"}, \ + {0x40, "H_LOGICAL_CI_STORE"}, \ + {0x44, "H_LOGICAL_CACHE_LOAD"}, \ + {0x48, "H_LOGICAL_CACHE_STORE"}, \ + {0x4c, "H_LOGICAL_ICBI"}, \ + {0x50, "H_LOGICAL_DCBF"}, \ + {0x54, "H_GET_TERM_CHAR"}, \ + {0x58, "H_PUT_TERM_CHAR"}, \ + {0x5c, "H_REAL_TO_LOGICAL"}, \ + {0x60, "H_HYPERVISOR_DATA"}, \ + {0x64, "H_EOI"}, \ + {0x68, "H_CPPR"}, \ + {0x6c, "H_IPI"}, \ + {0x70, "H_IPOLL"}, \ + {0x74, "H_XIRR"}, \ + {0x78, "H_MIGRATE_DMA"}, \ + {0x7c, "H_PERFMON"}, \ + {0xdc, "H_REGISTER_VPA"}, \ + {0xe0, "H_CEDE"}, \ + {0xe4, "H_CONFER"}, \ + {0xe8, "H_PROD"}, \ + {0xec, "H_GET_PPP"}, \ + {0xf0, "H_SET_PPP"}, \ + {0xf4, "H_PURR"}, \ + {0xf8, "H_PIC"}, \ + {0xfc, "H_REG_CRQ"}, \ + {0x100, "H_FREE_CRQ"}, \ + {0x104, "H_VIO_SIGNAL"}, \ + {0x108, "H_SEND_CRQ"}, \ + {0x110, "H_COPY_RDMA"}, \ + {0x114, "H_REGISTER_LOGICAL_LAN"}, \ + {0x118, "H_FREE_LOGICAL_LAN"}, \ + {0x11c, "H_ADD_LOGICAL_LAN_BUFFER"}, \ + {0x120, "H_SEND_LOGICAL_LAN"}, \ + {0x124, "H_BULK_REMOVE"}, \ + {0x130, "H_MULTICAST_CTRL"}, \ + {0x134, "H_SET_XDABR"}, \ + {0x138, "H_STUFF_TCE"}, \ + {0x13c, "H_PUT_TCE_INDIRECT"}, \ + {0x14c, "H_CHANGE_LOGICAL_LAN_MAC"}, \ + {0x150, "H_VTERM_PARTNER_INFO"}, \ + {0x154, "H_REGISTER_VTERM"}, \ + {0x158, "H_FREE_VTERM"}, \ + {0x15c, "H_RESET_EVENTS"}, \ + {0x160, "H_ALLOC_RESOURCE"}, \ + {0x164, "H_FREE_RESOURCE"}, \ + {0x168, "H_MODIFY_QP"}, \ + {0x16c, "H_QUERY_QP"}, \ + {0x170, "H_REREGISTER_PMR"}, \ + {0x174, "H_REGISTER_SMR"}, \ + {0x178, "H_QUERY_MR"}, \ + {0x17c, "H_QUERY_MW"}, \ + {0x180, "H_QUERY_HCA"}, \ + {0x184, "H_QUERY_PORT"}, \ + {0x188, "H_MODIFY_PORT"}, \ + {0x18c, "H_DEFINE_AQP1"}, \ + {0x190, "H_GET_TRACE_BUFFER"}, \ + {0x194, "H_DEFINE_AQP0"}, \ + {0x198, "H_RESIZE_MR"}, \ + {0x19c, "H_ATTACH_MCQP"}, \ + {0x1a0, "H_DETACH_MCQP"}, \ + {0x1a4, "H_CREATE_RPT"}, \ + {0x1a8, "H_REMOVE_RPT"}, \ + {0x1ac, "H_REGISTER_RPAGES"}, \ + {0x1b0, "H_DISABLE_AND_GET"}, \ + {0x1b4, "H_ERROR_DATA"}, \ + {0x1b8, "H_GET_HCA_INFO"}, \ + {0x1bc, "H_GET_PERF_COUNT"}, \ + {0x1c0, "H_MANAGE_TRACE"}, \ + {0x1d4, "H_FREE_LOGICAL_LAN_BUFFER"}, \ + {0x1d8, "H_POLL_PENDING"}, \ + {0x1e4, "H_QUERY_INT_STATE"}, \ + {0x244, 
"H_ILLAN_ATTRIBUTES"}, \ + {0x250, "H_MODIFY_HEA_QP"}, \ + {0x254, "H_QUERY_HEA_QP"}, \ + {0x258, "H_QUERY_HEA"}, \ + {0x25c, "H_QUERY_HEA_PORT"}, \ + {0x260, "H_MODIFY_HEA_PORT"}, \ + {0x264, "H_REG_BCMC"}, \ + {0x268, "H_DEREG_BCMC"}, \ + {0x26c, "H_REGISTER_HEA_RPAGES"}, \ + {0x270, "H_DISABLE_AND_GET_HEA"}, \ + {0x274, "H_GET_HEA_INFO"}, \ + {0x278, "H_ALLOC_HEA_RESOURCE"}, \ + {0x284, "H_ADD_CONN"}, \ + {0x288, "H_DEL_CONN"}, \ + {0x298, "H_JOIN"}, \ + {0x2a4, "H_VASI_STATE"}, \ + {0x2b0, "H_ENABLE_CRQ"}, \ + {0x2b8, "H_GET_EM_PARMS"}, \ + {0x2d0, "H_SET_MPP"}, \ + {0x2d4, "H_GET_MPP"}, \ + {0x2ec, "H_HOME_NODE_ASSOCIATIVITY"}, \ + {0x2f4, "H_BEST_ENERGY"}, \ + {0x2fc, "H_XIRR_X"}, \ + {0x300, "H_RANDOM"}, \ + {0x304, "H_COP"}, \ + {0x314, "H_GET_MPP_X"}, \ + {0x31c, "H_SET_MODE"}, \ + {0xf000, "H_RTAS"} \ + +#endif diff --git a/tools/perf/arch/powerpc/util/book3s_hv_exits.h b/tools/perf/arch/powerpc/util/book3s_hv_exits.h new file mode 100644 index 0000000000..2011376c7a --- /dev/null +++ b/tools/perf/arch/powerpc/util/book3s_hv_exits.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_PERF_BOOK3S_HV_EXITS_H +#define ARCH_PERF_BOOK3S_HV_EXITS_H + +/* + * PowerPC Interrupt vectors : exit code to name mapping + */ + +#define kvm_trace_symbol_exit \ + {0x0, "RETURN_TO_HOST"}, \ + {0x100, "SYSTEM_RESET"}, \ + {0x200, "MACHINE_CHECK"}, \ + {0x300, "DATA_STORAGE"}, \ + {0x380, "DATA_SEGMENT"}, \ + {0x400, "INST_STORAGE"}, \ + {0x480, "INST_SEGMENT"}, \ + {0x500, "EXTERNAL"}, \ + {0x502, "EXTERNAL_HV"}, \ + {0x600, "ALIGNMENT"}, \ + {0x700, "PROGRAM"}, \ + {0x800, "FP_UNAVAIL"}, \ + {0x900, "DECREMENTER"}, \ + {0x980, "HV_DECREMENTER"}, \ + {0xc00, "SYSCALL"}, \ + {0xd00, "TRACE"}, \ + {0xe00, "H_DATA_STORAGE"}, \ + {0xe20, "H_INST_STORAGE"}, \ + {0xe40, "H_EMUL_ASSIST"}, \ + {0xf00, "PERFMON"}, \ + {0xf20, "ALTIVEC"}, \ + {0xf40, "VSX"} + +#endif diff --git a/tools/perf/arch/powerpc/util/dwarf-regs.c b/tools/perf/arch/powerpc/util/dwarf-regs.c new file mode 100644 index 0000000000..0c4f4caf53 --- /dev/null +++ b/tools/perf/arch/powerpc/util/dwarf-regs.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Mapping of DWARF debug register numbers into register names. + * + * Copyright (C) 2010 Ian Munsie, IBM Corporation. 
+ */ + +#include <stddef.h> +#include <errno.h> +#include <string.h> +#include <dwarf-regs.h> +#include <linux/ptrace.h> +#include <linux/kernel.h> +#include <linux/stringify.h> + +struct pt_regs_dwarfnum { + const char *name; + unsigned int dwarfnum; + unsigned int ptregs_offset; +}; + +#define REG_DWARFNUM_NAME(r, num) \ + {.name = __stringify(%)__stringify(r), .dwarfnum = num, \ + .ptregs_offset = offsetof(struct pt_regs, r)} +#define GPR_DWARFNUM_NAME(num) \ + {.name = __stringify(%gpr##num), .dwarfnum = num, \ + .ptregs_offset = offsetof(struct pt_regs, gpr[num])} +#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0, .ptregs_offset = 0} + +/* + * Reference: + * http://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.html + */ +static const struct pt_regs_dwarfnum regdwarfnum_table[] = { + GPR_DWARFNUM_NAME(0), + GPR_DWARFNUM_NAME(1), + GPR_DWARFNUM_NAME(2), + GPR_DWARFNUM_NAME(3), + GPR_DWARFNUM_NAME(4), + GPR_DWARFNUM_NAME(5), + GPR_DWARFNUM_NAME(6), + GPR_DWARFNUM_NAME(7), + GPR_DWARFNUM_NAME(8), + GPR_DWARFNUM_NAME(9), + GPR_DWARFNUM_NAME(10), + GPR_DWARFNUM_NAME(11), + GPR_DWARFNUM_NAME(12), + GPR_DWARFNUM_NAME(13), + GPR_DWARFNUM_NAME(14), + GPR_DWARFNUM_NAME(15), + GPR_DWARFNUM_NAME(16), + GPR_DWARFNUM_NAME(17), + GPR_DWARFNUM_NAME(18), + GPR_DWARFNUM_NAME(19), + GPR_DWARFNUM_NAME(20), + GPR_DWARFNUM_NAME(21), + GPR_DWARFNUM_NAME(22), + GPR_DWARFNUM_NAME(23), + GPR_DWARFNUM_NAME(24), + GPR_DWARFNUM_NAME(25), + GPR_DWARFNUM_NAME(26), + GPR_DWARFNUM_NAME(27), + GPR_DWARFNUM_NAME(28), + GPR_DWARFNUM_NAME(29), + GPR_DWARFNUM_NAME(30), + GPR_DWARFNUM_NAME(31), + REG_DWARFNUM_NAME(msr, 66), + REG_DWARFNUM_NAME(ctr, 109), + REG_DWARFNUM_NAME(link, 108), + REG_DWARFNUM_NAME(xer, 101), + REG_DWARFNUM_NAME(dar, 119), + REG_DWARFNUM_NAME(dsisr, 118), + REG_DWARFNUM_END, +}; + +/** + * get_arch_regstr() - lookup register name from it's DWARF register number + * @n: the DWARF register number + * + * get_arch_regstr() returns the name of the register in struct + * regdwarfnum_table from it's DWARF register number. 
If the register is not + * found in the table, this returns NULL; + */ +const char *get_arch_regstr(unsigned int n) +{ + const struct pt_regs_dwarfnum *roff; + for (roff = regdwarfnum_table; roff->name != NULL; roff++) + if (roff->dwarfnum == n) + return roff->name; + return NULL; +} + +int regs_query_register_offset(const char *name) +{ + const struct pt_regs_dwarfnum *roff; + for (roff = regdwarfnum_table; roff->name != NULL; roff++) + if (!strcmp(roff->name, name)) + return roff->ptregs_offset; + return -EINVAL; +} diff --git a/tools/perf/arch/powerpc/util/event.c b/tools/perf/arch/powerpc/util/event.c new file mode 100644 index 0000000000..77d8cc2b56 --- /dev/null +++ b/tools/perf/arch/powerpc/util/event.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> +#include <linux/string.h> +#include <linux/zalloc.h> + +#include "../../../util/event.h" +#include "../../../util/synthetic-events.h" +#include "../../../util/machine.h" +#include "../../../util/tool.h" +#include "../../../util/map.h" +#include "../../../util/debug.h" +#include "../../../util/sample.h" + +void arch_perf_parse_sample_weight(struct perf_sample *data, + const __u64 *array, u64 type) +{ + union perf_sample_weight weight; + + weight.full = *array; + if (type & PERF_SAMPLE_WEIGHT) + data->weight = weight.full; + else { + data->weight = weight.var1_dw; + data->ins_lat = weight.var2_w; + data->p_stage_cyc = weight.var3_w; + } +} + +void arch_perf_synthesize_sample_weight(const struct perf_sample *data, + __u64 *array, u64 type) +{ + *array = data->weight; + + if (type & PERF_SAMPLE_WEIGHT_STRUCT) { + *array &= 0xffffffff; + *array |= ((u64)data->ins_lat << 32); + } +} + +const char *arch_perf_header_entry(const char *se_header) +{ + if (!strcmp(se_header, "Local INSTR Latency")) + return "Finish Cyc"; + else if (!strcmp(se_header, "INSTR Latency")) + return "Global Finish_cyc"; + else if (!strcmp(se_header, "Local Pipeline Stage Cycle")) + return "Dispatch Cyc"; + else if (!strcmp(se_header, "Pipeline Stage Cycle")) + return "Global Dispatch_cyc"; + return se_header; +} + +int arch_support_sort_key(const char *sort_key) +{ + if (!strcmp(sort_key, "p_stage_cyc")) + return 1; + if (!strcmp(sort_key, "local_p_stage_cyc")) + return 1; + return 0; +} diff --git a/tools/perf/arch/powerpc/util/evsel.c b/tools/perf/arch/powerpc/util/evsel.c new file mode 100644 index 0000000000..2f733cdc8d --- /dev/null +++ b/tools/perf/arch/powerpc/util/evsel.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <stdio.h> +#include "util/evsel.h" + +void arch_evsel__set_sample_weight(struct evsel *evsel) +{ + evsel__set_sample_bit(evsel, WEIGHT_STRUCT); +} diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c new file mode 100644 index 0000000000..c8d0dc775e --- /dev/null +++ b/tools/perf/arch/powerpc/util/header.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <sys/types.h> +#include <errno.h> +#include <unistd.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <linux/stringify.h> +#include "header.h" +#include "utils_header.h" +#include "metricgroup.h" +#include <api/fs/fs.h> + +int +get_cpuid(char *buffer, size_t sz) +{ + unsigned long pvr; + int nb; + + pvr = mfspr(SPRN_PVR); + + nb = scnprintf(buffer, sz, "%lu,%lu$", PVR_VER(pvr), PVR_REV(pvr)); + + /* look for end marker to ensure the entire data fit */ + if (strchr(buffer, '$')) { + buffer[nb-1] = '\0'; + return 0; + } + return ENOBUFS; +} + +char * +get_cpuid_str(struct 
perf_pmu *pmu __maybe_unused) +{ + char *bufp; + + if (asprintf(&bufp, "%.8lx", mfspr(SPRN_PVR)) < 0) + bufp = NULL; + + return bufp; +} + +int arch_get_runtimeparam(const struct pmu_metric *pm) +{ + int count; + char path[PATH_MAX] = "/devices/hv_24x7/interface/"; + + strcat(path, pm->aggr_mode == PerChip ? "sockets" : "coresperchip"); + return sysfs__read_int(path, &count) < 0 ? 1 : count; +} diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c new file mode 100644 index 0000000000..d9a0ac1cdf --- /dev/null +++ b/tools/perf/arch/powerpc/util/kvm-stat.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <errno.h> +#include "util/kvm-stat.h" +#include "util/parse-events.h" +#include "util/debug.h" +#include "util/evsel.h" +#include "util/evlist.h" +#include "util/pmus.h" + +#include "book3s_hv_exits.h" +#include "book3s_hcalls.h" +#include <subcmd/parse-options.h> + +#define NR_TPS 4 + +const char *vcpu_id_str = "vcpu_id"; +const char *kvm_entry_trace = "kvm_hv:kvm_guest_enter"; +const char *kvm_exit_trace = "kvm_hv:kvm_guest_exit"; + +define_exit_reasons_table(hv_exit_reasons, kvm_trace_symbol_exit); +define_exit_reasons_table(hcall_reasons, kvm_trace_symbol_hcall); + +/* Tracepoints specific to ppc_book3s_hv */ +const char *ppc_book3s_hv_kvm_tp[] = { + "kvm_hv:kvm_guest_enter", + "kvm_hv:kvm_guest_exit", + "kvm_hv:kvm_hcall_enter", + "kvm_hv:kvm_hcall_exit", + NULL, +}; + +/* 1 extra placeholder for NULL */ +const char *kvm_events_tp[NR_TPS + 1]; +const char *kvm_exit_reason; + +static void hcall_event_get_key(struct evsel *evsel, + struct perf_sample *sample, + struct event_key *key) +{ + key->info = 0; + key->key = evsel__intval(evsel, sample, "req"); +} + +static const char *get_hcall_exit_reason(u64 exit_code) +{ + struct exit_reasons_table *tbl = hcall_reasons; + + while (tbl->reason != NULL) { + if (tbl->exit_code == exit_code) + return tbl->reason; + tbl++; + } + + pr_debug("Unknown hcall code: %lld\n", + (unsigned long long)exit_code); + return "UNKNOWN"; +} + +static bool hcall_event_end(struct evsel *evsel, + struct perf_sample *sample __maybe_unused, + struct event_key *key __maybe_unused) +{ + return (evsel__name_is(evsel, kvm_events_tp[3])); +} + +static bool hcall_event_begin(struct evsel *evsel, + struct perf_sample *sample, struct event_key *key) +{ + if (evsel__name_is(evsel, kvm_events_tp[2])) { + hcall_event_get_key(evsel, sample, key); + return true; + } + + return false; +} +static void hcall_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused, + struct event_key *key, + char *decode) +{ + const char *hcall_reason = get_hcall_exit_reason(key->key); + + scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", hcall_reason); +} + +static struct kvm_events_ops hcall_events = { + .is_begin_event = hcall_event_begin, + .is_end_event = hcall_event_end, + .decode_key = hcall_event_decode_key, + .name = "HCALL-EVENT", +}; + +static struct kvm_events_ops exit_events = { + .is_begin_event = exit_event_begin, + .is_end_event = exit_event_end, + .decode_key = exit_event_decode_key, + .name = "VM-EXIT" +}; + +struct kvm_reg_events_ops kvm_reg_events_ops[] = { + { .name = "vmexit", .ops = &exit_events }, + { .name = "hcall", .ops = &hcall_events }, + { NULL, NULL }, +}; + +const char * const kvm_skip_events[] = { + NULL, +}; + + +static int is_tracepoint_available(const char *str, struct evlist *evlist) +{ + struct parse_events_error err; + int ret; + + parse_events_error__init(&err); + ret = parse_events(evlist, str, &err); + if 
(err.str) + parse_events_error__print(&err, "tracepoint"); + parse_events_error__exit(&err); + return ret; +} + +static int ppc__setup_book3s_hv(struct perf_kvm_stat *kvm, + struct evlist *evlist) +{ + const char **events_ptr; + int i, nr_tp = 0, err = -1; + + /* Check for book3s_hv tracepoints */ + for (events_ptr = ppc_book3s_hv_kvm_tp; *events_ptr; events_ptr++) { + err = is_tracepoint_available(*events_ptr, evlist); + if (err) + return -1; + nr_tp++; + } + + for (i = 0; i < nr_tp; i++) + kvm_events_tp[i] = ppc_book3s_hv_kvm_tp[i]; + + kvm_events_tp[i] = NULL; + kvm_exit_reason = "trap"; + kvm->exit_reasons = hv_exit_reasons; + kvm->exit_reasons_isa = "HV"; + + return 0; +} + +/* Wrapper to setup kvm tracepoints */ +static int ppc__setup_kvm_tp(struct perf_kvm_stat *kvm) +{ + struct evlist *evlist = evlist__new(); + + if (evlist == NULL) + return -ENOMEM; + + /* Right now, only supported on book3s_hv */ + return ppc__setup_book3s_hv(kvm, evlist); +} + +int setup_kvm_events_tp(struct perf_kvm_stat *kvm) +{ + return ppc__setup_kvm_tp(kvm); +} + +int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused) +{ + int ret; + + ret = ppc__setup_kvm_tp(kvm); + if (ret) { + kvm->exit_reasons = NULL; + kvm->exit_reasons_isa = NULL; + } + + return ret; +} + +/* + * In case of powerpc architecture, pmu registers are programmable + * by guest kernel. So monitoring guest via host may not provide + * valid samples with default 'cycles' event. It is better to use + * 'trace_imc/trace_cycles' event for guest profiling, since it + * can track the guest instruction pointer in the trace-record. + * + * Function to parse the arguments and return appropriate values. + */ +int kvm_add_default_arch_event(int *argc, const char **argv) +{ + const char **tmp; + bool event = false; + int i, j = *argc; + + const struct option event_options[] = { + OPT_BOOLEAN('e', "event", &event, NULL), + OPT_END() + }; + + tmp = calloc(j + 1, sizeof(char *)); + if (!tmp) + return -EINVAL; + + for (i = 0; i < j; i++) + tmp[i] = argv[i]; + + parse_options(j, tmp, event_options, NULL, PARSE_OPT_KEEP_UNKNOWN); + if (!event) { + if (perf_pmus__have_event("trace_imc", "trace_cycles")) { + argv[j++] = strdup("-e"); + argv[j++] = strdup("trace_imc/trace_cycles/"); + *argc += 2; + } else { + free(tmp); + return -EINVAL; + } + } + + free(tmp); + return 0; +} diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c new file mode 100644 index 0000000000..78b986e526 --- /dev/null +++ b/tools/perf/arch/powerpc/util/mem-events.c @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "map_symbol.h" +#include "mem-events.h" + +/* PowerPC does not support 'ldlat' parameter. 
*/ +const char *perf_mem_events__name(int i, const char *pmu_name __maybe_unused) +{ + if (i == PERF_MEM_EVENTS__LOAD) + return "cpu/mem-loads/"; + + return "cpu/mem-stores/"; +} diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c new file mode 100644 index 0000000000..b38aa056ee --- /dev/null +++ b/tools/perf/arch/powerpc/util/perf_regs.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <errno.h> +#include <string.h> +#include <regex.h> +#include <linux/zalloc.h> + +#include "perf_regs.h" +#include "../../../util/perf_regs.h" +#include "../../../util/debug.h" +#include "../../../util/event.h" +#include "../../../util/header.h" +#include "../../../perf-sys.h" +#include "utils_header.h" + +#include <linux/kernel.h> + +#define PVR_POWER9 0x004E +#define PVR_POWER10 0x0080 + +const struct sample_reg sample_reg_masks[] = { + SMPL_REG(r0, PERF_REG_POWERPC_R0), + SMPL_REG(r1, PERF_REG_POWERPC_R1), + SMPL_REG(r2, PERF_REG_POWERPC_R2), + SMPL_REG(r3, PERF_REG_POWERPC_R3), + SMPL_REG(r4, PERF_REG_POWERPC_R4), + SMPL_REG(r5, PERF_REG_POWERPC_R5), + SMPL_REG(r6, PERF_REG_POWERPC_R6), + SMPL_REG(r7, PERF_REG_POWERPC_R7), + SMPL_REG(r8, PERF_REG_POWERPC_R8), + SMPL_REG(r9, PERF_REG_POWERPC_R9), + SMPL_REG(r10, PERF_REG_POWERPC_R10), + SMPL_REG(r11, PERF_REG_POWERPC_R11), + SMPL_REG(r12, PERF_REG_POWERPC_R12), + SMPL_REG(r13, PERF_REG_POWERPC_R13), + SMPL_REG(r14, PERF_REG_POWERPC_R14), + SMPL_REG(r15, PERF_REG_POWERPC_R15), + SMPL_REG(r16, PERF_REG_POWERPC_R16), + SMPL_REG(r17, PERF_REG_POWERPC_R17), + SMPL_REG(r18, PERF_REG_POWERPC_R18), + SMPL_REG(r19, PERF_REG_POWERPC_R19), + SMPL_REG(r20, PERF_REG_POWERPC_R20), + SMPL_REG(r21, PERF_REG_POWERPC_R21), + SMPL_REG(r22, PERF_REG_POWERPC_R22), + SMPL_REG(r23, PERF_REG_POWERPC_R23), + SMPL_REG(r24, PERF_REG_POWERPC_R24), + SMPL_REG(r25, PERF_REG_POWERPC_R25), + SMPL_REG(r26, PERF_REG_POWERPC_R26), + SMPL_REG(r27, PERF_REG_POWERPC_R27), + SMPL_REG(r28, PERF_REG_POWERPC_R28), + SMPL_REG(r29, PERF_REG_POWERPC_R29), + SMPL_REG(r30, PERF_REG_POWERPC_R30), + SMPL_REG(r31, PERF_REG_POWERPC_R31), + SMPL_REG(nip, PERF_REG_POWERPC_NIP), + SMPL_REG(msr, PERF_REG_POWERPC_MSR), + SMPL_REG(orig_r3, PERF_REG_POWERPC_ORIG_R3), + SMPL_REG(ctr, PERF_REG_POWERPC_CTR), + SMPL_REG(link, PERF_REG_POWERPC_LINK), + SMPL_REG(xer, PERF_REG_POWERPC_XER), + SMPL_REG(ccr, PERF_REG_POWERPC_CCR), + SMPL_REG(softe, PERF_REG_POWERPC_SOFTE), + SMPL_REG(trap, PERF_REG_POWERPC_TRAP), + SMPL_REG(dar, PERF_REG_POWERPC_DAR), + SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR), + SMPL_REG(sier, PERF_REG_POWERPC_SIER), + SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA), + SMPL_REG(mmcr0, PERF_REG_POWERPC_MMCR0), + SMPL_REG(mmcr1, PERF_REG_POWERPC_MMCR1), + SMPL_REG(mmcr2, PERF_REG_POWERPC_MMCR2), + SMPL_REG(mmcr3, PERF_REG_POWERPC_MMCR3), + SMPL_REG(sier2, PERF_REG_POWERPC_SIER2), + SMPL_REG(sier3, PERF_REG_POWERPC_SIER3), + SMPL_REG(pmc1, PERF_REG_POWERPC_PMC1), + SMPL_REG(pmc2, PERF_REG_POWERPC_PMC2), + SMPL_REG(pmc3, PERF_REG_POWERPC_PMC3), + SMPL_REG(pmc4, PERF_REG_POWERPC_PMC4), + SMPL_REG(pmc5, PERF_REG_POWERPC_PMC5), + SMPL_REG(pmc6, PERF_REG_POWERPC_PMC6), + SMPL_REG(sdar, PERF_REG_POWERPC_SDAR), + SMPL_REG(siar, PERF_REG_POWERPC_SIAR), + SMPL_REG_END +}; + +/* REG or %rREG */ +#define SDT_OP_REGEX1 "^(%r)?([1-2]?[0-9]|3[0-1])$" + +/* -NUM(REG) or NUM(REG) or -NUM(%rREG) or NUM(%rREG) */ +#define SDT_OP_REGEX2 "^(\\-)?([0-9]+)\\((%r)?([1-2]?[0-9]|3[0-1])\\)$" + +static regex_t sdt_op_regex1, sdt_op_regex2; + +static int sdt_init_op_regex(void) +{ 
+ static int initialized; + int ret = 0; + + if (initialized) + return 0; + + ret = regcomp(&sdt_op_regex1, SDT_OP_REGEX1, REG_EXTENDED); + if (ret) + goto error; + + ret = regcomp(&sdt_op_regex2, SDT_OP_REGEX2, REG_EXTENDED); + if (ret) + goto free_regex1; + + initialized = 1; + return 0; + +free_regex1: + regfree(&sdt_op_regex1); +error: + pr_debug4("Regex compilation error.\n"); + return ret; +} + +/* + * Parse OP and convert it into uprobe format, which is, +/-NUM(%gprREG). + * Possible variants of OP are: + * Format Example + * ------------------------- + * NUM(REG) 48(18) + * -NUM(REG) -48(18) + * NUM(%rREG) 48(%r18) + * -NUM(%rREG) -48(%r18) + * REG 18 + * %rREG %r18 + * iNUM i0 + * i-NUM i-1 + * + * SDT marker arguments on Powerpc uses %rREG form with -mregnames flag + * and REG form with -mno-regnames. Here REG is general purpose register, + * which is in 0 to 31 range. + */ +int arch_sdt_arg_parse_op(char *old_op, char **new_op) +{ + int ret, new_len; + regmatch_t rm[5]; + char prefix; + + /* Constant argument. Uprobe does not support it */ + if (old_op[0] == 'i') { + pr_debug4("Skipping unsupported SDT argument: %s\n", old_op); + return SDT_ARG_SKIP; + } + + ret = sdt_init_op_regex(); + if (ret < 0) + return ret; + + if (!regexec(&sdt_op_regex1, old_op, 3, rm, 0)) { + /* REG or %rREG --> %gprREG */ + + new_len = 5; /* % g p r NULL */ + new_len += (int)(rm[2].rm_eo - rm[2].rm_so); + + *new_op = zalloc(new_len); + if (!*new_op) + return -ENOMEM; + + scnprintf(*new_op, new_len, "%%gpr%.*s", + (int)(rm[2].rm_eo - rm[2].rm_so), old_op + rm[2].rm_so); + } else if (!regexec(&sdt_op_regex2, old_op, 5, rm, 0)) { + /* + * -NUM(REG) or NUM(REG) or -NUM(%rREG) or NUM(%rREG) --> + * +/-NUM(%gprREG) + */ + prefix = (rm[1].rm_so == -1) ? '+' : '-'; + + new_len = 8; /* +/- ( % g p r ) NULL */ + new_len += (int)(rm[2].rm_eo - rm[2].rm_so); + new_len += (int)(rm[4].rm_eo - rm[4].rm_so); + + *new_op = zalloc(new_len); + if (!*new_op) + return -ENOMEM; + + scnprintf(*new_op, new_len, "%c%.*s(%%gpr%.*s)", prefix, + (int)(rm[2].rm_eo - rm[2].rm_so), old_op + rm[2].rm_so, + (int)(rm[4].rm_eo - rm[4].rm_so), old_op + rm[4].rm_so); + } else { + pr_debug4("Skipping unsupported SDT argument: %s\n", old_op); + return SDT_ARG_SKIP; + } + + return SDT_ARG_VALID; +} + +uint64_t arch__intr_reg_mask(void) +{ + struct perf_event_attr attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, + .sample_type = PERF_SAMPLE_REGS_INTR, + .precise_ip = 1, + .disabled = 1, + .exclude_kernel = 1, + }; + int fd; + u32 version; + u64 extended_mask = 0, mask = PERF_REGS_MASK; + + /* + * Get the PVR value to set the extended + * mask specific to platform. + */ + version = (((mfspr(SPRN_PVR)) >> 16) & 0xFFFF); + if (version == PVR_POWER9) + extended_mask = PERF_REG_PMU_MASK_300; + else if (version == PVR_POWER10) + extended_mask = PERF_REG_PMU_MASK_31; + else + return mask; + + attr.sample_regs_intr = extended_mask; + attr.sample_period = 1; + event_attr_init(&attr); + + /* + * check if the pmu supports perf extended regs, before + * returning the register mask to sample. 
+ */ + fd = sys_perf_event_open(&attr, 0, -1, -1, 0); + if (fd != -1) { + close(fd); + mask |= extended_mask; + } + return mask; +} + +uint64_t arch__user_reg_mask(void) +{ + return PERF_REGS_MASK; +} diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c new file mode 100644 index 0000000000..5f3edb3004 --- /dev/null +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -0,0 +1,287 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Use DWARF Debug information to skip unnecessary callchain entries. + * + * Copyright (C) 2014 Sukadev Bhattiprolu, IBM Corporation. + * Copyright (C) 2014 Ulrich Weigand, IBM Corporation. + */ +#include <inttypes.h> +#include <dwarf.h> +#include <elfutils/libdwfl.h> + +#include "util/thread.h" +#include "util/callchain.h" +#include "util/debug.h" +#include "util/dso.h" +#include "util/event.h" // struct ip_callchain +#include "util/map.h" +#include "util/symbol.h" + +/* + * When saving the callchain on Power, the kernel conservatively saves + * excess entries in the callchain. A few of these entries are needed + * in some cases but not others. If the unnecessary entries are not + * ignored, we end up with duplicate arcs in the call-graphs. Use + * DWARF debug information to skip over any unnecessary callchain + * entries. + * + * See function header for arch_adjust_callchain() below for more details. + * + * The libdwfl code in this file is based on code from elfutils + * (libdwfl/argp-std.c, libdwfl/tests/addrcfi.c, etc). + */ +static char *debuginfo_path; + +static const Dwfl_Callbacks offline_callbacks = { + .debuginfo_path = &debuginfo_path, + .find_debuginfo = dwfl_standard_find_debuginfo, + .section_address = dwfl_offline_section_address, +}; + + +/* + * Use the DWARF expression for the Call-frame-address and determine + * if return address is in LR and if a new frame was allocated. + */ +static int check_return_reg(int ra_regno, Dwarf_Frame *frame) +{ + Dwarf_Op ops_mem[3]; + Dwarf_Op dummy; + Dwarf_Op *ops = &dummy; + size_t nops; + int result; + + result = dwarf_frame_register(frame, ra_regno, ops_mem, &ops, &nops); + if (result < 0) { + pr_debug("dwarf_frame_register() %s\n", dwarf_errmsg(-1)); + return -1; + } + + /* + * Check if return address is on the stack. If return address + * is in a register (typically R0), it is yet to be saved on + * the stack. + */ + if ((nops != 0 || ops != NULL) && + !(nops == 1 && ops[0].atom == DW_OP_regx && + ops[0].number2 == 0 && ops[0].offset == 0)) + return 0; + + /* + * Return address is in LR. Check if a frame was allocated + * but not-yet used. + */ + result = dwarf_frame_cfa(frame, &ops, &nops); + if (result < 0) { + pr_debug("dwarf_frame_cfa() returns %d, %s\n", result, + dwarf_errmsg(-1)); + return -1; + } + + /* + * If call frame address is in r1, no new frame was allocated. + */ + if (nops == 1 && ops[0].atom == DW_OP_bregx && ops[0].number == 1 && + ops[0].number2 == 0) + return 1; + + /* + * A new frame was allocated but has not yet been used. + */ + return 2; +} + +/* + * Get the DWARF frame from the .eh_frame section. 
+ */ +static Dwarf_Frame *get_eh_frame(Dwfl_Module *mod, Dwarf_Addr pc) +{ + int result; + Dwarf_Addr bias; + Dwarf_CFI *cfi; + Dwarf_Frame *frame; + + cfi = dwfl_module_eh_cfi(mod, &bias); + if (!cfi) { + pr_debug("%s(): no CFI - %s\n", __func__, dwfl_errmsg(-1)); + return NULL; + } + + result = dwarf_cfi_addrframe(cfi, pc-bias, &frame); + if (result) { + pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1)); + return NULL; + } + + return frame; +} + +/* + * Get the DWARF frame from the .debug_frame section. + */ +static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc) +{ + Dwarf_CFI *cfi; + Dwarf_Addr bias; + Dwarf_Frame *frame; + int result; + + cfi = dwfl_module_dwarf_cfi(mod, &bias); + if (!cfi) { + pr_debug("%s(): no CFI - %s\n", __func__, dwfl_errmsg(-1)); + return NULL; + } + + result = dwarf_cfi_addrframe(cfi, pc-bias, &frame); + if (result) { + pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1)); + return NULL; + } + + return frame; +} + +/* + * Return: + * 0 if return address for the program counter @pc is on stack + * 1 if return address is in LR and no new stack frame was allocated + * 2 if return address is in LR and a new frame was allocated (but not + * yet used) + * -1 in case of errors + */ +static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc) +{ + int rc = -1; + Dwfl *dwfl; + Dwfl_Module *mod; + Dwarf_Frame *frame; + int ra_regno; + Dwarf_Addr start = pc; + Dwarf_Addr end = pc; + bool signalp; + const char *exec_file = dso->long_name; + + dwfl = dso->dwfl; + + if (!dwfl) { + dwfl = dwfl_begin(&offline_callbacks); + if (!dwfl) { + pr_debug("dwfl_begin() failed: %s\n", dwarf_errmsg(-1)); + return -1; + } + + mod = dwfl_report_elf(dwfl, exec_file, exec_file, -1, + map_start, false); + if (!mod) { + pr_debug("dwfl_report_elf() failed %s\n", + dwarf_errmsg(-1)); + /* + * We normally cache the DWARF debug info and never + * call dwfl_end(). But to prevent fd leak, free in + * case of error. + */ + dwfl_end(dwfl); + goto out; + } + dso->dwfl = dwfl; + } + + mod = dwfl_addrmodule(dwfl, pc); + if (!mod) { + pr_debug("dwfl_addrmodule() failed, %s\n", dwarf_errmsg(-1)); + goto out; + } + + /* + * To work with split debug info files (eg: glibc), check both + * .eh_frame and .debug_frame sections of the ELF header. + */ + frame = get_eh_frame(mod, pc); + if (!frame) { + frame = get_dwarf_frame(mod, pc); + if (!frame) + goto out; + } + + ra_regno = dwarf_frame_info(frame, &start, &end, &signalp); + if (ra_regno < 0) { + pr_debug("Return address register unavailable: %s\n", + dwarf_errmsg(-1)); + goto out; + } + + rc = check_return_reg(ra_regno, frame); + +out: + return rc; +} + +/* + * The callchain saved by the kernel always includes the link register (LR). + * + * 0: PERF_CONTEXT_USER + * 1: Program counter (Next instruction pointer) + * 2: LR value + * 3: Caller's caller + * 4: ... + * + * The value in LR is only needed when it holds a return address. If the + * return address is on the stack, we should ignore the LR value. + * + * Further, when the return address is in the LR, if a new frame was just + * allocated but the LR was not saved into it, then the LR contains the + * caller, slot 4: contains the caller's caller and the contents of slot 3: + * (chain->ips[3]) is undefined and must be ignored. + * + * Use DWARF debug information to determine if any entries need to be skipped. 
+ * + * Return: + * index: of callchain entry that needs to be ignored (if any) + * -1 if no entry needs to be ignored or in case of errors + */ +int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain) +{ + struct addr_location al; + struct dso *dso = NULL; + int rc; + u64 ip; + u64 skip_slot = -1; + + if (!chain || chain->nr < 3) + return skip_slot; + + addr_location__init(&al); + ip = chain->ips[1]; + + thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al); + + if (al.map) + dso = map__dso(al.map); + + if (!dso) { + pr_debug("%" PRIx64 " dso is NULL\n", ip); + addr_location__exit(&al); + return skip_slot; + } + + rc = check_return_addr(dso, map__start(al.map), ip); + + pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n", + dso->long_name, al.sym->name, ip, rc); + + if (rc == 0) { + /* + * Return address on stack. Ignore LR value in callchain + */ + skip_slot = 2; + } else if (rc == 2) { + /* + * New frame allocated but return address still in LR. + * Ignore the caller's caller entry in callchain. + */ + skip_slot = 3; + } + + addr_location__exit(&al); + return skip_slot; +} diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c new file mode 100644 index 0000000000..947bfad7aa --- /dev/null +++ b/tools/perf/arch/powerpc/util/sym-handling.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Copyright (C) 2015 Naveen N. Rao, IBM Corporation + */ + +#include "dso.h" +#include "symbol.h" +#include "map.h" +#include "probe-event.h" +#include "probe-file.h" + +int arch__choose_best_symbol(struct symbol *syma, + struct symbol *symb __maybe_unused) +{ + char *sym = syma->name; + +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + /* Skip over any initial dot */ + if (*sym == '.') + sym++; +#endif + + /* Avoid "SyS" kernel syscall aliases */ + if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3)) + return SYMBOL_B; + if (strlen(sym) >= 10 && !strncmp(sym, "compat_SyS", 10)) + return SYMBOL_B; + + return SYMBOL_A; +} + +#if !defined(_CALL_ELF) || _CALL_ELF != 2 +/* Allow matching against dot variants */ +int arch__compare_symbol_names(const char *namea, const char *nameb) +{ + /* Skip over initial dot */ + if (*namea == '.') + namea++; + if (*nameb == '.') + nameb++; + + return strcmp(namea, nameb); +} + +int arch__compare_symbol_names_n(const char *namea, const char *nameb, + unsigned int n) +{ + /* Skip over initial dot */ + if (*namea == '.') + namea++; + if (*nameb == '.') + nameb++; + + return strncmp(namea, nameb, n); +} + +const char *arch__normalize_symbol_name(const char *name) +{ + /* Skip over initial dot */ + if (name && *name == '.') + name++; + return name; +} +#endif + +#if defined(_CALL_ELF) && _CALL_ELF == 2 + +#ifdef HAVE_LIBELF_SUPPORT +void arch__sym_update(struct symbol *s, GElf_Sym *sym) +{ + s->arch_sym = sym->st_other; +} +#endif + +#define PPC64LE_LEP_OFFSET 8 + +void arch__fix_tev_from_maps(struct perf_probe_event *pev, + struct probe_trace_event *tev, struct map *map, + struct symbol *sym) +{ + int lep_offset; + + /* + * When probing at a function entry point, we normally always want the + * LEP since that catches calls to the function through both the GEP and + * the LEP. Hence, we would like to probe at an offset of 8 bytes if + * the user only specified the function entry. + * + * However, if the user specifies an offset, we fall back to using the + * GEP since all userspace applications (objdump/readelf) show function + * disassembly with offsets from the GEP. 
+ */ + if (pev->point.offset || !map || !sym) + return; + + /* For kretprobes, add an offset only if the kernel supports it */ + if (!pev->uprobes && pev->point.retprobe) { +#ifdef HAVE_LIBELF_SUPPORT + if (!kretprobe_offset_is_supported()) +#endif + return; + } + + lep_offset = PPC64_LOCAL_ENTRY_OFFSET(sym->arch_sym); + + if (map__dso(map)->symtab_type == DSO_BINARY_TYPE__KALLSYMS) + tev->point.offset += PPC64LE_LEP_OFFSET; + else if (lep_offset) { + if (pev->uprobes) + tev->point.address += lep_offset; + else + tev->point.offset += lep_offset; + } +} + +#ifdef HAVE_LIBELF_SUPPORT +void arch__post_process_probe_trace_events(struct perf_probe_event *pev, + int ntevs) +{ + struct probe_trace_event *tev; + struct map *map; + struct symbol *sym = NULL; + struct rb_node *tmp; + int i = 0; + + map = get_target_map(pev->target, pev->nsi, pev->uprobes); + if (!map || map__load(map) < 0) + return; + + for (i = 0; i < ntevs; i++) { + tev = &pev->tevs[i]; + map__for_each_symbol(map, sym, tmp) { + if (map__unmap_ip(map, sym->start) == tev->point.address) { + arch__fix_tev_from_maps(pev, tev, map, sym); + break; + } + } + } +} +#endif /* HAVE_LIBELF_SUPPORT */ + +#endif diff --git a/tools/perf/arch/powerpc/util/unwind-libdw.c b/tools/perf/arch/powerpc/util/unwind-libdw.c new file mode 100644 index 0000000000..e9a5a8bb67 --- /dev/null +++ b/tools/perf/arch/powerpc/util/unwind-libdw.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <elfutils/libdwfl.h> +#include <linux/kernel.h> +#include "perf_regs.h" +#include "../../../util/unwind-libdw.h" +#include "../../../util/perf_regs.h" +#include "../../../util/sample.h" + +/* See backends/ppc_initreg.c and backends/ppc_regs.c in elfutils. */ +static const int special_regs[3][2] = { + { 65, PERF_REG_POWERPC_LINK }, + { 101, PERF_REG_POWERPC_XER }, + { 109, PERF_REG_POWERPC_CTR }, +}; + +bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg) +{ + struct unwind_info *ui = arg; + struct regs_dump *user_regs = &ui->sample->user_regs; + Dwarf_Word dwarf_regs[32], dwarf_nip; + size_t i; + +#define REG(r) ({ \ + Dwarf_Word val = 0; \ + perf_reg_value(&val, user_regs, PERF_REG_POWERPC_##r); \ + val; \ +}) + + dwarf_regs[0] = REG(R0); + dwarf_regs[1] = REG(R1); + dwarf_regs[2] = REG(R2); + dwarf_regs[3] = REG(R3); + dwarf_regs[4] = REG(R4); + dwarf_regs[5] = REG(R5); + dwarf_regs[6] = REG(R6); + dwarf_regs[7] = REG(R7); + dwarf_regs[8] = REG(R8); + dwarf_regs[9] = REG(R9); + dwarf_regs[10] = REG(R10); + dwarf_regs[11] = REG(R11); + dwarf_regs[12] = REG(R12); + dwarf_regs[13] = REG(R13); + dwarf_regs[14] = REG(R14); + dwarf_regs[15] = REG(R15); + dwarf_regs[16] = REG(R16); + dwarf_regs[17] = REG(R17); + dwarf_regs[18] = REG(R18); + dwarf_regs[19] = REG(R19); + dwarf_regs[20] = REG(R20); + dwarf_regs[21] = REG(R21); + dwarf_regs[22] = REG(R22); + dwarf_regs[23] = REG(R23); + dwarf_regs[24] = REG(R24); + dwarf_regs[25] = REG(R25); + dwarf_regs[26] = REG(R26); + dwarf_regs[27] = REG(R27); + dwarf_regs[28] = REG(R28); + dwarf_regs[29] = REG(R29); + dwarf_regs[30] = REG(R30); + dwarf_regs[31] = REG(R31); + if (!dwfl_thread_state_registers(thread, 0, 32, dwarf_regs)) + return false; + + dwarf_nip = REG(NIP); + dwfl_thread_state_register_pc(thread, dwarf_nip); + for (i = 0; i < ARRAY_SIZE(special_regs); i++) { + Dwarf_Word val = 0; + perf_reg_value(&val, user_regs, special_regs[i][1]); + if (!dwfl_thread_state_registers(thread, + special_regs[i][0], 1, + &val)) + return false; + } + + return true; +} diff --git 
a/tools/perf/arch/powerpc/util/unwind-libunwind.c b/tools/perf/arch/powerpc/util/unwind-libunwind.c new file mode 100644 index 0000000000..90a6beda20 --- /dev/null +++ b/tools/perf/arch/powerpc/util/unwind-libunwind.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright 2016 Chandan Kumar, IBM Corporation. + */ + +#include <errno.h> +#include <libunwind.h> +#include <asm/perf_regs.h> +#include "../../util/unwind.h" +#include "../../util/debug.h" + +int libunwind__arch_reg_id(int regnum) +{ + switch (regnum) { + case UNW_PPC64_R0: + return PERF_REG_POWERPC_R0; + case UNW_PPC64_R1: + return PERF_REG_POWERPC_R1; + case UNW_PPC64_R2: + return PERF_REG_POWERPC_R2; + case UNW_PPC64_R3: + return PERF_REG_POWERPC_R3; + case UNW_PPC64_R4: + return PERF_REG_POWERPC_R4; + case UNW_PPC64_R5: + return PERF_REG_POWERPC_R5; + case UNW_PPC64_R6: + return PERF_REG_POWERPC_R6; + case UNW_PPC64_R7: + return PERF_REG_POWERPC_R7; + case UNW_PPC64_R8: + return PERF_REG_POWERPC_R8; + case UNW_PPC64_R9: + return PERF_REG_POWERPC_R9; + case UNW_PPC64_R10: + return PERF_REG_POWERPC_R10; + case UNW_PPC64_R11: + return PERF_REG_POWERPC_R11; + case UNW_PPC64_R12: + return PERF_REG_POWERPC_R12; + case UNW_PPC64_R13: + return PERF_REG_POWERPC_R13; + case UNW_PPC64_R14: + return PERF_REG_POWERPC_R14; + case UNW_PPC64_R15: + return PERF_REG_POWERPC_R15; + case UNW_PPC64_R16: + return PERF_REG_POWERPC_R16; + case UNW_PPC64_R17: + return PERF_REG_POWERPC_R17; + case UNW_PPC64_R18: + return PERF_REG_POWERPC_R18; + case UNW_PPC64_R19: + return PERF_REG_POWERPC_R19; + case UNW_PPC64_R20: + return PERF_REG_POWERPC_R20; + case UNW_PPC64_R21: + return PERF_REG_POWERPC_R21; + case UNW_PPC64_R22: + return PERF_REG_POWERPC_R22; + case UNW_PPC64_R23: + return PERF_REG_POWERPC_R23; + case UNW_PPC64_R24: + return PERF_REG_POWERPC_R24; + case UNW_PPC64_R25: + return PERF_REG_POWERPC_R25; + case UNW_PPC64_R26: + return PERF_REG_POWERPC_R26; + case UNW_PPC64_R27: + return PERF_REG_POWERPC_R27; + case UNW_PPC64_R28: + return PERF_REG_POWERPC_R28; + case UNW_PPC64_R29: + return PERF_REG_POWERPC_R29; + case UNW_PPC64_R30: + return PERF_REG_POWERPC_R30; + case UNW_PPC64_R31: + return PERF_REG_POWERPC_R31; + case UNW_PPC64_LR: + return PERF_REG_POWERPC_LINK; + case UNW_PPC64_CTR: + return PERF_REG_POWERPC_CTR; + case UNW_PPC64_XER: + return PERF_REG_POWERPC_XER; + case UNW_PPC64_NIP: + return PERF_REG_POWERPC_NIP; + default: + pr_err("unwind: invalid reg id %d\n", regnum); + return -EINVAL; + } + return -EINVAL; +} diff --git a/tools/perf/arch/powerpc/util/utils_header.h b/tools/perf/arch/powerpc/util/utils_header.h new file mode 100644 index 0000000000..2baeb1c1ae --- /dev/null +++ b/tools/perf/arch/powerpc/util/utils_header.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PERF_UTIL_HEADER_H +#define __PERF_UTIL_HEADER_H + +#include <linux/stringify.h> + +#define mfspr(rn) ({unsigned long rval; \ + asm volatile("mfspr %0," __stringify(rn) \ + : "=r" (rval)); rval; }) + +#define SPRN_PVR 0x11F /* Processor Version Register */ +#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */ +#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */ + +#endif /* __PERF_UTIL_HEADER_H */ |
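As a quick illustration of how the PVR helpers added in utils_header.h are consumed by header.c and perf_regs.c, here is a minimal standalone sketch. It decodes a hard-coded sample PVR value instead of executing mfspr, so it builds and runs on any host; 0x004e0201 is a hypothetical POWER9 revision, and only the 0x004E version number comes from PVR_POWER9 in this series.

/* Minimal sketch, not part of the patch: decode a Processor Version
 * Register value with the PVR_VER()/PVR_REV() macros from
 * utils_header.h.  The PVR is a hard-coded sample here; on real
 * PowerPC hardware it would come from mfspr(SPRN_PVR). */
#include <stdio.h>

#define PVR_VER(pvr)	(((pvr) >> 16) & 0xFFFF)	/* Version field */
#define PVR_REV(pvr)	(((pvr) >>  0) & 0xFFFF)	/* Revision field */
#define PVR_POWER9	0x004E				/* as in perf_regs.c */

int main(void)
{
	unsigned long pvr = 0x004e0201UL;	/* hypothetical sample value */
	char buffer[64];

	/* header.c's get_cpuid() emits "<version>,<revision>" in decimal */
	snprintf(buffer, sizeof(buffer), "%lu,%lu",
		 (unsigned long)PVR_VER(pvr), (unsigned long)PVR_REV(pvr));
	printf("cpuid string: %s\n", buffer);

	/* perf_regs.c's arch__intr_reg_mask() keys the extended register
	 * mask off the version field */
	if (PVR_VER(pvr) == PVR_POWER9)
		printf("POWER9: PERF_REG_PMU_MASK_300 would be requested\n");

	return 0;
}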