author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
commit    7f3a4257159dea8e7ef66d1a539dc6df708b8ed3 (patch)
tree      bcc69b5f4609f348fac49e2f59e210b29eaea783 /tools/perf/util
parent    Adding upstream version 6.9.12. (diff)
Adding upstream version 6.10.3. (upstream/6.10.3)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/Build | 16
-rw-r--r--  tools/perf/util/annotate-data.c | 1648
-rw-r--r--  tools/perf/util/annotate-data.h | 74
-rw-r--r--  tools/perf/util/annotate.c | 2213
-rw-r--r--  tools/perf/util/annotate.h | 129
-rw-r--r--  tools/perf/util/auxtrace.c | 15
-rw-r--r--  tools/perf/util/auxtrace.h | 1
-rw-r--r--  tools/perf/util/block-info.c | 24
-rw-r--r--  tools/perf/util/block-info.h | 15
-rw-r--r--  tools/perf/util/bpf-event.c | 8
-rw-r--r--  tools/perf/util/bpf_counter_cgroup.c | 5
-rw-r--r--  tools/perf/util/bpf_kwork.c | 16
-rw-r--r--  tools/perf/util/bpf_kwork_top.c | 12
-rw-r--r--  tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c | 21
-rw-r--r--  tools/perf/util/bpf_skel/bench_uprobe.bpf.c | 16
-rw-r--r--  tools/perf/util/build-id.c | 136
-rw-r--r--  tools/perf/util/build-id.h | 2
-rw-r--r--  tools/perf/util/callchain.c | 4
-rw-r--r--  tools/perf/util/cgroup.c | 4
-rw-r--r--  tools/perf/util/comm.c | 218
-rw-r--r--  tools/perf/util/cpumap.c | 14
-rw-r--r--  tools/perf/util/cpumap.h | 2
-rw-r--r--  tools/perf/util/cs-etm.c | 15
-rw-r--r--  tools/perf/util/data-convert-json.c | 2
-rw-r--r--  tools/perf/util/db-export.c | 6
-rw-r--r--  tools/perf/util/debug.c | 3
-rw-r--r--  tools/perf/util/debug.h | 1
-rw-r--r--  tools/perf/util/disasm.c | 1837
-rw-r--r--  tools/perf/util/disasm.h | 112
-rw-r--r--  tools/perf/util/dlfilter.c | 12
-rw-r--r--  tools/perf/util/dso.c | 484
-rw-r--r--  tools/perf/util/dso.h | 579
-rw-r--r--  tools/perf/util/dsos.c | 545
-rw-r--r--  tools/perf/util/dsos.h | 40
-rw-r--r--  tools/perf/util/dump-insn.h | 1
-rw-r--r--  tools/perf/util/dwarf-aux.c | 240
-rw-r--r--  tools/perf/util/dwarf-aux.h | 24
-rw-r--r--  tools/perf/util/event.c | 8
-rw-r--r--  tools/perf/util/evlist.c | 3
-rw-r--r--  tools/perf/util/evsel.c | 20
-rw-r--r--  tools/perf/util/evsel.h | 4
-rw-r--r--  tools/perf/util/genelf.h | 3
-rw-r--r--  tools/perf/util/header.c | 8
-rw-r--r--  tools/perf/util/help-unknown-cmd.c | 51
-rw-r--r--  tools/perf/util/hist.c | 78
-rw-r--r--  tools/perf/util/hist.h | 217
-rw-r--r--  tools/perf/util/intel-pt.c | 22
-rw-r--r--  tools/perf/util/machine.c | 225
-rw-r--r--  tools/perf/util/machine.h | 4
-rw-r--r--  tools/perf/util/map.c | 91
-rw-r--r--  tools/perf/util/map.h | 3
-rw-r--r--  tools/perf/util/maps.c | 53
-rw-r--r--  tools/perf/util/mem-events.c | 36
-rw-r--r--  tools/perf/util/mem-events.h | 29
-rw-r--r--  tools/perf/util/mem-info.c | 35
-rw-r--r--  tools/perf/util/mem-info.h | 54
-rw-r--r--  tools/perf/util/metricgroup.c | 10
-rw-r--r--  tools/perf/util/metricgroup.h | 1
-rw-r--r--  tools/perf/util/parse-events.c | 493
-rw-r--r--  tools/perf/util/parse-events.h | 58
-rw-r--r--  tools/perf/util/parse-events.l | 124
-rw-r--r--  tools/perf/util/parse-events.y | 201
-rw-r--r--  tools/perf/util/pmu.c | 172
-rw-r--r--  tools/perf/util/pmu.h | 9
-rw-r--r--  tools/perf/util/pmus.c | 115
-rw-r--r--  tools/perf/util/pmus.h | 3
-rw-r--r--  tools/perf/util/print-events.c | 55
-rw-r--r--  tools/perf/util/print_insn.c | 75
-rw-r--r--  tools/perf/util/print_insn.h | 8
-rw-r--r--  tools/perf/util/probe-event.c | 31
-rw-r--r--  tools/perf/util/probe-finder.c | 4
-rw-r--r--  tools/perf/util/record.c | 2
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c | 6
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 45
-rw-r--r--  tools/perf/util/session.c | 26
-rw-r--r--  tools/perf/util/session.h | 2
-rw-r--r--  tools/perf/util/sort.c | 118
-rw-r--r--  tools/perf/util/sort.h | 190
-rw-r--r--  tools/perf/util/srcline.c | 77
-rw-r--r--  tools/perf/util/stat-display.c | 3
-rw-r--r--  tools/perf/util/stat-shadow.c | 7
-rw-r--r--  tools/perf/util/stat.c | 2
-rw-r--r--  tools/perf/util/stat.h | 1
-rw-r--r--  tools/perf/util/svghelper.c | 20
-rw-r--r--  tools/perf/util/symbol-elf.c | 145
-rw-r--r--  tools/perf/util/symbol-minimal.c | 4
-rw-r--r--  tools/perf/util/symbol.c | 228
-rw-r--r--  tools/perf/util/symbol.h | 12
-rw-r--r--  tools/perf/util/symbol_fprintf.c | 4
-rw-r--r--  tools/perf/util/synthetic-events.c | 24
-rw-r--r--  tools/perf/util/thread.c | 4
-rw-r--r--  tools/perf/util/tracepoint.c | 56
-rw-r--r--  tools/perf/util/tracepoint.h | 3
-rw-r--r--  tools/perf/util/unwind-libdw.c | 12
-rw-r--r--  tools/perf/util/unwind-libunwind-local.c | 36
-rw-r--r--  tools/perf/util/unwind-libunwind.c | 2
-rw-r--r--  tools/perf/util/values.h | 1
-rw-r--r--  tools/perf/util/vdso.c | 56
98 files changed, 7765 insertions, 4118 deletions
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index e0a723e245..da64efd871 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -12,6 +12,7 @@ perf-y += config.o
perf-y += copyfile.o
perf-y += ctype.o
perf-y += db-export.o
+perf-y += disasm.o
perf-y += env.o
perf-y += event.o
perf-y += evlist.o
@@ -140,6 +141,7 @@ perf-y += term.o
perf-y += help-unknown-cmd.o
perf-y += dlfilter.o
perf-y += mem-events.o
+perf-y += mem-info.o
perf-y += vsprintf.o
perf-y += units.o
perf-y += time-utils.o
@@ -388,3 +390,17 @@ $(OUTPUT)util/vsprintf.o: ../lib/vsprintf.c FORCE
$(OUTPUT)util/list_sort.o: ../lib/list_sort.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
+
+ifdef SHELLCHECK
+ SHELL_TESTS := generate-cmdlist.sh
+ TEST_LOGS := $(SHELL_TESTS:%=%.shellcheck_log)
+else
+ SHELL_TESTS :=
+ TEST_LOGS :=
+endif
+
+$(OUTPUT)%.shellcheck_log: %
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false)
+
+perf-y += $(TEST_LOGS)
diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c
index 30c4d19fcf..965da6c0b5 100644
--- a/tools/perf/util/annotate-data.c
+++ b/tools/perf/util/annotate-data.c
@@ -8,6 +8,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
+#include <linux/zalloc.h>
#include "annotate.h"
#include "annotate-data.h"
@@ -19,9 +20,202 @@
#include "evlist.h"
#include "map.h"
#include "map_symbol.h"
+#include "sort.h"
#include "strbuf.h"
#include "symbol.h"
#include "symbol_conf.h"
+#include "thread.h"
+
+/* register number of the stack pointer */
+#define X86_REG_SP 7
+
+static void delete_var_types(struct die_var_type *var_types);
+
+enum type_state_kind {
+ TSR_KIND_INVALID = 0,
+ TSR_KIND_TYPE,
+ TSR_KIND_PERCPU_BASE,
+ TSR_KIND_CONST,
+ TSR_KIND_POINTER,
+ TSR_KIND_CANARY,
+};
+
+#define pr_debug_dtp(fmt, ...) \
+do { \
+ if (debug_type_profile) \
+ pr_info(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug3(fmt, ##__VA_ARGS__); \
+} while (0)
+
+static void pr_debug_type_name(Dwarf_Die *die, enum type_state_kind kind)
+{
+ struct strbuf sb;
+ char *str;
+ Dwarf_Word size = 0;
+
+ if (!debug_type_profile && verbose < 3)
+ return;
+
+ switch (kind) {
+ case TSR_KIND_INVALID:
+ pr_info("\n");
+ return;
+ case TSR_KIND_PERCPU_BASE:
+ pr_info(" percpu base\n");
+ return;
+ case TSR_KIND_CONST:
+ pr_info(" constant\n");
+ return;
+ case TSR_KIND_POINTER:
+ pr_info(" pointer");
+ /* it also prints the type info */
+ break;
+ case TSR_KIND_CANARY:
+ pr_info(" stack canary\n");
+ return;
+ case TSR_KIND_TYPE:
+ default:
+ break;
+ }
+
+ dwarf_aggregate_size(die, &size);
+
+ strbuf_init(&sb, 32);
+ die_get_typename_from_type(die, &sb);
+ str = strbuf_detach(&sb, NULL);
+ pr_info(" type='%s' size=%#lx (die:%#lx)\n",
+ str, (long)size, (long)dwarf_dieoffset(die));
+ free(str);
+}
+
+static void pr_debug_location(Dwarf_Die *die, u64 pc, int reg)
+{
+ ptrdiff_t off = 0;
+ Dwarf_Attribute attr;
+ Dwarf_Addr base, start, end;
+ Dwarf_Op *ops;
+ size_t nops;
+
+ if (!debug_type_profile && verbose < 3)
+ return;
+
+ if (dwarf_attr(die, DW_AT_location, &attr) == NULL)
+ return;
+
+ while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) {
+ if (reg != DWARF_REG_PC && end < pc)
+ continue;
+ if (reg != DWARF_REG_PC && start > pc)
+ break;
+
+ pr_info(" variable location: ");
+ switch (ops->atom) {
+ case DW_OP_reg0 ...DW_OP_reg31:
+ pr_info("reg%d\n", ops->atom - DW_OP_reg0);
+ break;
+ case DW_OP_breg0 ...DW_OP_breg31:
+ pr_info("base=reg%d, offset=%#lx\n",
+ ops->atom - DW_OP_breg0, (long)ops->number);
+ break;
+ case DW_OP_regx:
+ pr_info("reg%ld\n", (long)ops->number);
+ break;
+ case DW_OP_bregx:
+ pr_info("base=reg%ld, offset=%#lx\n",
+ (long)ops->number, (long)ops->number2);
+ break;
+ case DW_OP_fbreg:
+ pr_info("use frame base, offset=%#lx\n", (long)ops->number);
+ break;
+ case DW_OP_addr:
+ pr_info("address=%#lx\n", (long)ops->number);
+ break;
+ default:
+ pr_info("unknown: code=%#x, number=%#lx\n",
+ ops->atom, (long)ops->number);
+ break;
+ }
+ break;
+ }
+}
+
+/*
+ * Type information in a register, valid when @ok is true.
+ * The @caller_saved registers are invalidated after a function call.
+ */
+struct type_state_reg {
+ Dwarf_Die type;
+ u32 imm_value;
+ bool ok;
+ bool caller_saved;
+ u8 kind;
+};
+
+/* Type information in a stack location, dynamically allocated */
+struct type_state_stack {
+ struct list_head list;
+ Dwarf_Die type;
+ int offset;
+ int size;
+ bool compound;
+ u8 kind;
+};
+
+/* FIXME: This should be arch-dependent */
+#define TYPE_STATE_MAX_REGS 16
+
+/*
+ * State table to maintain type info in each register and stack location.
+ * It'll be updated when new variable is allocated or type info is moved
+ * to a new location (register or stack). As it'd be used with the
+ * shortest path of basic blocks, it only maintains a single table.
+ */
+struct type_state {
+ /* state of general purpose registers */
+ struct type_state_reg regs[TYPE_STATE_MAX_REGS];
+ /* state of stack location */
+ struct list_head stack_vars;
+ /* return value register */
+ int ret_reg;
+ /* stack pointer register */
+ int stack_reg;
+};
+
+static bool has_reg_type(struct type_state *state, int reg)
+{
+ return (unsigned)reg < ARRAY_SIZE(state->regs);
+}
+
+static void init_type_state(struct type_state *state, struct arch *arch)
+{
+ memset(state, 0, sizeof(*state));
+ INIT_LIST_HEAD(&state->stack_vars);
+
+ if (arch__is(arch, "x86")) {
+ state->regs[0].caller_saved = true;
+ state->regs[1].caller_saved = true;
+ state->regs[2].caller_saved = true;
+ state->regs[4].caller_saved = true;
+ state->regs[5].caller_saved = true;
+ state->regs[8].caller_saved = true;
+ state->regs[9].caller_saved = true;
+ state->regs[10].caller_saved = true;
+ state->regs[11].caller_saved = true;
+ state->ret_reg = 0;
+ state->stack_reg = X86_REG_SP;
+ }
+}
+
+static void exit_type_state(struct type_state *state)
+{
+ struct type_state_stack *stack, *tmp;
+
+ list_for_each_entry_safe(stack, tmp, &state->stack_vars, list) {
+ list_del(&stack->list);
+ free(stack);
+ }
+}
/*
* Compare type name and size to maintain them in a tree.
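The x86 register indices hard-coded in init_type_state() above are DWARF register numbers. For orientation, a small reference table (an editorial sketch assuming the System V AMD64 numbering used by perf's x86 DWARF support, not code from this patch):

static const char * const x86_64_dwarf_regs[16] = {
	[0]  = "rax", [1]  = "rdx", [2]  = "rcx", [3]  = "rbx",
	[4]  = "rsi", [5]  = "rdi", [6]  = "rbp", [7]  = "rsp",
	[8]  = "r8",  [9]  = "r9",  [10] = "r10", [11] = "r11",
	[12] = "r12", [13] = "r13", [14] = "r14", [15] = "r15",
};

With that numbering, the caller_saved entries (0, 1, 2, 4, 5 and 8-11) are rax, rdx, rcx, rsi, rdi and r8-r11, the registers a callee may clobber under the SysV calling convention; ret_reg 0 is rax and X86_REG_SP (7) is rsp.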
@@ -118,8 +312,8 @@ static void delete_members(struct annotated_member *member)
list_for_each_entry_safe(child, tmp, &member->children, node) {
list_del(&child->node);
delete_members(child);
- free(child->type_name);
- free(child->var_name);
+ zfree(&child->type_name);
+ zfree(&child->var_name);
free(child);
}
}
@@ -143,7 +337,7 @@ static struct annotated_data_type *dso__findnew_data_type(struct dso *dso,
/* Check existing nodes in dso->data_types tree */
key.self.type_name = type_name;
key.self.size = size;
- node = rb_find(&key, &dso->data_types, data_type_cmp);
+ node = rb_find(&key, dso__data_types(dso), data_type_cmp);
if (node) {
result = rb_entry(node, struct annotated_data_type, node);
free(type_name);
@@ -164,7 +358,7 @@ static struct annotated_data_type *dso__findnew_data_type(struct dso *dso,
if (symbol_conf.annotate_data_member)
add_member_types(result, type_die);
- rb_add(&result->node, &dso->data_types, data_type_less);
+ rb_add(&result->node, dso__data_types(dso), data_type_less);
return result;
}
@@ -194,14 +388,22 @@ static bool find_cu_die(struct debuginfo *di, u64 pc, Dwarf_Die *cu_die)
}
/* The type info will be saved in @type_die */
-static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset,
- bool is_pointer)
+static int check_variable(struct data_loc_info *dloc, Dwarf_Die *var_die,
+ Dwarf_Die *type_die, int reg, int offset, bool is_fbreg)
{
Dwarf_Word size;
+ bool is_pointer = true;
+
+ if (reg == DWARF_REG_PC)
+ is_pointer = false;
+ else if (reg == dloc->fbreg || is_fbreg)
+ is_pointer = false;
+ else if (arch__is(dloc->arch, "x86") && reg == X86_REG_SP)
+ is_pointer = false;
/* Get the type of the variable */
if (die_get_real_type(var_die, type_die) == NULL) {
- pr_debug("variable has no type\n");
+ pr_debug_dtp("variable has no type\n");
ann_data_stat.no_typeinfo++;
return -1;
}
@@ -215,7 +417,7 @@ static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset,
if ((dwarf_tag(type_die) != DW_TAG_pointer_type &&
dwarf_tag(type_die) != DW_TAG_array_type) ||
die_get_real_type(type_die, type_die) == NULL) {
- pr_debug("no pointer or no type\n");
+ pr_debug_dtp("no pointer or no type\n");
ann_data_stat.no_typeinfo++;
return -1;
}
@@ -223,14 +425,15 @@ static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset,
/* Get the size of the actual type */
if (dwarf_aggregate_size(type_die, &size) < 0) {
- pr_debug("type size is unknown\n");
+ pr_debug_dtp("type size is unknown\n");
ann_data_stat.invalid_size++;
return -1;
}
/* Minimal sanity check */
if ((unsigned)offset >= size) {
- pr_debug("offset: %d is bigger than size: %" PRIu64 "\n", offset, size);
+ pr_debug_dtp("offset: %d is bigger than size: %"PRIu64"\n",
+ offset, size);
ann_data_stat.bad_offset++;
return -1;
}
@@ -238,23 +441,1191 @@ static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset,
return 0;
}
+static struct type_state_stack *find_stack_state(struct type_state *state,
+ int offset)
+{
+ struct type_state_stack *stack;
+
+ list_for_each_entry(stack, &state->stack_vars, list) {
+ if (offset == stack->offset)
+ return stack;
+
+ if (stack->compound && stack->offset < offset &&
+ offset < stack->offset + stack->size)
+ return stack;
+ }
+ return NULL;
+}
+
+static void set_stack_state(struct type_state_stack *stack, int offset, u8 kind,
+ Dwarf_Die *type_die)
+{
+ int tag;
+ Dwarf_Word size;
+
+ if (dwarf_aggregate_size(type_die, &size) < 0)
+ size = 0;
+
+ tag = dwarf_tag(type_die);
+
+ stack->type = *type_die;
+ stack->size = size;
+ stack->offset = offset;
+ stack->kind = kind;
+
+ switch (tag) {
+ case DW_TAG_structure_type:
+ case DW_TAG_union_type:
+ stack->compound = (kind != TSR_KIND_POINTER);
+ break;
+ default:
+ stack->compound = false;
+ break;
+ }
+}
+
+static struct type_state_stack *findnew_stack_state(struct type_state *state,
+ int offset, u8 kind,
+ Dwarf_Die *type_die)
+{
+ struct type_state_stack *stack = find_stack_state(state, offset);
+
+ if (stack) {
+ set_stack_state(stack, offset, kind, type_die);
+ return stack;
+ }
+
+ stack = malloc(sizeof(*stack));
+ if (stack) {
+ set_stack_state(stack, offset, kind, type_die);
+ list_add(&stack->list, &state->stack_vars);
+ }
+ return stack;
+}
+
+/* Maintain a cache for quick global variable lookup */
+struct global_var_entry {
+ struct rb_node node;
+ char *name;
+ u64 start;
+ u64 end;
+ u64 die_offset;
+};
+
+static int global_var_cmp(const void *_key, const struct rb_node *node)
+{
+ const u64 addr = (uintptr_t)_key;
+ struct global_var_entry *gvar;
+
+ gvar = rb_entry(node, struct global_var_entry, node);
+
+ if (gvar->start <= addr && addr < gvar->end)
+ return 0;
+ return gvar->start > addr ? -1 : 1;
+}
+
+static bool global_var_less(struct rb_node *node_a, const struct rb_node *node_b)
+{
+ struct global_var_entry *gvar_a, *gvar_b;
+
+ gvar_a = rb_entry(node_a, struct global_var_entry, node);
+ gvar_b = rb_entry(node_b, struct global_var_entry, node);
+
+ return gvar_a->start < gvar_b->start;
+}
+
+static struct global_var_entry *global_var__find(struct data_loc_info *dloc, u64 addr)
+{
+ struct dso *dso = map__dso(dloc->ms->map);
+ struct rb_node *node;
+
+ node = rb_find((void *)(uintptr_t)addr, dso__global_vars(dso), global_var_cmp);
+ if (node == NULL)
+ return NULL;
+
+ return rb_entry(node, struct global_var_entry, node);
+}
+
+static bool global_var__add(struct data_loc_info *dloc, u64 addr,
+ const char *name, Dwarf_Die *type_die)
+{
+ struct dso *dso = map__dso(dloc->ms->map);
+ struct global_var_entry *gvar;
+ Dwarf_Word size;
+
+ if (dwarf_aggregate_size(type_die, &size) < 0)
+ return false;
+
+ gvar = malloc(sizeof(*gvar));
+ if (gvar == NULL)
+ return false;
+
+ gvar->name = name ? strdup(name) : NULL;
+ if (name && gvar->name == NULL) {
+ free(gvar);
+ return false;
+ }
+
+ gvar->start = addr;
+ gvar->end = addr + size;
+ gvar->die_offset = dwarf_dieoffset(type_die);
+
+ rb_add(&gvar->node, dso__global_vars(dso), global_var_less);
+ return true;
+}
+
+void global_var_type__tree_delete(struct rb_root *root)
+{
+ struct global_var_entry *gvar;
+
+ while (!RB_EMPTY_ROOT(root)) {
+ struct rb_node *node = rb_first(root);
+
+ rb_erase(node, root);
+ gvar = rb_entry(node, struct global_var_entry, node);
+ zfree(&gvar->name);
+ free(gvar);
+ }
+}
+
+static bool get_global_var_info(struct data_loc_info *dloc, u64 addr,
+ const char **var_name, int *var_offset)
+{
+ struct addr_location al;
+ struct symbol *sym;
+ u64 mem_addr;
+
+ /* Kernel symbols might be relocated */
+ mem_addr = addr + map__reloc(dloc->ms->map);
+
+ addr_location__init(&al);
+ sym = thread__find_symbol_fb(dloc->thread, dloc->cpumode,
+ mem_addr, &al);
+ if (sym) {
+ *var_name = sym->name;
+ /* Calculate type offset from the start of variable */
+ *var_offset = mem_addr - map__unmap_ip(al.map, sym->start);
+ } else {
+ *var_name = NULL;
+ }
+ addr_location__exit(&al);
+ if (*var_name == NULL)
+ return false;
+
+ return true;
+}
+
+static void global_var__collect(struct data_loc_info *dloc)
+{
+ Dwarf *dwarf = dloc->di->dbg;
+ Dwarf_Off off, next_off;
+ Dwarf_Die cu_die, type_die;
+ size_t header_size;
+
+ /* Iterate all CU and collect global variables that have no location in a register. */
+ off = 0;
+ while (dwarf_nextcu(dwarf, off, &next_off, &header_size,
+ NULL, NULL, NULL) == 0) {
+ struct die_var_type *var_types = NULL;
+ struct die_var_type *pos;
+
+ if (dwarf_offdie(dwarf, off + header_size, &cu_die) == NULL) {
+ off = next_off;
+ continue;
+ }
+
+ die_collect_global_vars(&cu_die, &var_types);
+
+ for (pos = var_types; pos; pos = pos->next) {
+ const char *var_name = NULL;
+ int var_offset = 0;
+
+ if (pos->reg != -1)
+ continue;
+
+ if (!dwarf_offdie(dwarf, pos->die_off, &type_die))
+ continue;
+
+ if (!get_global_var_info(dloc, pos->addr, &var_name,
+ &var_offset))
+ continue;
+
+ if (var_offset != 0)
+ continue;
+
+ global_var__add(dloc, pos->addr, var_name, &type_die);
+ }
+
+ delete_var_types(var_types);
+
+ off = next_off;
+ }
+}
+
+static bool get_global_var_type(Dwarf_Die *cu_die, struct data_loc_info *dloc,
+ u64 ip, u64 var_addr, int *var_offset,
+ Dwarf_Die *type_die)
+{
+ u64 pc;
+ int offset;
+ const char *var_name = NULL;
+ struct global_var_entry *gvar;
+ struct dso *dso = map__dso(dloc->ms->map);
+ Dwarf_Die var_die;
+
+ if (RB_EMPTY_ROOT(dso__global_vars(dso)))
+ global_var__collect(dloc);
+
+ gvar = global_var__find(dloc, var_addr);
+ if (gvar) {
+ if (!dwarf_offdie(dloc->di->dbg, gvar->die_offset, type_die))
+ return false;
+
+ *var_offset = var_addr - gvar->start;
+ return true;
+ }
+
+ /* Try to get the variable by address first */
+ if (die_find_variable_by_addr(cu_die, var_addr, &var_die, &offset) &&
+ check_variable(dloc, &var_die, type_die, DWARF_REG_PC, offset,
+ /*is_fbreg=*/false) == 0) {
+ var_name = dwarf_diename(&var_die);
+ *var_offset = offset;
+ goto ok;
+ }
+
+ if (!get_global_var_info(dloc, var_addr, &var_name, var_offset))
+ return false;
+
+ pc = map__rip_2objdump(dloc->ms->map, ip);
+
+ /* Try to get the name of global variable */
+ if (die_find_variable_at(cu_die, var_name, pc, &var_die) &&
+ check_variable(dloc, &var_die, type_die, DWARF_REG_PC, *var_offset,
+ /*is_fbreg=*/false) == 0)
+ goto ok;
+
+ return false;
+
+ok:
+ /* The address should point to the start of the variable */
+ global_var__add(dloc, var_addr - *var_offset, var_name, type_die);
+ return true;
+}
+
+/**
+ * update_var_state - Update type state using given variables
+ * @state: type state table
+ * @dloc: data location info
+ * @addr: instruction address to match with variable
+ * @insn_offset: instruction offset (for debug)
+ * @var_types: list of variables with type info
+ *
+ * This function fills the @state table using @var_types info. Each variable
+ * is used only at the given location and updates an entry in the table.
+ */
+static void update_var_state(struct type_state *state, struct data_loc_info *dloc,
+ u64 addr, u64 insn_offset, struct die_var_type *var_types)
+{
+ Dwarf_Die mem_die;
+ struct die_var_type *var;
+ int fbreg = dloc->fbreg;
+ int fb_offset = 0;
+
+ if (dloc->fb_cfa) {
+ if (die_get_cfa(dloc->di->dbg, addr, &fbreg, &fb_offset) < 0)
+ fbreg = -1;
+ }
+
+ for (var = var_types; var != NULL; var = var->next) {
+ if (var->addr != addr)
+ continue;
+ /* Get the type DIE using the offset */
+ if (!dwarf_offdie(dloc->di->dbg, var->die_off, &mem_die))
+ continue;
+
+ if (var->reg == DWARF_REG_FB) {
+ findnew_stack_state(state, var->offset, TSR_KIND_TYPE,
+ &mem_die);
+
+ pr_debug_dtp("var [%"PRIx64"] -%#x(stack)",
+ insn_offset, -var->offset);
+ pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
+ } else if (var->reg == fbreg) {
+ findnew_stack_state(state, var->offset - fb_offset,
+ TSR_KIND_TYPE, &mem_die);
+
+ pr_debug_dtp("var [%"PRIx64"] -%#x(stack)",
+ insn_offset, -var->offset + fb_offset);
+ pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
+ } else if (has_reg_type(state, var->reg) && var->offset == 0) {
+ struct type_state_reg *reg;
+
+ reg = &state->regs[var->reg];
+ reg->type = mem_die;
+ reg->kind = TSR_KIND_TYPE;
+ reg->ok = true;
+
+ pr_debug_dtp("var [%"PRIx64"] reg%d",
+ insn_offset, var->reg);
+ pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
+ }
+ }
+}
+
+static void update_insn_state_x86(struct type_state *state,
+ struct data_loc_info *dloc, Dwarf_Die *cu_die,
+ struct disasm_line *dl)
+{
+ struct annotated_insn_loc loc;
+ struct annotated_op_loc *src = &loc.ops[INSN_OP_SOURCE];
+ struct annotated_op_loc *dst = &loc.ops[INSN_OP_TARGET];
+ struct type_state_reg *tsr;
+ Dwarf_Die type_die;
+ u32 insn_offset = dl->al.offset;
+ int fbreg = dloc->fbreg;
+ int fboff = 0;
+
+ if (annotate_get_insn_location(dloc->arch, dl, &loc) < 0)
+ return;
+
+ if (ins__is_call(&dl->ins)) {
+ struct symbol *func = dl->ops.target.sym;
+
+ if (func == NULL)
+ return;
+
+ /* __fentry__ will preserve all registers */
+ if (!strcmp(func->name, "__fentry__"))
+ return;
+
+ pr_debug_dtp("call [%x] %s\n", insn_offset, func->name);
+
+ /* Otherwise invalidate caller-saved registers after call */
+ for (unsigned i = 0; i < ARRAY_SIZE(state->regs); i++) {
+ if (state->regs[i].caller_saved)
+ state->regs[i].ok = false;
+ }
+
+ /* Update register with the return type (if any) */
+ if (die_find_func_rettype(cu_die, func->name, &type_die)) {
+ tsr = &state->regs[state->ret_reg];
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ pr_debug_dtp("call [%x] return -> reg%d",
+ insn_offset, state->ret_reg);
+ pr_debug_type_name(&type_die, tsr->kind);
+ }
+ return;
+ }
+
+ if (!strncmp(dl->ins.name, "add", 3)) {
+ u64 imm_value = -1ULL;
+ int offset;
+ const char *var_name = NULL;
+ struct map_symbol *ms = dloc->ms;
+ u64 ip = ms->sym->start + dl->al.offset;
+
+ if (!has_reg_type(state, dst->reg1))
+ return;
+
+ tsr = &state->regs[dst->reg1];
+
+ if (src->imm)
+ imm_value = src->offset;
+ else if (has_reg_type(state, src->reg1) &&
+ state->regs[src->reg1].kind == TSR_KIND_CONST)
+ imm_value = state->regs[src->reg1].imm_value;
+ else if (src->reg1 == DWARF_REG_PC) {
+ u64 var_addr = annotate_calc_pcrel(dloc->ms, ip,
+ src->offset, dl);
+
+ if (get_global_var_info(dloc, var_addr,
+ &var_name, &offset) &&
+ !strcmp(var_name, "this_cpu_off") &&
+ tsr->kind == TSR_KIND_CONST) {
+ tsr->kind = TSR_KIND_PERCPU_BASE;
+ imm_value = tsr->imm_value;
+ }
+ }
+ else
+ return;
+
+ if (tsr->kind != TSR_KIND_PERCPU_BASE)
+ return;
+
+ if (get_global_var_type(cu_die, dloc, ip, imm_value, &offset,
+ &type_die) && offset == 0) {
+ /*
+ * This is not a pointer type, but it should be treated
+ * as a pointer.
+ */
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_POINTER;
+ tsr->ok = true;
+
+ pr_debug_dtp("add [%x] percpu %#"PRIx64" -> reg%d",
+ insn_offset, imm_value, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ return;
+ }
+
+ if (strncmp(dl->ins.name, "mov", 3))
+ return;
+
+ if (dloc->fb_cfa) {
+ u64 ip = dloc->ms->sym->start + dl->al.offset;
+ u64 pc = map__rip_2objdump(dloc->ms->map, ip);
+
+ if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0)
+ fbreg = -1;
+ }
+
+ /* Case 1. register to register or segment:offset to register transfers */
+ if (!src->mem_ref && !dst->mem_ref) {
+ if (!has_reg_type(state, dst->reg1))
+ return;
+
+ tsr = &state->regs[dst->reg1];
+ if (dso__kernel(map__dso(dloc->ms->map)) &&
+ src->segment == INSN_SEG_X86_GS && src->imm) {
+ u64 ip = dloc->ms->sym->start + dl->al.offset;
+ u64 var_addr;
+ int offset;
+
+ /*
+ * In kernel, %gs points to a per-cpu region for the
+ * current CPU. Access with a constant offset should
+ * be treated as a global variable access.
+ */
+ var_addr = src->offset;
+
+ if (var_addr == 40) {
+ tsr->kind = TSR_KIND_CANARY;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] stack canary -> reg%d\n",
+ insn_offset, dst->reg1);
+ return;
+ }
+
+ if (!get_global_var_type(cu_die, dloc, ip, var_addr,
+ &offset, &type_die) ||
+ !die_get_member_type(&type_die, offset, &type_die)) {
+ tsr->ok = false;
+ return;
+ }
+
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] this-cpu addr=%#"PRIx64" -> reg%d",
+ insn_offset, var_addr, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ return;
+ }
+
+ if (src->imm) {
+ tsr->kind = TSR_KIND_CONST;
+ tsr->imm_value = src->offset;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] imm=%#x -> reg%d\n",
+ insn_offset, tsr->imm_value, dst->reg1);
+ return;
+ }
+
+ if (!has_reg_type(state, src->reg1) ||
+ !state->regs[src->reg1].ok) {
+ tsr->ok = false;
+ return;
+ }
+
+ tsr->type = state->regs[src->reg1].type;
+ tsr->kind = state->regs[src->reg1].kind;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] reg%d -> reg%d",
+ insn_offset, src->reg1, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ /* Case 2. memory to register transfers */
+ if (src->mem_ref && !dst->mem_ref) {
+ int sreg = src->reg1;
+
+ if (!has_reg_type(state, dst->reg1))
+ return;
+
+ tsr = &state->regs[dst->reg1];
+
+retry:
+ /* Check stack variables with offset */
+ if (sreg == fbreg) {
+ struct type_state_stack *stack;
+ int offset = src->offset - fboff;
+
+ stack = find_stack_state(state, offset);
+ if (stack == NULL) {
+ tsr->ok = false;
+ return;
+ } else if (!stack->compound) {
+ tsr->type = stack->type;
+ tsr->kind = stack->kind;
+ tsr->ok = true;
+ } else if (die_get_member_type(&stack->type,
+ offset - stack->offset,
+ &type_die)) {
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+ } else {
+ tsr->ok = false;
+ return;
+ }
+
+ pr_debug_dtp("mov [%x] -%#x(stack) -> reg%d",
+ insn_offset, -offset, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ /* And then dereference the pointer if it has one */
+ else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
+ state->regs[sreg].kind == TSR_KIND_TYPE &&
+ die_deref_ptr_type(&state->regs[sreg].type,
+ src->offset, &type_die)) {
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d",
+ insn_offset, src->offset, sreg, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ /* Or check if it's a global variable */
+ else if (sreg == DWARF_REG_PC) {
+ struct map_symbol *ms = dloc->ms;
+ u64 ip = ms->sym->start + dl->al.offset;
+ u64 addr;
+ int offset;
+
+ addr = annotate_calc_pcrel(ms, ip, src->offset, dl);
+
+ if (!get_global_var_type(cu_die, dloc, ip, addr, &offset,
+ &type_die) ||
+ !die_get_member_type(&type_die, offset, &type_die)) {
+ tsr->ok = false;
+ return;
+ }
+
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] global addr=%"PRIx64" -> reg%d",
+ insn_offset, addr, dst->reg1);
+ pr_debug_type_name(&type_die, tsr->kind);
+ }
+ /* And check percpu access with base register */
+ else if (has_reg_type(state, sreg) &&
+ state->regs[sreg].kind == TSR_KIND_PERCPU_BASE) {
+ u64 ip = dloc->ms->sym->start + dl->al.offset;
+ u64 var_addr = src->offset;
+ int offset;
+
+ if (src->multi_regs) {
+ int reg2 = (sreg == src->reg1) ? src->reg2 : src->reg1;
+
+ if (has_reg_type(state, reg2) && state->regs[reg2].ok &&
+ state->regs[reg2].kind == TSR_KIND_CONST)
+ var_addr += state->regs[reg2].imm_value;
+ }
+
+ /*
+ * In kernel, %gs points to a per-cpu region for the
+ * current CPU. Access with a constant offset should
+ * be treated as a global variable access.
+ */
+ if (get_global_var_type(cu_die, dloc, ip, var_addr,
+ &offset, &type_die) &&
+ die_get_member_type(&type_die, offset, &type_die)) {
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ if (src->multi_regs) {
+ pr_debug_dtp("mov [%x] percpu %#x(reg%d,reg%d) -> reg%d",
+ insn_offset, src->offset, src->reg1,
+ src->reg2, dst->reg1);
+ } else {
+ pr_debug_dtp("mov [%x] percpu %#x(reg%d) -> reg%d",
+ insn_offset, src->offset, sreg, dst->reg1);
+ }
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ } else {
+ tsr->ok = false;
+ }
+ }
+ /* And then dereference the calculated pointer if it has one */
+ else if (has_reg_type(state, sreg) && state->regs[sreg].ok &&
+ state->regs[sreg].kind == TSR_KIND_POINTER &&
+ die_get_member_type(&state->regs[sreg].type,
+ src->offset, &type_die)) {
+ tsr->type = type_die;
+ tsr->kind = TSR_KIND_TYPE;
+ tsr->ok = true;
+
+ pr_debug_dtp("mov [%x] pointer %#x(reg%d) -> reg%d",
+ insn_offset, src->offset, sreg, dst->reg1);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ /* Or try another register if any */
+ else if (src->multi_regs && sreg == src->reg1 &&
+ src->reg1 != src->reg2) {
+ sreg = src->reg2;
+ goto retry;
+ }
+ else {
+ int offset;
+ const char *var_name = NULL;
+
+ /* it might be per-cpu variable (in kernel) access */
+ if (src->offset < 0) {
+ if (get_global_var_info(dloc, (s64)src->offset,
+ &var_name, &offset) &&
+ !strcmp(var_name, "__per_cpu_offset")) {
+ tsr->kind = TSR_KIND_PERCPU_BASE;
+
+ pr_debug_dtp("mov [%x] percpu base reg%d\n",
+ insn_offset, dst->reg1);
+ }
+ }
+
+ tsr->ok = false;
+ }
+ }
+ /* Case 3. register to memory transfers */
+ if (!src->mem_ref && dst->mem_ref) {
+ if (!has_reg_type(state, src->reg1) ||
+ !state->regs[src->reg1].ok)
+ return;
+
+ /* Check stack variables with offset */
+ if (dst->reg1 == fbreg) {
+ struct type_state_stack *stack;
+ int offset = dst->offset - fboff;
+
+ tsr = &state->regs[src->reg1];
+
+ stack = find_stack_state(state, offset);
+ if (stack) {
+ /*
+ * The source register is likely to hold a type
+ * of member if it's a compound type. Do not
+ * update the stack variable type since we can
+ * get the member type later by using the
+ * die_get_member_type().
+ */
+ if (!stack->compound)
+ set_stack_state(stack, offset, tsr->kind,
+ &tsr->type);
+ } else {
+ findnew_stack_state(state, offset, tsr->kind,
+ &tsr->type);
+ }
+
+ pr_debug_dtp("mov [%x] reg%d -> -%#x(stack)",
+ insn_offset, src->reg1, -offset);
+ pr_debug_type_name(&tsr->type, tsr->kind);
+ }
+ /*
+ * Ignore other transfers since it'd set a value in a struct
+ * and won't change the type.
+ */
+ }
+ /* Case 4. memory to memory transfers (not handled for now) */
+}
+
+/**
+ * update_insn_state - Update type state for an instruction
+ * @state: type state table
+ * @dloc: data location info
+ * @cu_die: compile unit debug entry
+ * @dl: disasm line for the instruction
+ *
+ * This function updates the @state table for the target operand of the
+ * instruction at @dl if it transfers the type like MOV on x86. Since it
+ * tracks the type, it won't care about the values like in arithmetic
+ * instructions like ADD/SUB/MUL/DIV and INC/DEC.
+ *
+ * Note that ops->reg2 is only available when both mem_ref and multi_regs
+ * are true.
+ */
+static void update_insn_state(struct type_state *state, struct data_loc_info *dloc,
+ Dwarf_Die *cu_die, struct disasm_line *dl)
+{
+ if (arch__is(dloc->arch, "x86"))
+ update_insn_state_x86(state, dloc, cu_die, dl);
+}
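/*
 * Illustrative walkthrough (hypothetical, not part of the patch): with the
 * DWARF numbering 0 = rax, 2 = rcx and 5 = rdi, and reg5 already tracked as
 * holding a 'struct foo *', update_insn_state_x86() above would follow:
 *
 *   mov  0x10(%rdi),%rax   case 2: dereference reg5's pointee and record
 *                          the member type at offset 0x10 in reg0
 *   mov  %rax,%rcx         case 1: copy reg0's type info to reg2
 *   call update_foo        invalidate caller-saved regs, then set reg0 to
 *                          update_foo()'s return type if the CU has it
 *
 * When the tracked type in a register is itself a pointer, a later access
 * such as 0x8(%rcx) at the sampled instruction is resolved against the
 * pointed-to type by check_matching_type() below.
 */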
+
+/*
+ * Prepend this_blocks (from the outer scope) to full_blocks, removing
+ * duplicate disasm line.
+ */
+static void prepend_basic_blocks(struct list_head *this_blocks,
+ struct list_head *full_blocks)
+{
+ struct annotated_basic_block *first_bb, *last_bb;
+
+ last_bb = list_last_entry(this_blocks, typeof(*last_bb), list);
+ first_bb = list_first_entry(full_blocks, typeof(*first_bb), list);
+
+ if (list_empty(full_blocks))
+ goto out;
+
+ /* Last insn in this_blocks should be same as first insn in full_blocks */
+ if (last_bb->end != first_bb->begin) {
+ pr_debug("prepend basic blocks: mismatched disasm line %"PRIx64" -> %"PRIx64"\n",
+ last_bb->end->al.offset, first_bb->begin->al.offset);
+ goto out;
+ }
+
+ /* Does the basic block have only one disasm_line? */
+ if (last_bb->begin == last_bb->end) {
+ list_del(&last_bb->list);
+ free(last_bb);
+ goto out;
+ }
+
+ /* Point to the insn before the last when adding this block to full_blocks */
+ last_bb->end = list_prev_entry(last_bb->end, al.node);
+
+out:
+ list_splice(this_blocks, full_blocks);
+}
+
+static void delete_basic_blocks(struct list_head *basic_blocks)
+{
+ struct annotated_basic_block *bb, *tmp;
+
+ list_for_each_entry_safe(bb, tmp, basic_blocks, list) {
+ list_del(&bb->list);
+ free(bb);
+ }
+}
+
+/* Make sure all variables have a valid start address */
+static void fixup_var_address(struct die_var_type *var_types, u64 addr)
+{
+ while (var_types) {
+ /*
+ * Some variables have no address range meaning it's always
+ * available in the whole scope. Let's adjust the start
+ * address to the start of the scope.
+ */
+ if (var_types->addr == 0)
+ var_types->addr = addr;
+
+ var_types = var_types->next;
+ }
+}
+
+static void delete_var_types(struct die_var_type *var_types)
+{
+ while (var_types) {
+ struct die_var_type *next = var_types->next;
+
+ free(var_types);
+ var_types = next;
+ }
+}
+
+/* should match to is_stack_canary() in util/annotate.c */
+static void setup_stack_canary(struct data_loc_info *dloc)
+{
+ if (arch__is(dloc->arch, "x86")) {
+ dloc->op->segment = INSN_SEG_X86_GS;
+ dloc->op->imm = true;
+ dloc->op->offset = 40;
+ }
+}
+
+/*
+ * It's at the target address, check if it has a matching type.
+ * It returns 1 if found, 0 if not or -1 if not found but no need to
+ * repeat the search. The last case is for per-cpu variables which
+ * are similar to global variables and no additional info is needed.
+ */
+static int check_matching_type(struct type_state *state,
+ struct data_loc_info *dloc,
+ Dwarf_Die *cu_die, Dwarf_Die *type_die)
+{
+ Dwarf_Word size;
+ u32 insn_offset = dloc->ip - dloc->ms->sym->start;
+ int reg = dloc->op->reg1;
+
+ pr_debug_dtp("chk [%x] reg%d offset=%#x ok=%d kind=%d",
+ insn_offset, reg, dloc->op->offset,
+ state->regs[reg].ok, state->regs[reg].kind);
+
+ if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_TYPE) {
+ int tag = dwarf_tag(&state->regs[reg].type);
+
+ /*
+ * Normal registers should hold a pointer (or array) to
+ * dereference a memory location.
+ */
+ if (tag != DW_TAG_pointer_type && tag != DW_TAG_array_type) {
+ if (dloc->op->offset < 0 && reg != state->stack_reg)
+ goto check_kernel;
+
+ pr_debug_dtp("\n");
+ return -1;
+ }
+
+ pr_debug_dtp("\n");
+
+ /* Remove the pointer and get the target type */
+ if (die_get_real_type(&state->regs[reg].type, type_die) == NULL)
+ return -1;
+
+ dloc->type_offset = dloc->op->offset;
+
+ /* Get the size of the actual type */
+ if (dwarf_aggregate_size(type_die, &size) < 0 ||
+ (unsigned)dloc->type_offset >= size)
+ return -1;
+
+ return 1;
+ }
+
+ if (reg == dloc->fbreg) {
+ struct type_state_stack *stack;
+
+ pr_debug_dtp(" fbreg\n");
+
+ stack = find_stack_state(state, dloc->type_offset);
+ if (stack == NULL)
+ return 0;
+
+ if (stack->kind == TSR_KIND_CANARY) {
+ setup_stack_canary(dloc);
+ return -1;
+ }
+
+ if (stack->kind != TSR_KIND_TYPE)
+ return 0;
+
+ *type_die = stack->type;
+ /* Update the type offset from the start of slot */
+ dloc->type_offset -= stack->offset;
+
+ return 1;
+ }
+
+ if (dloc->fb_cfa) {
+ struct type_state_stack *stack;
+ u64 pc = map__rip_2objdump(dloc->ms->map, dloc->ip);
+ int fbreg, fboff;
+
+ pr_debug_dtp(" cfa\n");
+
+ if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0)
+ fbreg = -1;
+
+ if (reg != fbreg)
+ return 0;
+
+ stack = find_stack_state(state, dloc->type_offset - fboff);
+ if (stack == NULL)
+ return 0;
+
+ if (stack->kind == TSR_KIND_CANARY) {
+ setup_stack_canary(dloc);
+ return -1;
+ }
+
+ if (stack->kind != TSR_KIND_TYPE)
+ return 0;
+
+ *type_die = stack->type;
+ /* Update the type offset from the start of slot */
+ dloc->type_offset -= fboff + stack->offset;
+
+ return 1;
+ }
+
+ if (state->regs[reg].kind == TSR_KIND_PERCPU_BASE) {
+ u64 var_addr = dloc->op->offset;
+ int var_offset;
+
+ pr_debug_dtp(" percpu var\n");
+
+ if (dloc->op->multi_regs) {
+ int reg2 = dloc->op->reg2;
+
+ if (dloc->op->reg2 == reg)
+ reg2 = dloc->op->reg1;
+
+ if (has_reg_type(state, reg2) && state->regs[reg2].ok &&
+ state->regs[reg2].kind == TSR_KIND_CONST)
+ var_addr += state->regs[reg2].imm_value;
+ }
+
+ if (get_global_var_type(cu_die, dloc, dloc->ip, var_addr,
+ &var_offset, type_die)) {
+ dloc->type_offset = var_offset;
+ return 1;
+ }
+ /* No need to retry per-cpu (global) variables */
+ return -1;
+ }
+
+ if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_POINTER) {
+ pr_debug_dtp(" percpu ptr\n");
+
+ /*
+ * It's actually a pointer but the address was calculated using
+ * some arithmetic. So it points to the actual type already.
+ */
+ *type_die = state->regs[reg].type;
+
+ dloc->type_offset = dloc->op->offset;
+
+ /* Get the size of the actual type */
+ if (dwarf_aggregate_size(type_die, &size) < 0 ||
+ (unsigned)dloc->type_offset >= size)
+ return -1;
+
+ return 1;
+ }
+
+ if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_CANARY) {
+ pr_debug_dtp(" stack canary\n");
+
+ /*
+ * This is a saved value of the stack canary which will be handled
+ * in the outer logic when it returns failure here. Pretend it's
+ * from the stack canary directly.
+ */
+ setup_stack_canary(dloc);
+
+ return -1;
+ }
+
+check_kernel:
+ if (dso__kernel(map__dso(dloc->ms->map))) {
+ u64 addr;
+ int offset;
+
+ /* Direct this-cpu access like "%gs:0x34740" */
+ if (dloc->op->segment == INSN_SEG_X86_GS && dloc->op->imm &&
+ arch__is(dloc->arch, "x86")) {
+ pr_debug_dtp(" this-cpu var\n");
+
+ addr = dloc->op->offset;
+
+ if (get_global_var_type(cu_die, dloc, dloc->ip, addr,
+ &offset, type_die)) {
+ dloc->type_offset = offset;
+ return 1;
+ }
+ return -1;
+ }
+
+ /* Access to global variable like "-0x7dcf0500(,%rdx,8)" */
+ if (dloc->op->offset < 0 && reg != state->stack_reg) {
+ addr = (s64) dloc->op->offset;
+
+ if (get_global_var_type(cu_die, dloc, dloc->ip, addr,
+ &offset, type_die)) {
+ pr_debug_dtp(" global var\n");
+
+ dloc->type_offset = offset;
+ return 1;
+ }
+ pr_debug_dtp(" negative offset\n");
+ return -1;
+ }
+ }
+
+ pr_debug_dtp("\n");
+ return 0;
+}
+
+/* Iterate instructions in basic blocks and update type table */
+static int find_data_type_insn(struct data_loc_info *dloc,
+ struct list_head *basic_blocks,
+ struct die_var_type *var_types,
+ Dwarf_Die *cu_die, Dwarf_Die *type_die)
+{
+ struct type_state state;
+ struct symbol *sym = dloc->ms->sym;
+ struct annotation *notes = symbol__annotation(sym);
+ struct annotated_basic_block *bb;
+ int ret = 0;
+
+ init_type_state(&state, dloc->arch);
+
+ list_for_each_entry(bb, basic_blocks, list) {
+ struct disasm_line *dl = bb->begin;
+
+ BUG_ON(bb->begin->al.offset == -1 || bb->end->al.offset == -1);
+
+ pr_debug_dtp("bb: [%"PRIx64" - %"PRIx64"]\n",
+ bb->begin->al.offset, bb->end->al.offset);
+
+ list_for_each_entry_from(dl, &notes->src->source, al.node) {
+ u64 this_ip = sym->start + dl->al.offset;
+ u64 addr = map__rip_2objdump(dloc->ms->map, this_ip);
+
+ /* Skip comment or debug info lines */
+ if (dl->al.offset == -1)
+ continue;
+
+ /* Update variable type at this address */
+ update_var_state(&state, dloc, addr, dl->al.offset, var_types);
+
+ if (this_ip == dloc->ip) {
+ ret = check_matching_type(&state, dloc,
+ cu_die, type_die);
+ goto out;
+ }
+
+ /* Update type table after processing the instruction */
+ update_insn_state(&state, dloc, cu_die, dl);
+ if (dl == bb->end)
+ break;
+ }
+ }
+
+out:
+ exit_type_state(&state);
+ return ret;
+}
+
+/*
+ * Construct a list of basic blocks for each scope with variables and try to find
+ * the data type by updating a type state table through instructions.
+ */
+static int find_data_type_block(struct data_loc_info *dloc,
+ Dwarf_Die *cu_die, Dwarf_Die *scopes,
+ int nr_scopes, Dwarf_Die *type_die)
+{
+ LIST_HEAD(basic_blocks);
+ struct die_var_type *var_types = NULL;
+ u64 src_ip, dst_ip, prev_dst_ip;
+ int ret = -1;
+
+ /* TODO: other architecture support */
+ if (!arch__is(dloc->arch, "x86"))
+ return -1;
+
+ prev_dst_ip = dst_ip = dloc->ip;
+ for (int i = nr_scopes - 1; i >= 0; i--) {
+ Dwarf_Addr base, start, end;
+ LIST_HEAD(this_blocks);
+ int found;
+
+ if (dwarf_ranges(&scopes[i], 0, &base, &start, &end) < 0)
+ break;
+
+ pr_debug_dtp("scope: [%d/%d] (die:%lx)\n",
+ i + 1, nr_scopes, (long)dwarf_dieoffset(&scopes[i]));
+ src_ip = map__objdump_2rip(dloc->ms->map, start);
+
+again:
+ /* Get basic blocks for this scope */
+ if (annotate_get_basic_blocks(dloc->ms->sym, src_ip, dst_ip,
+ &this_blocks) < 0) {
+ /* Try previous block if they are not connected */
+ if (prev_dst_ip != dst_ip) {
+ dst_ip = prev_dst_ip;
+ goto again;
+ }
+
+ pr_debug_dtp("cannot find a basic block from %"PRIx64" to %"PRIx64"\n",
+ src_ip - dloc->ms->sym->start,
+ dst_ip - dloc->ms->sym->start);
+ continue;
+ }
+ prepend_basic_blocks(&this_blocks, &basic_blocks);
+
+ /* Get variable info for this scope and add to var_types list */
+ die_collect_vars(&scopes[i], &var_types);
+ fixup_var_address(var_types, start);
+
+ /* Find from start of this scope to the target instruction */
+ found = find_data_type_insn(dloc, &basic_blocks, var_types,
+ cu_die, type_die);
+ if (found > 0) {
+ char buf[64];
+
+ if (dloc->op->multi_regs)
+ snprintf(buf, sizeof(buf), "reg%d, reg%d",
+ dloc->op->reg1, dloc->op->reg2);
+ else
+ snprintf(buf, sizeof(buf), "reg%d", dloc->op->reg1);
+
+ pr_debug_dtp("found by insn track: %#x(%s) type-offset=%#x\n",
+ dloc->op->offset, buf, dloc->type_offset);
+ pr_debug_type_name(type_die, TSR_KIND_TYPE);
+ ret = 0;
+ break;
+ }
+
+ if (found < 0)
+ break;
+
+ /* Go up to the next scope and find blocks to the start */
+ prev_dst_ip = dst_ip;
+ dst_ip = src_ip;
+ }
+
+ delete_basic_blocks(&basic_blocks);
+ delete_var_types(var_types);
+ return ret;
+}
+
/* The result will be saved in @type_die */
-static int find_data_type_die(struct debuginfo *di, u64 pc, u64 addr,
- const char *var_name, struct annotated_op_loc *loc,
- Dwarf_Die *type_die)
+static int find_data_type_die(struct data_loc_info *dloc, Dwarf_Die *type_die)
{
+ struct annotated_op_loc *loc = dloc->op;
Dwarf_Die cu_die, var_die;
Dwarf_Die *scopes = NULL;
int reg, offset;
int ret = -1;
int i, nr_scopes;
int fbreg = -1;
- bool is_fbreg = false;
int fb_offset = 0;
+ bool is_fbreg = false;
+ u64 pc;
+ char buf[64];
+
+ if (dloc->op->multi_regs)
+ snprintf(buf, sizeof(buf), "reg%d, reg%d", dloc->op->reg1, dloc->op->reg2);
+ else if (dloc->op->reg1 == DWARF_REG_PC)
+ snprintf(buf, sizeof(buf), "PC");
+ else
+ snprintf(buf, sizeof(buf), "reg%d", dloc->op->reg1);
+
+ pr_debug_dtp("-----------------------------------------------------------\n");
+ pr_debug_dtp("find data type for %#x(%s) at %s+%#"PRIx64"\n",
+ dloc->op->offset, buf, dloc->ms->sym->name,
+ dloc->ip - dloc->ms->sym->start);
+
+ /*
+ * IP is a relative instruction address from the start of the map, as
+ * it can be randomized/relocated, it needs to translate to PC which is
+ * a file address for DWARF processing.
+ */
+ pc = map__rip_2objdump(dloc->ms->map, dloc->ip);
/* Get a compile_unit for this address */
- if (!find_cu_die(di, pc, &cu_die)) {
- pr_debug("cannot find CU for address %" PRIx64 "\n", pc);
+ if (!find_cu_die(dloc->di, pc, &cu_die)) {
+ pr_debug_dtp("cannot find CU for address %"PRIx64"\n", pc);
ann_data_stat.no_cuinfo++;
return -1;
}
@@ -262,19 +1633,18 @@ static int find_data_type_die(struct debuginfo *di, u64 pc, u64 addr,
reg = loc->reg1;
offset = loc->offset;
- if (reg == DWARF_REG_PC) {
- if (die_find_variable_by_addr(&cu_die, pc, addr, &var_die, &offset)) {
- ret = check_variable(&var_die, type_die, offset,
- /*is_pointer=*/false);
- loc->offset = offset;
- goto out;
- }
+ pr_debug_dtp("CU for %s (die:%#lx)\n",
+ dwarf_diename(&cu_die), (long)dwarf_dieoffset(&cu_die));
- if (var_name && die_find_variable_at(&cu_die, var_name, pc,
- &var_die)) {
- ret = check_variable(&var_die, type_die, 0,
- /*is_pointer=*/false);
- /* loc->offset will be updated by the caller */
+ if (reg == DWARF_REG_PC) {
+ if (get_global_var_type(&cu_die, dloc, dloc->ip, dloc->var_addr,
+ &offset, type_die)) {
+ dloc->type_offset = offset;
+
+ pr_debug_dtp("found by addr=%#"PRIx64" type_offset=%#x\n",
+ dloc->var_addr, offset);
+ pr_debug_type_name(type_die, TSR_KIND_TYPE);
+ ret = 0;
goto out;
}
}
@@ -291,16 +1661,20 @@ static int find_data_type_die(struct debuginfo *di, u64 pc, u64 addr,
dwarf_formblock(&attr, &block) == 0 && block.length == 1) {
switch (*block.data) {
case DW_OP_reg0 ... DW_OP_reg31:
- fbreg = *block.data - DW_OP_reg0;
+ fbreg = dloc->fbreg = *block.data - DW_OP_reg0;
break;
case DW_OP_call_frame_cfa:
- if (die_get_cfa(di->dbg, pc, &fbreg,
+ dloc->fb_cfa = true;
+ if (die_get_cfa(dloc->di->dbg, pc, &fbreg,
&fb_offset) < 0)
fbreg = -1;
break;
default:
break;
}
+
+ pr_debug_dtp("frame base: cfa=%d fbreg=%d\n",
+ dloc->fb_cfa, fbreg);
}
}
@@ -312,7 +1686,7 @@ retry:
/* Search from the inner-most scope to the outer */
for (i = nr_scopes - 1; i >= 0; i--) {
if (reg == DWARF_REG_PC) {
- if (!die_find_variable_by_addr(&scopes[i], pc, addr,
+ if (!die_find_variable_by_addr(&scopes[i], dloc->var_addr,
&var_die, &offset))
continue;
} else {
@@ -323,9 +1697,30 @@ retry:
}
/* Found a variable, see if it's correct */
- ret = check_variable(&var_die, type_die, offset,
- reg != DWARF_REG_PC && !is_fbreg);
- loc->offset = offset;
+ ret = check_variable(dloc, &var_die, type_die, reg, offset, is_fbreg);
+ if (ret == 0) {
+ pr_debug_dtp("found \"%s\" in scope=%d/%d (die: %#lx) ",
+ dwarf_diename(&var_die), i+1, nr_scopes,
+ (long)dwarf_dieoffset(&scopes[i]));
+ if (reg == DWARF_REG_PC) {
+ pr_debug_dtp("addr=%#"PRIx64" type_offset=%#x\n",
+ dloc->var_addr, offset);
+ } else if (reg == DWARF_REG_FB || is_fbreg) {
+ pr_debug_dtp("stack_offset=%#x type_offset=%#x\n",
+ fb_offset, offset);
+ } else {
+ pr_debug_dtp("type_offset=%#x\n", offset);
+ }
+ pr_debug_location(&var_die, pc, reg);
+ pr_debug_type_name(type_die, TSR_KIND_TYPE);
+ } else {
+ pr_debug_dtp("check variable \"%s\" failed (die: %#lx)\n",
+ dwarf_diename(&var_die),
+ (long)dwarf_dieoffset(&var_die));
+ pr_debug_location(&var_die, pc, reg);
+ pr_debug_type_name(type_die, TSR_KIND_TYPE);
+ }
+ dloc->type_offset = offset;
goto out;
}
@@ -334,8 +1729,19 @@ retry:
goto retry;
}
- if (ret < 0)
+ if (reg != DWARF_REG_PC) {
+ ret = find_data_type_block(dloc, &cu_die, scopes,
+ nr_scopes, type_die);
+ if (ret == 0) {
+ ann_data_stat.insn_track++;
+ goto out;
+ }
+ }
+
+ if (ret < 0) {
+ pr_debug_dtp("no variable found\n");
ann_data_stat.no_var++;
+ }
out:
free(scopes);
@@ -344,50 +1750,45 @@ out:
/**
* find_data_type - Return a data type at the location
- * @ms: map and symbol at the location
- * @ip: instruction address of the memory access
- * @loc: instruction operand location
- * @addr: data address of the memory access
- * @var_name: global variable name
+ * @dloc: data location
*
* This functions searches the debug information of the binary to get the data
- * type it accesses. The exact location is expressed by (@ip, reg, offset)
- * for pointer variables or (@ip, @addr) for global variables. Note that global
- * variables might update the @loc->offset after finding the start of the variable.
- * If it cannot find a global variable by address, it tried to fine a declaration
- * of the variable using @var_name. In that case, @loc->offset won't be updated.
+ * type it accesses. The exact location is expressed by (ip, reg, offset)
+ * for pointer variables or (ip, addr) for global variables. Note that global
+ * variables might update the @dloc->type_offset after finding the start of the
+ * variable. If it cannot find a global variable by address, it tried to find
+ * a declaration of the variable using var_name. In that case, @dloc->offset
+ * won't be updated.
*
* It return %NULL if not found.
*/
-struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip,
- struct annotated_op_loc *loc, u64 addr,
- const char *var_name)
+struct annotated_data_type *find_data_type(struct data_loc_info *dloc)
{
struct annotated_data_type *result = NULL;
- struct dso *dso = map__dso(ms->map);
- struct debuginfo *di;
+ struct dso *dso = map__dso(dloc->ms->map);
Dwarf_Die type_die;
- u64 pc;
- di = debuginfo__new(dso->long_name);
- if (di == NULL) {
- pr_debug("cannot get the debug info\n");
+ dloc->di = debuginfo__new(dso__long_name(dso));
+ if (dloc->di == NULL) {
+ pr_debug_dtp("cannot get the debug info\n");
return NULL;
}
/*
- * IP is a relative instruction address from the start of the map, as
- * it can be randomized/relocated, it needs to translate to PC which is
- * a file address for DWARF processing.
+ * The type offset is the same as instruction offset by default.
+ * But when finding a global variable, the offset won't be valid.
*/
- pc = map__rip_2objdump(ms->map, ip);
- if (find_data_type_die(di, pc, addr, var_name, loc, &type_die) < 0)
+ dloc->type_offset = dloc->op->offset;
+
+ dloc->fbreg = -1;
+
+ if (find_data_type_die(dloc, &type_die) < 0)
goto out;
result = dso__findnew_data_type(dso, &type_die);
out:
- debuginfo__delete(di);
+ debuginfo__delete(dloc->di);
return result;
}
@@ -399,7 +1800,6 @@ static int alloc_data_type_histograms(struct annotated_data_type *adt, int nr_en
sz += sizeof(struct type_hist_entry) * adt->self.size;
/* Allocate a table of pointers for each event */
- adt->nr_histograms = nr_entries;
adt->histograms = calloc(nr_entries, sizeof(*adt->histograms));
if (adt->histograms == NULL)
return -ENOMEM;
@@ -413,20 +1813,24 @@ static int alloc_data_type_histograms(struct annotated_data_type *adt, int nr_en
if (adt->histograms[i] == NULL)
goto err;
}
+
+ adt->nr_histograms = nr_entries;
return 0;
err:
while (--i >= 0)
- free(adt->histograms[i]);
- free(adt->histograms);
+ zfree(&(adt->histograms[i]));
+ zfree(&adt->histograms);
return -ENOMEM;
}
static void delete_data_type_histograms(struct annotated_data_type *adt)
{
for (int i = 0; i < adt->nr_histograms; i++)
- free(adt->histograms[i]);
- free(adt->histograms);
+ zfree(&(adt->histograms[i]));
+
+ zfree(&adt->histograms);
+ adt->nr_histograms = 0;
}
void annotated_data_type__tree_delete(struct rb_root *root)
@@ -440,7 +1844,7 @@ void annotated_data_type__tree_delete(struct rb_root *root)
pos = rb_entry(node, struct annotated_data_type, node);
delete_members(&pos->self);
delete_data_type_histograms(pos);
- free(pos->self.type_name);
+ zfree(&pos->self.type_name);
free(pos);
}
}
@@ -484,3 +1888,115 @@ int annotated_data_type__update_samples(struct annotated_data_type *adt,
h->addr[offset].period += period;
return 0;
}
+
+static void print_annotated_data_header(struct hist_entry *he, struct evsel *evsel)
+{
+ struct dso *dso = map__dso(he->ms.map);
+ int nr_members = 1;
+ int nr_samples = he->stat.nr_events;
+ int width = 7;
+ const char *val_hdr = "Percent";
+
+ if (evsel__is_group_event(evsel)) {
+ struct hist_entry *pair;
+
+ list_for_each_entry(pair, &he->pairs.head, pairs.node)
+ nr_samples += pair->stat.nr_events;
+ }
+
+ printf("Annotate type: '%s' in %s (%d samples):\n",
+ he->mem_type->self.type_name, dso__name(dso), nr_samples);
+
+ if (evsel__is_group_event(evsel)) {
+ struct evsel *pos;
+ int i = 0;
+
+ for_each_group_evsel(pos, evsel)
+ printf(" event[%d] = %s\n", i++, pos->name);
+
+ nr_members = evsel->core.nr_members;
+ }
+
+ if (symbol_conf.show_total_period) {
+ width = 11;
+ val_hdr = "Period";
+ } else if (symbol_conf.show_nr_samples) {
+ width = 7;
+ val_hdr = "Samples";
+ }
+
+ printf("============================================================================\n");
+ printf("%*s %10s %10s %s\n", (width + 1) * nr_members, val_hdr,
+ "offset", "size", "field");
+}
+
+static void print_annotated_data_value(struct type_hist *h, u64 period, int nr_samples)
+{
+ double percent = h->period ? (100.0 * period / h->period) : 0;
+ const char *color = get_percent_color(percent);
+
+ if (symbol_conf.show_total_period)
+ color_fprintf(stdout, color, " %11" PRIu64, period);
+ else if (symbol_conf.show_nr_samples)
+ color_fprintf(stdout, color, " %7d", nr_samples);
+ else
+ color_fprintf(stdout, color, " %7.2f", percent);
+}
+
+static void print_annotated_data_type(struct annotated_data_type *mem_type,
+ struct annotated_member *member,
+ struct evsel *evsel, int indent)
+{
+ struct annotated_member *child;
+ struct type_hist *h = mem_type->histograms[evsel->core.idx];
+ int i, nr_events = 1, samples = 0;
+ u64 period = 0;
+ int width = symbol_conf.show_total_period ? 11 : 7;
+
+ for (i = 0; i < member->size; i++) {
+ samples += h->addr[member->offset + i].nr_samples;
+ period += h->addr[member->offset + i].period;
+ }
+ print_annotated_data_value(h, period, samples);
+
+ if (evsel__is_group_event(evsel)) {
+ struct evsel *pos;
+
+ for_each_group_member(pos, evsel) {
+ h = mem_type->histograms[pos->core.idx];
+
+ samples = 0;
+ period = 0;
+ for (i = 0; i < member->size; i++) {
+ samples += h->addr[member->offset + i].nr_samples;
+ period += h->addr[member->offset + i].period;
+ }
+ print_annotated_data_value(h, period, samples);
+ }
+ nr_events = evsel->core.nr_members;
+ }
+
+ printf(" %10d %10d %*s%s\t%s",
+ member->offset, member->size, indent, "", member->type_name,
+ member->var_name ?: "");
+
+ if (!list_empty(&member->children))
+ printf(" {\n");
+
+ list_for_each_entry(child, &member->children, node)
+ print_annotated_data_type(mem_type, child, evsel, indent + 4);
+
+ if (!list_empty(&member->children))
+ printf("%*s}", (width + 1) * nr_events + 24 + indent, "");
+ printf(";\n");
+}
+
+int hist_entry__annotate_data_tty(struct hist_entry *he, struct evsel *evsel)
+{
+ print_annotated_data_header(he, evsel);
+ print_annotated_data_type(he->mem_type, &he->mem_type->self, evsel, 0);
+ printf("\n");
+
+ /* move to the next entry */
+ return '>';
+}
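hist_entry__annotate_data_tty() is the plain-terminal entry point for the new data-type output. A minimal sketch of how a caller could drive it over the sorted hist entries (a hypothetical helper assuming perf's usual struct hists/struct evsel and headers; in the tree this is driven from builtin-annotate.c):

static void example_annotate_data_types(struct hists *hists, struct evsel *evsel)
{
	struct rb_node *nd;

	/* walk the sorted output tree of hist entries */
	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);

		/* only entries that resolved to a data type can be printed */
		if (he->mem_type == NULL)
			continue;

		hist_entry__annotate_data_tty(he, evsel);
	}
}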
diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h
index 1b0db8e8c4..0a57d9f5ee 100644
--- a/tools/perf/util/annotate-data.h
+++ b/tools/perf/util/annotate-data.h
@@ -8,8 +8,12 @@
#include <linux/types.h>
struct annotated_op_loc;
+struct debuginfo;
struct evsel;
+struct hist_browser_timer;
+struct hist_entry;
struct map_symbol;
+struct thread;
/**
* struct annotated_member - Type of member field
@@ -71,6 +75,40 @@ struct annotated_data_type {
extern struct annotated_data_type unknown_type;
extern struct annotated_data_type stackop_type;
+extern struct annotated_data_type canary_type;
+
+/**
+ * struct data_loc_info - Data location information
+ * @arch: CPU architecture info
+ * @thread: Thread info
+ * @ms: Map and Symbol info
+ * @ip: Instruction address
+ * @var_addr: Data address (for global variables)
+ * @cpumode: CPU execution mode
+ * @op: Instruction operand location (regs and offset)
+ * @di: Debug info
+ * @fbreg: Frame base register
+ * @fb_cfa: Whether the frame needs to check CFA
+ * @type_offset: Final offset in the type
+ */
+struct data_loc_info {
+ /* These are input field, should be filled by caller */
+ struct arch *arch;
+ struct thread *thread;
+ struct map_symbol *ms;
+ u64 ip;
+ u64 var_addr;
+ u8 cpumode;
+ struct annotated_op_loc *op;
+
+ /* These are used internally */
+ struct debuginfo *di;
+ int fbreg;
+ bool fb_cfa;
+
+ /* This is for the result */
+ int type_offset;
+};
/**
* struct annotated_data_stat - Debug statistics
@@ -100,15 +138,14 @@ struct annotated_data_stat {
int no_typeinfo;
int invalid_size;
int bad_offset;
+ int insn_track;
};
extern struct annotated_data_stat ann_data_stat;
#ifdef HAVE_DWARF_SUPPORT
/* Returns data type at the location (ip, reg, offset) */
-struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip,
- struct annotated_op_loc *loc, u64 addr,
- const char *var_name);
+struct annotated_data_type *find_data_type(struct data_loc_info *dloc);
/* Update type access histogram at the given offset */
int annotated_data_type__update_samples(struct annotated_data_type *adt,
@@ -118,12 +155,15 @@ int annotated_data_type__update_samples(struct annotated_data_type *adt,
/* Release all data type information in the tree */
void annotated_data_type__tree_delete(struct rb_root *root);
+/* Release all global variable information in the tree */
+void global_var_type__tree_delete(struct rb_root *root);
+
+int hist_entry__annotate_data_tty(struct hist_entry *he, struct evsel *evsel);
+
#else /* HAVE_DWARF_SUPPORT */
static inline struct annotated_data_type *
-find_data_type(struct map_symbol *ms __maybe_unused, u64 ip __maybe_unused,
- struct annotated_op_loc *loc __maybe_unused,
- u64 addr __maybe_unused, const char *var_name __maybe_unused)
+find_data_type(struct data_loc_info *dloc __maybe_unused)
{
return NULL;
}
@@ -142,6 +182,28 @@ static inline void annotated_data_type__tree_delete(struct rb_root *root __maybe
{
}
+static inline void global_var_type__tree_delete(struct rb_root *root __maybe_unused)
+{
+}
+
+static inline int hist_entry__annotate_data_tty(struct hist_entry *he __maybe_unused,
+ struct evsel *evsel __maybe_unused)
+{
+ return -1;
+}
+
#endif /* HAVE_DWARF_SUPPORT */
+#ifdef HAVE_SLANG_SUPPORT
+int hist_entry__annotate_data_tui(struct hist_entry *he, struct evsel *evsel,
+ struct hist_browser_timer *hbt);
+#else
+static inline int hist_entry__annotate_data_tui(struct hist_entry *he __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+ struct hist_browser_timer *hbt __maybe_unused)
+{
+ return -1;
+}
+#endif /* HAVE_SLANG_SUPPORT */
+
#endif /* _PERF_ANNOTATE_DATA_H */
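
The header hunk above folds the old five-argument find_data_type() into a single struct data_loc_info. A minimal caller sketch under the new signature; the arch, thread, ms, op_loc and sample variables are assumed from the surrounding annotation context, not taken from this patch:

	struct data_loc_info dloc = {
		/* input fields, filled by the caller */
		.arch     = arch,
		.thread   = thread,
		.ms       = ms,
		.ip       = sample->ip,
		.var_addr = sample->addr,
		.cpumode  = sample->cpumode,
		.op       = op_loc,
	};
	struct annotated_data_type *type = find_data_type(&dloc);

	if (type != NULL)
		pr_debug("data type: %s (+%d)\n",
			 type->self.type_name, dloc.type_offset);
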
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 79d082155c..1451caf25e 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -16,6 +16,7 @@
#include "build-id.h"
#include "color.h"
#include "config.h"
+#include "disasm.h"
#include "dso.h"
#include "env.h"
#include "map.h"
@@ -64,47 +65,6 @@
/* global annotation options */
struct annotation_options annotate_opts;
-static regex_t file_lineno;
-
-static struct ins_ops *ins__find(struct arch *arch, const char *name);
-static void ins__sort(struct arch *arch);
-static int disasm_line__parse(char *line, const char **namep, char **rawp);
-static int call__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name);
-static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name);
-
-struct arch {
- const char *name;
- struct ins *instructions;
- size_t nr_instructions;
- size_t nr_instructions_allocated;
- struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name);
- bool sorted_instructions;
- bool initialized;
- const char *insn_suffix;
- void *priv;
- unsigned int model;
- unsigned int family;
- int (*init)(struct arch *arch, char *cpuid);
- bool (*ins_is_fused)(struct arch *arch, const char *ins1,
- const char *ins2);
- struct {
- char comment_char;
- char skip_functions_char;
- char register_char;
- char memory_ref_char;
- } objdump;
-};
-
-static struct ins_ops call_ops;
-static struct ins_ops dec_ops;
-static struct ins_ops jump_ops;
-static struct ins_ops mov_ops;
-static struct ins_ops nop_ops;
-static struct ins_ops lock_ops;
-static struct ins_ops ret_ops;
-
/* Data type collection debug statistics */
struct annotated_data_stat ann_data_stat;
LIST_HEAD(ann_insn_stat);
@@ -117,753 +77,13 @@ struct annotated_data_type stackop_type = {
},
};
-static int arch__grow_instructions(struct arch *arch)
-{
- struct ins *new_instructions;
- size_t new_nr_allocated;
-
- if (arch->nr_instructions_allocated == 0 && arch->instructions)
- goto grow_from_non_allocated_table;
-
- new_nr_allocated = arch->nr_instructions_allocated + 128;
- new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
- if (new_instructions == NULL)
- return -1;
-
-out_update_instructions:
- arch->instructions = new_instructions;
- arch->nr_instructions_allocated = new_nr_allocated;
- return 0;
-
-grow_from_non_allocated_table:
- new_nr_allocated = arch->nr_instructions + 128;
- new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
- if (new_instructions == NULL)
- return -1;
-
- memcpy(new_instructions, arch->instructions, arch->nr_instructions);
- goto out_update_instructions;
-}
-
-static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
-{
- struct ins *ins;
-
- if (arch->nr_instructions == arch->nr_instructions_allocated &&
- arch__grow_instructions(arch))
- return -1;
-
- ins = &arch->instructions[arch->nr_instructions];
- ins->name = strdup(name);
- if (!ins->name)
- return -1;
-
- ins->ops = ops;
- arch->nr_instructions++;
-
- ins__sort(arch);
- return 0;
-}
-
-#include "arch/arc/annotate/instructions.c"
-#include "arch/arm/annotate/instructions.c"
-#include "arch/arm64/annotate/instructions.c"
-#include "arch/csky/annotate/instructions.c"
-#include "arch/loongarch/annotate/instructions.c"
-#include "arch/mips/annotate/instructions.c"
-#include "arch/x86/annotate/instructions.c"
-#include "arch/powerpc/annotate/instructions.c"
-#include "arch/riscv64/annotate/instructions.c"
-#include "arch/s390/annotate/instructions.c"
-#include "arch/sparc/annotate/instructions.c"
-
-static struct arch architectures[] = {
- {
- .name = "arc",
- .init = arc__annotate_init,
- },
- {
- .name = "arm",
- .init = arm__annotate_init,
- },
- {
- .name = "arm64",
- .init = arm64__annotate_init,
- },
- {
- .name = "csky",
- .init = csky__annotate_init,
- },
- {
- .name = "mips",
- .init = mips__annotate_init,
- .objdump = {
- .comment_char = '#',
- },
- },
- {
- .name = "x86",
- .init = x86__annotate_init,
- .instructions = x86__instructions,
- .nr_instructions = ARRAY_SIZE(x86__instructions),
- .insn_suffix = "bwlq",
- .objdump = {
- .comment_char = '#',
- .register_char = '%',
- .memory_ref_char = '(',
- },
- },
- {
- .name = "powerpc",
- .init = powerpc__annotate_init,
- },
- {
- .name = "riscv64",
- .init = riscv64__annotate_init,
- },
- {
- .name = "s390",
- .init = s390__annotate_init,
- .objdump = {
- .comment_char = '#',
- },
- },
- {
- .name = "sparc",
- .init = sparc__annotate_init,
- .objdump = {
- .comment_char = '#',
- },
- },
- {
- .name = "loongarch",
- .init = loongarch__annotate_init,
- .objdump = {
- .comment_char = '#',
- },
+struct annotated_data_type canary_type = {
+ .self = {
+ .type_name = (char *)"(stack canary)",
+ .children = LIST_HEAD_INIT(canary_type.self.children),
},
};
-static void ins__delete(struct ins_operands *ops)
-{
- if (ops == NULL)
- return;
- zfree(&ops->source.raw);
- zfree(&ops->source.name);
- zfree(&ops->target.raw);
- zfree(&ops->target.name);
-}
-
-static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name)
-{
- return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
-}
-
-int ins__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name)
-{
- if (ins->ops->scnprintf)
- return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
-
- return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
-}
-
-bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
-{
- if (!arch || !arch->ins_is_fused)
- return false;
-
- return arch->ins_is_fused(arch, ins1, ins2);
-}
-
-static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
-{
- char *endptr, *tok, *name;
- struct map *map = ms->map;
- struct addr_map_symbol target = {
- .ms = { .map = map, },
- };
-
- ops->target.addr = strtoull(ops->raw, &endptr, 16);
-
- name = strchr(endptr, '<');
- if (name == NULL)
- goto indirect_call;
-
- name++;
-
- if (arch->objdump.skip_functions_char &&
- strchr(name, arch->objdump.skip_functions_char))
- return -1;
-
- tok = strchr(name, '>');
- if (tok == NULL)
- return -1;
-
- *tok = '\0';
- ops->target.name = strdup(name);
- *tok = '>';
-
- if (ops->target.name == NULL)
- return -1;
-find_target:
- target.addr = map__objdump_2mem(map, ops->target.addr);
-
- if (maps__find_ams(ms->maps, &target) == 0 &&
- map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
- ops->target.sym = target.ms.sym;
-
- return 0;
-
-indirect_call:
- tok = strchr(endptr, '*');
- if (tok != NULL) {
- endptr++;
-
- /* Indirect call can use a non-rip register and offset: callq *0x8(%rbx).
- * Do not parse such instructions. */
- if (strstr(endptr, "(%r") == NULL)
- ops->target.addr = strtoull(endptr, NULL, 16);
- }
- goto find_target;
-}
-
-static int call__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name)
-{
- if (ops->target.sym)
- return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
-
- if (ops->target.addr == 0)
- return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
-
- if (ops->target.name)
- return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name);
-
- return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr);
-}
-
-static struct ins_ops call_ops = {
- .parse = call__parse,
- .scnprintf = call__scnprintf,
-};
-
-bool ins__is_call(const struct ins *ins)
-{
- return ins->ops == &call_ops || ins->ops == &s390_call_ops || ins->ops == &loongarch_call_ops;
-}
-
-/*
- * Prevents matching commas in the comment section, e.g.:
- * ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast
- *
- * and skips commas that are part of function arguments, e.g.:
- * 1d8b4ac <linemap_lookup(line_maps const*, unsigned int)+0xcc>
- */
-static inline const char *validate_comma(const char *c, struct ins_operands *ops)
-{
- if (ops->jump.raw_comment && c > ops->jump.raw_comment)
- return NULL;
-
- if (ops->jump.raw_func_start && c > ops->jump.raw_func_start)
- return NULL;
-
- return c;
-}
-
-static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
-{
- struct map *map = ms->map;
- struct symbol *sym = ms->sym;
- struct addr_map_symbol target = {
- .ms = { .map = map, },
- };
- const char *c = strchr(ops->raw, ',');
- u64 start, end;
-
- ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char);
- ops->jump.raw_func_start = strchr(ops->raw, '<');
-
- c = validate_comma(c, ops);
-
- /*
- * Examples of lines to parse for the _cpp_lex_token@@Base
- * function:
- *
- * 1159e6c: jne 115aa32 <_cpp_lex_token@@Base+0xf92>
- * 1159e8b: jne c469be <cpp_named_operator2name@@Base+0xa72>
- *
- * The first is a jump to an offset inside the same function,
- * the second is to another function, i.e. that 0xa72 is an
- * offset in the cpp_named_operator2name@@base function.
- */
- /*
- * skip over up to 2 possible operands to get to the address, e.g.:
- * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
- */
- if (c++ != NULL) {
- ops->target.addr = strtoull(c, NULL, 16);
- if (!ops->target.addr) {
- c = strchr(c, ',');
- c = validate_comma(c, ops);
- if (c++ != NULL)
- ops->target.addr = strtoull(c, NULL, 16);
- }
- } else {
- ops->target.addr = strtoull(ops->raw, NULL, 16);
- }
-
- target.addr = map__objdump_2mem(map, ops->target.addr);
- start = map__unmap_ip(map, sym->start);
- end = map__unmap_ip(map, sym->end);
-
- ops->target.outside = target.addr < start || target.addr > end;
-
- /*
- * FIXME: things like this in _cpp_lex_token (gcc's cc1 program):
-
- cpp_named_operator2name@@Base+0xa72
-
- * Point to a place that is after the cpp_named_operator2name
- * boundaries, i.e. in the ELF symbol table for cc1
- * cpp_named_operator2name is marked as being 32-bytes long, but it in
- * fact is much larger than that, so we seem to need a symbols__find()
- * routine that looks for >= current->start and < next_symbol->start,
- * possibly just for C++ objects?
- *
- * For now let's just make some progress by marking jumps to outside the
- * current function as call like.
- *
- * Actual navigation will come next, with further understanding of how
- * the symbol searching and disassembly should be done.
- */
- if (maps__find_ams(ms->maps, &target) == 0 &&
- map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
- ops->target.sym = target.ms.sym;
-
- if (!ops->target.outside) {
- ops->target.offset = target.addr - start;
- ops->target.offset_avail = true;
- } else {
- ops->target.offset_avail = false;
- }
-
- return 0;
-}
-
-static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name)
-{
- const char *c;
-
- if (!ops->target.addr || ops->target.offset < 0)
- return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
-
- if (ops->target.outside && ops->target.sym != NULL)
- return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
-
- c = strchr(ops->raw, ',');
- c = validate_comma(c, ops);
-
- if (c != NULL) {
- const char *c2 = strchr(c + 1, ',');
-
- c2 = validate_comma(c2, ops);
- /* check for 3-op insn */
- if (c2 != NULL)
- c = c2;
- c++;
-
- /* mirror arch objdump's space-after-comma style */
- if (*c == ' ')
- c++;
- }
-
- return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name,
- ins->name, c ? c - ops->raw : 0, ops->raw,
- ops->target.offset);
-}
-
-static void jump__delete(struct ins_operands *ops __maybe_unused)
-{
- /*
- * The ops->jump.raw_comment and ops->jump.raw_func_start belong to the
- * raw string, don't free them.
- */
-}
-
-static struct ins_ops jump_ops = {
- .free = jump__delete,
- .parse = jump__parse,
- .scnprintf = jump__scnprintf,
-};
-
-bool ins__is_jump(const struct ins *ins)
-{
- return ins->ops == &jump_ops || ins->ops == &loongarch_jump_ops;
-}
-
-static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
-{
- char *endptr, *name, *t;
-
- if (strstr(raw, "(%rip)") == NULL)
- return 0;
-
- *addrp = strtoull(comment, &endptr, 16);
- if (endptr == comment)
- return 0;
- name = strchr(endptr, '<');
- if (name == NULL)
- return -1;
-
- name++;
-
- t = strchr(name, '>');
- if (t == NULL)
- return 0;
-
- *t = '\0';
- *namep = strdup(name);
- *t = '>';
-
- return 0;
-}
-
-static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
-{
- ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
- if (ops->locked.ops == NULL)
- return 0;
-
- if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
- goto out_free_ops;
-
- ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);
-
- if (ops->locked.ins.ops == NULL)
- goto out_free_ops;
-
- if (ops->locked.ins.ops->parse &&
- ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0)
- goto out_free_ops;
-
- return 0;
-
-out_free_ops:
- zfree(&ops->locked.ops);
- return 0;
-}
-
-static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name)
-{
- int printed;
-
- if (ops->locked.ins.ops == NULL)
- return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
-
- printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name);
- return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
- size - printed, ops->locked.ops, max_ins_name);
-}
-
-static void lock__delete(struct ins_operands *ops)
-{
- struct ins *ins = &ops->locked.ins;
-
- if (ins->ops && ins->ops->free)
- ins->ops->free(ops->locked.ops);
- else
- ins__delete(ops->locked.ops);
-
- zfree(&ops->locked.ops);
- zfree(&ops->target.raw);
- zfree(&ops->target.name);
-}
-
-static struct ins_ops lock_ops = {
- .free = lock__delete,
- .parse = lock__parse,
- .scnprintf = lock__scnprintf,
-};
-
-/*
- * Check if the operand has more than one register, like x86 SIB addressing:
- * 0x1234(%rax, %rbx, 8)
- *
- * But it doesn't care about segment selectors like %gs:0x5678(%rcx), so just check
- * the input string after 'memory_ref_char' if it exists.
- */
-static bool check_multi_regs(struct arch *arch, const char *op)
-{
- int count = 0;
-
- if (arch->objdump.register_char == 0)
- return false;
-
- if (arch->objdump.memory_ref_char) {
- op = strchr(op, arch->objdump.memory_ref_char);
- if (op == NULL)
- return false;
- }
-
- while ((op = strchr(op, arch->objdump.register_char)) != NULL) {
- count++;
- op++;
- }
-
- return count > 1;
-}
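
A couple of x86 operands run through the check_multi_regs() logic above make the register-counting rule concrete (illustrative inputs only):

	/*  "0x1234(%rax,%rbx,8)"  -> true   (two '%' after the '(')
	 *  "0x8(%rbx)"            -> false  (a single register)
	 *  "%gs:0x5678(%rcx)"     -> false  (only the part after '(' is checked)
	 */
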
-
-static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
-{
- char *s = strchr(ops->raw, ','), *target, *comment, prev;
-
- if (s == NULL)
- return -1;
-
- *s = '\0';
-
- /*
- * x86 SIB addressing has something like 0x8(%rax, %rcx, 1)
- * so it needs to have the closing parenthesis.
- */
- if (strchr(ops->raw, '(')) {
- *s = ',';
- s = strchr(ops->raw, ')');
- if (s == NULL || s[1] != ',')
- return -1;
- *++s = '\0';
- }
-
- ops->source.raw = strdup(ops->raw);
- *s = ',';
-
- if (ops->source.raw == NULL)
- return -1;
-
- ops->source.multi_regs = check_multi_regs(arch, ops->source.raw);
-
- target = skip_spaces(++s);
- comment = strchr(s, arch->objdump.comment_char);
-
- if (comment != NULL)
- s = comment - 1;
- else
- s = strchr(s, '\0') - 1;
-
- while (s > target && isspace(s[0]))
- --s;
- s++;
- prev = *s;
- *s = '\0';
-
- ops->target.raw = strdup(target);
- *s = prev;
-
- if (ops->target.raw == NULL)
- goto out_free_source;
-
- ops->target.multi_regs = check_multi_regs(arch, ops->target.raw);
-
- if (comment == NULL)
- return 0;
-
- comment = skip_spaces(comment);
- comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
- comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
-
- return 0;
-
-out_free_source:
- zfree(&ops->source.raw);
- return -1;
-}
-
-static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name)
-{
- return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name,
- ops->source.name ?: ops->source.raw,
- ops->target.name ?: ops->target.raw);
-}
-
-static struct ins_ops mov_ops = {
- .parse = mov__parse,
- .scnprintf = mov__scnprintf,
-};
-
-static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
-{
- char *target, *comment, *s, prev;
-
- target = s = ops->raw;
-
- while (s[0] != '\0' && !isspace(s[0]))
- ++s;
- prev = *s;
- *s = '\0';
-
- ops->target.raw = strdup(target);
- *s = prev;
-
- if (ops->target.raw == NULL)
- return -1;
-
- comment = strchr(s, arch->objdump.comment_char);
- if (comment == NULL)
- return 0;
-
- comment = skip_spaces(comment);
- comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
-
- return 0;
-}
-
-static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name)
-{
- return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
- ops->target.name ?: ops->target.raw);
-}
-
-static struct ins_ops dec_ops = {
- .parse = dec__parse,
- .scnprintf = dec__scnprintf,
-};
-
-static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
- struct ins_operands *ops __maybe_unused, int max_ins_name)
-{
- return scnprintf(bf, size, "%-*s", max_ins_name, "nop");
-}
-
-static struct ins_ops nop_ops = {
- .scnprintf = nop__scnprintf,
-};
-
-static struct ins_ops ret_ops = {
- .scnprintf = ins__raw_scnprintf,
-};
-
-bool ins__is_ret(const struct ins *ins)
-{
- return ins->ops == &ret_ops;
-}
-
-bool ins__is_lock(const struct ins *ins)
-{
- return ins->ops == &lock_ops;
-}
-
-static int ins__key_cmp(const void *name, const void *insp)
-{
- const struct ins *ins = insp;
-
- return strcmp(name, ins->name);
-}
-
-static int ins__cmp(const void *a, const void *b)
-{
- const struct ins *ia = a;
- const struct ins *ib = b;
-
- return strcmp(ia->name, ib->name);
-}
-
-static void ins__sort(struct arch *arch)
-{
- const int nmemb = arch->nr_instructions;
-
- qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
-}
-
-static struct ins_ops *__ins__find(struct arch *arch, const char *name)
-{
- struct ins *ins;
- const int nmemb = arch->nr_instructions;
-
- if (!arch->sorted_instructions) {
- ins__sort(arch);
- arch->sorted_instructions = true;
- }
-
- ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
- if (ins)
- return ins->ops;
-
- if (arch->insn_suffix) {
- char tmp[32];
- char suffix;
- size_t len = strlen(name);
-
- if (len == 0 || len >= sizeof(tmp))
- return NULL;
-
- suffix = name[len - 1];
- if (strchr(arch->insn_suffix, suffix) == NULL)
- return NULL;
-
- strcpy(tmp, name);
- tmp[len - 1] = '\0'; /* remove the suffix and check again */
-
- ins = bsearch(tmp, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
- }
- return ins ? ins->ops : NULL;
-}
-
-static struct ins_ops *ins__find(struct arch *arch, const char *name)
-{
- struct ins_ops *ops = __ins__find(arch, name);
-
- if (!ops && arch->associate_instruction_ops)
- ops = arch->associate_instruction_ops(arch, name);
-
- return ops;
-}
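
The suffix fallback in __ins__find() above (moved to disasm.c by this patch) is easiest to see with an x86 mnemonic that carries a size suffix; a sketch, assuming the x86 table only carries the plain "mov" entry:

	/* ins__find(arch, "movl"):
	 *   1. bsearch("movl") misses - the table only has "mov"
	 *   2. 'l' is found in arch->insn_suffix ("bwlq"), so it is stripped
	 *   3. bsearch("mov") hits and mov_ops is returned
	 */
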
-
-static int arch__key_cmp(const void *name, const void *archp)
-{
- const struct arch *arch = archp;
-
- return strcmp(name, arch->name);
-}
-
-static int arch__cmp(const void *a, const void *b)
-{
- const struct arch *aa = a;
- const struct arch *ab = b;
-
- return strcmp(aa->name, ab->name);
-}
-
-static void arch__sort(void)
-{
- const int nmemb = ARRAY_SIZE(architectures);
-
- qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
-}
-
-static struct arch *arch__find(const char *name)
-{
- const int nmemb = ARRAY_SIZE(architectures);
- static bool sorted;
-
- if (!sorted) {
- arch__sort();
- sorted = true;
- }
-
- return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
-}
-
-bool arch__is(struct arch *arch, const char *name)
-{
- return !strcmp(arch->name, name);
-}
-
/* symbol histogram: key = offset << 16 | evsel->core.idx */
static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
{
@@ -1156,14 +376,33 @@ int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
return err;
}
+struct annotation_line *annotated_source__get_line(struct annotated_source *src,
+ s64 offset)
+{
+ struct annotation_line *al;
+
+ list_for_each_entry(al, &src->source, node) {
+ if (al->offset == offset)
+ return al;
+ }
+ return NULL;
+}
+
static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end)
{
+ struct annotation_line *al;
unsigned n_insn = 0;
- u64 offset;
- for (offset = start; offset <= end; offset++) {
- if (notes->src->offsets[offset])
- n_insn++;
+ al = annotated_source__get_line(notes->src, start);
+ if (al == NULL)
+ return 0;
+
+ list_for_each_entry_from(al, &notes->src->source, node) {
+ if (al->offset == -1)
+ continue;
+ if ((u64)al->offset > end)
+ break;
+ n_insn++;
}
return n_insn;
}
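
annotated_source__get_line() above replaces the old notes->src->offsets[] array with a linear walk of the source list, so the later hunks look a line up by offset instead of indexing. The basic pattern, with offset standing in for whatever value the caller has:

	struct annotation_line *al;

	al = annotated_source__get_line(notes->src, offset);
	if (al == NULL)		/* no disassembly line starts at this offset */
		return;
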
@@ -1180,10 +419,10 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
{
unsigned n_insn;
unsigned int cover_insn = 0;
- u64 offset;
n_insn = annotation__count_insn(notes, start, end);
if (n_insn && ch->num && ch->cycles) {
+ struct annotation_line *al;
struct annotated_branch *branch;
float ipc = n_insn / ((double)ch->cycles / (double)ch->num);
@@ -1191,10 +430,16 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
if (ch->reset >= 0x7fff)
return;
- for (offset = start; offset <= end; offset++) {
- struct annotation_line *al = notes->src->offsets[offset];
+ al = annotated_source__get_line(notes->src, start);
+ if (al == NULL)
+ return;
- if (al && al->cycles && al->cycles->ipc == 0.0) {
+ list_for_each_entry_from(al, &notes->src->source, node) {
+ if (al->offset == -1)
+ continue;
+ if ((u64)al->offset > end)
+ break;
+ if (al->cycles && al->cycles->ipc == 0.0) {
al->cycles->ipc = ipc;
cover_insn++;
}
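
The ipc value filled in above comes from ipc = n_insn / ((double)ch->cycles / (double)ch->num) at the top of this hunk; a worked example with made-up numbers:

	/* a block of n_insn = 4 instructions whose cycles histogram has
	 * accumulated cycles = 800 over num = 100 hits:
	 *   ipc = 4 / (800 / 100) = 4 / 8 = 0.5
	 * and that 0.5 is copied into al->cycles->ipc of every covered line.
	 */
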
@@ -1230,7 +475,7 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size)
if (ch && ch->cycles) {
struct annotation_line *al;
- al = notes->src->offsets[offset];
+ al = annotated_source__get_line(notes->src, offset);
if (al && al->cycles == NULL) {
al->cycles = zalloc(sizeof(*al->cycles));
if (al->cycles == NULL) {
@@ -1253,7 +498,9 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size)
struct cyc_hist *ch = &notes->branch->cycles_hist[offset];
if (ch && ch->cycles) {
- struct annotation_line *al = notes->src->offsets[offset];
+ struct annotation_line *al;
+
+ al = annotated_source__get_line(notes->src, offset);
if (al)
zfree(&al->cycles);
}
@@ -1276,142 +523,6 @@ int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *samp
return symbol__inc_addr_samples(&he->ms, evsel, ip, sample);
}
-static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
-{
- dl->ins.ops = ins__find(arch, dl->ins.name);
-
- if (!dl->ins.ops)
- return;
-
- if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0)
- dl->ins.ops = NULL;
-}
-
-static int disasm_line__parse(char *line, const char **namep, char **rawp)
-{
- char tmp, *name = skip_spaces(line);
-
- if (name[0] == '\0')
- return -1;
-
- *rawp = name + 1;
-
- while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
- ++*rawp;
-
- tmp = (*rawp)[0];
- (*rawp)[0] = '\0';
- *namep = strdup(name);
-
- if (*namep == NULL)
- goto out;
-
- (*rawp)[0] = tmp;
- *rawp = strim(*rawp);
-
- return 0;
-
-out:
- return -1;
-}
-
-struct annotate_args {
- struct arch *arch;
- struct map_symbol ms;
- struct evsel *evsel;
- struct annotation_options *options;
- s64 offset;
- char *line;
- int line_nr;
- char *fileloc;
-};
-
-static void annotation_line__init(struct annotation_line *al,
- struct annotate_args *args,
- int nr)
-{
- al->offset = args->offset;
- al->line = strdup(args->line);
- al->line_nr = args->line_nr;
- al->fileloc = args->fileloc;
- al->data_nr = nr;
-}
-
-static void annotation_line__exit(struct annotation_line *al)
-{
- zfree_srcline(&al->path);
- zfree(&al->line);
- zfree(&al->cycles);
-}
-
-static size_t disasm_line_size(int nr)
-{
- struct annotation_line *al;
-
- return (sizeof(struct disasm_line) + (sizeof(al->data[0]) * nr));
-}
-
-/*
- * Allocating the disasm annotation line data with the
- * following structure:
- *
- * -------------------------------------------
- * struct disasm_line | struct annotation_line
- * -------------------------------------------
- *
- * We have the 'struct annotation_line' member as the last member
- * of 'struct disasm_line' to allow easy access.
- */
-static struct disasm_line *disasm_line__new(struct annotate_args *args)
-{
- struct disasm_line *dl = NULL;
- int nr = 1;
-
- if (evsel__is_group_event(args->evsel))
- nr = args->evsel->core.nr_members;
-
- dl = zalloc(disasm_line_size(nr));
- if (!dl)
- return NULL;
-
- annotation_line__init(&dl->al, args, nr);
- if (dl->al.line == NULL)
- goto out_delete;
-
- if (args->offset != -1) {
- if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
- goto out_free_line;
-
- disasm_line__init_ins(dl, args->arch, &args->ms);
- }
-
- return dl;
-
-out_free_line:
- zfree(&dl->al.line);
-out_delete:
- free(dl);
- return NULL;
-}
-
-void disasm_line__free(struct disasm_line *dl)
-{
- if (dl->ins.ops && dl->ins.ops->free)
- dl->ins.ops->free(&dl->ops);
- else
- ins__delete(&dl->ops);
- zfree(&dl->ins.name);
- annotation_line__exit(&dl->al);
- free(dl);
-}
-
-int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
-{
- if (raw || !dl->ins.ops)
- return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw);
-
- return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
-}
void annotation__exit(struct annotation *notes)
{
@@ -1471,8 +582,7 @@ bool annotation__trylock(struct annotation *notes)
return mutex_trylock(mutex);
}
-
-static void annotation_line__add(struct annotation_line *al, struct list_head *head)
+void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
list_add_tail(&al->node, head);
}
@@ -1682,673 +792,6 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
return 0;
}
-/*
- * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
- * which looks like the following:
- *
- * 0000000000415500 <_init>:
- * 415500: sub $0x8,%rsp
- * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
- * 41550b: test %rax,%rax
- * 41550e: je 415515 <_init+0x15>
- * 415510: callq 416e70 <__gmon_start__@plt>
- * 415515: add $0x8,%rsp
- * 415519: retq
- *
- * it will be parsed and saved into struct disasm_line as
- * <offset> <name> <ops.raw>
- *
- * The offset will be a relative offset from the start of the symbol and -1
- * means that it's not a disassembly line, so it should be treated differently.
- * The ops.raw part will be parsed further according to type of the instruction.
- */
-static int symbol__parse_objdump_line(struct symbol *sym,
- struct annotate_args *args,
- char *parsed_line, int *line_nr, char **fileloc)
-{
- struct map *map = args->ms.map;
- struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *dl;
- char *tmp;
- s64 line_ip, offset = -1;
- regmatch_t match[2];
-
- /* /filename:linenr ? Save line number and ignore. */
- if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
- *line_nr = atoi(parsed_line + match[1].rm_so);
- free(*fileloc);
- *fileloc = strdup(parsed_line);
- return 0;
- }
-
- /* Process hex address followed by ':'. */
- line_ip = strtoull(parsed_line, &tmp, 16);
- if (parsed_line != tmp && tmp[0] == ':' && tmp[1] != '\0') {
- u64 start = map__rip_2objdump(map, sym->start),
- end = map__rip_2objdump(map, sym->end);
-
- offset = line_ip - start;
- if ((u64)line_ip < start || (u64)line_ip >= end)
- offset = -1;
- else
- parsed_line = tmp + 1;
- }
-
- args->offset = offset;
- args->line = parsed_line;
- args->line_nr = *line_nr;
- args->fileloc = *fileloc;
- args->ms.sym = sym;
-
- dl = disasm_line__new(args);
- (*line_nr)++;
-
- if (dl == NULL)
- return -1;
-
- if (!disasm_line__has_local_offset(dl)) {
- dl->ops.target.offset = dl->ops.target.addr -
- map__rip_2objdump(map, sym->start);
- dl->ops.target.offset_avail = true;
- }
-
- /* kcore has no symbols, so add the call target symbol */
- if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.sym) {
- struct addr_map_symbol target = {
- .addr = dl->ops.target.addr,
- .ms = { .map = map, },
- };
-
- if (!maps__find_ams(args->ms.maps, &target) &&
- target.ms.sym->start == target.al_addr)
- dl->ops.target.sym = target.ms.sym;
- }
-
- annotation_line__add(&dl->al, &notes->src->source);
- return 0;
-}
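
Connecting the comment above symbol__parse_objdump_line() to the struct fields, one line of the example listing ends up stored roughly like this (values taken from that listing; start is the map__rip_2objdump() of the symbol start):

	/*  "415504:  mov  0x2f5ad5(%rip),%rax  # 70afe0 <_DYNAMIC+0x2f8>"
	 *  becomes, after the offset math above and disasm_line__parse():
	 *    dl->al.offset = 0x415504 - start
	 *    dl->ins.name  = "mov"
	 *    dl->ops.raw   = "0x2f5ad5(%rip),%rax  # 70afe0 <_DYNAMIC+0x2f8>"
	 */
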
-
-static __attribute__((constructor)) void symbol__init_regexpr(void)
-{
- regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
-}
-
-static void delete_last_nop(struct symbol *sym)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct list_head *list = &notes->src->source;
- struct disasm_line *dl;
-
- while (!list_empty(list)) {
- dl = list_entry(list->prev, struct disasm_line, al.node);
-
- if (dl->ins.ops) {
- if (dl->ins.ops != &nop_ops)
- return;
- } else {
- if (!strstr(dl->al.line, " nop ") &&
- !strstr(dl->al.line, " nopl ") &&
- !strstr(dl->al.line, " nopw "))
- return;
- }
-
- list_del_init(&dl->al.node);
- disasm_line__free(dl);
- }
-}
-
-int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen)
-{
- struct dso *dso = map__dso(ms->map);
-
- BUG_ON(buflen == 0);
-
- if (errnum >= 0) {
- str_error_r(errnum, buf, buflen);
- return 0;
- }
-
- switch (errnum) {
- case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
- char bf[SBUILD_ID_SIZE + 15] = " with build id ";
- char *build_id_msg = NULL;
-
- if (dso->has_build_id) {
- build_id__sprintf(&dso->bid, bf + 15);
- build_id_msg = bf;
- }
- scnprintf(buf, buflen,
- "No vmlinux file%s\nwas found in the path.\n\n"
- "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
- "Please use:\n\n"
- " perf buildid-cache -vu vmlinux\n\n"
- "or:\n\n"
- " --vmlinux vmlinux\n", build_id_msg ?: "");
- }
- break;
- case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
- scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
- break;
- case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP:
- scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions.");
- break;
- case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING:
- scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization.");
- break;
- case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE:
- scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name);
- break;
- case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF:
- scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.",
- dso->long_name);
- break;
- default:
- scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
- break;
- }
-
- return 0;
-}
-
-static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
-{
- char linkname[PATH_MAX];
- char *build_id_filename;
- char *build_id_path = NULL;
- char *pos;
- int len;
-
- if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
- !dso__is_kcore(dso))
- return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;
-
- build_id_filename = dso__build_id_filename(dso, NULL, 0, false);
- if (build_id_filename) {
- __symbol__join_symfs(filename, filename_size, build_id_filename);
- free(build_id_filename);
- } else {
- if (dso->has_build_id)
- return ENOMEM;
- goto fallback;
- }
-
- build_id_path = strdup(filename);
- if (!build_id_path)
- return ENOMEM;
-
- /*
- * old style build-id cache has name of XX/XXXXXXX.. while
- * new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
- * extract the build-id part of dirname in the new style only.
- */
- pos = strrchr(build_id_path, '/');
- if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
- dirname(build_id_path);
-
- if (dso__is_kcore(dso))
- goto fallback;
-
- len = readlink(build_id_path, linkname, sizeof(linkname) - 1);
- if (len < 0)
- goto fallback;
-
- linkname[len] = '\0';
- if (strstr(linkname, DSO__NAME_KALLSYMS) ||
- access(filename, R_OK)) {
-fallback:
- /*
- * If we don't have build-ids or the build-id file isn't in the
- * cache, or is just a kallsyms file, well, let's hope that this
- * DSO is the same as when 'perf record' ran.
- */
- if (dso->kernel && dso->long_name[0] == '/')
- snprintf(filename, filename_size, "%s", dso->long_name);
- else
- __symbol__join_symfs(filename, filename_size, dso->long_name);
-
- mutex_lock(&dso->lock);
- if (access(filename, R_OK) && errno == ENOENT && dso->nsinfo) {
- char *new_name = dso__filename_with_chroot(dso, filename);
- if (new_name) {
- strlcpy(filename, new_name, filename_size);
- free(new_name);
- }
- }
- mutex_unlock(&dso->lock);
- }
-
- free(build_id_path);
- return 0;
-}
-
-#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
-#define PACKAGE "perf"
-#include <bfd.h>
-#include <dis-asm.h>
-#include <bpf/bpf.h>
-#include <bpf/btf.h>
-#include <bpf/libbpf.h>
-#include <linux/btf.h>
-#include <tools/dis-asm-compat.h>
-
-static int symbol__disassemble_bpf(struct symbol *sym,
- struct annotate_args *args)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct bpf_prog_linfo *prog_linfo = NULL;
- struct bpf_prog_info_node *info_node;
- int len = sym->end - sym->start;
- disassembler_ftype disassemble;
- struct map *map = args->ms.map;
- struct perf_bpil *info_linear;
- struct disassemble_info info;
- struct dso *dso = map__dso(map);
- int pc = 0, count, sub_id;
- struct btf *btf = NULL;
- char tpath[PATH_MAX];
- size_t buf_size;
- int nr_skip = 0;
- char *buf;
- bfd *bfdf;
- int ret;
- FILE *s;
-
- if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
- return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
-
- pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
- sym->name, sym->start, sym->end - sym->start);
-
- memset(tpath, 0, sizeof(tpath));
- perf_exe(tpath, sizeof(tpath));
-
- bfdf = bfd_openr(tpath, NULL);
- if (bfdf == NULL)
- abort();
-
- if (!bfd_check_format(bfdf, bfd_object))
- abort();
-
- s = open_memstream(&buf, &buf_size);
- if (!s) {
- ret = errno;
- goto out;
- }
- init_disassemble_info_compat(&info, s,
- (fprintf_ftype) fprintf,
- fprintf_styled);
- info.arch = bfd_get_arch(bfdf);
- info.mach = bfd_get_mach(bfdf);
-
- info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
- dso->bpf_prog.id);
- if (!info_node) {
- ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
- goto out;
- }
- info_linear = info_node->info_linear;
- sub_id = dso->bpf_prog.sub_id;
-
- info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
- info.buffer_length = info_linear->info.jited_prog_len;
-
- if (info_linear->info.nr_line_info)
- prog_linfo = bpf_prog_linfo__new(&info_linear->info);
-
- if (info_linear->info.btf_id) {
- struct btf_node *node;
-
- node = perf_env__find_btf(dso->bpf_prog.env,
- info_linear->info.btf_id);
- if (node)
- btf = btf__new((__u8 *)(node->data),
- node->data_size);
- }
-
- disassemble_init_for_target(&info);
-
-#ifdef DISASM_FOUR_ARGS_SIGNATURE
- disassemble = disassembler(info.arch,
- bfd_big_endian(bfdf),
- info.mach,
- bfdf);
-#else
- disassemble = disassembler(bfdf);
-#endif
- if (disassemble == NULL)
- abort();
-
- fflush(s);
- do {
- const struct bpf_line_info *linfo = NULL;
- struct disasm_line *dl;
- size_t prev_buf_size;
- const char *srcline;
- u64 addr;
-
- addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
- count = disassemble(pc, &info);
-
- if (prog_linfo)
- linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
- addr, sub_id,
- nr_skip);
-
- if (linfo && btf) {
- srcline = btf__name_by_offset(btf, linfo->line_off);
- nr_skip++;
- } else
- srcline = NULL;
-
- fprintf(s, "\n");
- prev_buf_size = buf_size;
- fflush(s);
-
- if (!annotate_opts.hide_src_code && srcline) {
- args->offset = -1;
- args->line = strdup(srcline);
- args->line_nr = 0;
- args->fileloc = NULL;
- args->ms.sym = sym;
- dl = disasm_line__new(args);
- if (dl) {
- annotation_line__add(&dl->al,
- &notes->src->source);
- }
- }
-
- args->offset = pc;
- args->line = buf + prev_buf_size;
- args->line_nr = 0;
- args->fileloc = NULL;
- args->ms.sym = sym;
- dl = disasm_line__new(args);
- if (dl)
- annotation_line__add(&dl->al, &notes->src->source);
-
- pc += count;
- } while (count > 0 && pc < len);
-
- ret = 0;
-out:
- free(prog_linfo);
- btf__free(btf);
- fclose(s);
- bfd_close(bfdf);
- return ret;
-}
-#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
-static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
- struct annotate_args *args __maybe_unused)
-{
- return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
-}
-#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
-
-static int
-symbol__disassemble_bpf_image(struct symbol *sym,
- struct annotate_args *args)
-{
- struct annotation *notes = symbol__annotation(sym);
- struct disasm_line *dl;
-
- args->offset = -1;
- args->line = strdup("to be implemented");
- args->line_nr = 0;
- args->fileloc = NULL;
- dl = disasm_line__new(args);
- if (dl)
- annotation_line__add(&dl->al, &notes->src->source);
-
- zfree(&args->line);
- return 0;
-}
-
-/*
- * Possibly create a new version of line with tabs expanded. Returns the
- * existing or new line, storage is updated if a new line is allocated. If
- * allocation fails then NULL is returned.
- */
-static char *expand_tabs(char *line, char **storage, size_t *storage_len)
-{
- size_t i, src, dst, len, new_storage_len, num_tabs;
- char *new_line;
- size_t line_len = strlen(line);
-
- for (num_tabs = 0, i = 0; i < line_len; i++)
- if (line[i] == '\t')
- num_tabs++;
-
- if (num_tabs == 0)
- return line;
-
- /*
- * Space for the line and '\0', less the leading and trailing
- * spaces. Each tab may introduce 7 additional spaces.
- */
- new_storage_len = line_len + 1 + (num_tabs * 7);
-
- new_line = malloc(new_storage_len);
- if (new_line == NULL) {
- pr_err("Failure allocating memory for tab expansion\n");
- return NULL;
- }
-
- /*
- * Copy regions starting at src and expand tabs. If there are two
- * adjacent tabs then 'src == i', the memcpy is of size 0 and the spaces
- * are inserted.
- */
- for (i = 0, src = 0, dst = 0; i < line_len && num_tabs; i++) {
- if (line[i] == '\t') {
- len = i - src;
- memcpy(&new_line[dst], &line[src], len);
- dst += len;
- new_line[dst++] = ' ';
- while (dst % 8 != 0)
- new_line[dst++] = ' ';
- src = i + 1;
- num_tabs--;
- }
- }
-
- /* Expand the last region. */
- len = line_len - src;
- memcpy(&new_line[dst], &line[src], len);
- dst += len;
- new_line[dst] = '\0';
-
- free(*storage);
- *storage = new_line;
- *storage_len = new_storage_len;
- return new_line;
-
-}
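
expand_tabs() above pads each tab out to the next 8-column stop; for instance (illustrative input):

	/*  expand_tabs("mov\t%rax,%rbx", &line, &len)
	 *    -> "mov     %rax,%rbx"  ('\t' becomes 5 spaces, padding to column 8)
	 */
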
-
-static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
-{
- struct annotation_options *opts = &annotate_opts;
- struct map *map = args->ms.map;
- struct dso *dso = map__dso(map);
- char *command;
- FILE *file;
- char symfs_filename[PATH_MAX];
- struct kcore_extract kce;
- bool delete_extract = false;
- bool decomp = false;
- int lineno = 0;
- char *fileloc = NULL;
- int nline;
- char *line;
- size_t line_len;
- const char *objdump_argv[] = {
- "/bin/sh",
- "-c",
- NULL, /* Will be the objdump command to run. */
- "--",
- NULL, /* Will be the symfs path. */
- NULL,
- };
- struct child_process objdump_process;
- int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));
-
- if (err)
- return err;
-
- pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
- symfs_filename, sym->name, map__unmap_ip(map, sym->start),
- map__unmap_ip(map, sym->end));
-
- pr_debug("annotating [%p] %30s : [%p] %30s\n",
- dso, dso->long_name, sym, sym->name);
-
- if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) {
- return symbol__disassemble_bpf(sym, args);
- } else if (dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE) {
- return symbol__disassemble_bpf_image(sym, args);
- } else if (dso__is_kcore(dso)) {
- kce.kcore_filename = symfs_filename;
- kce.addr = map__rip_2objdump(map, sym->start);
- kce.offs = sym->start;
- kce.len = sym->end - sym->start;
- if (!kcore_extract__create(&kce)) {
- delete_extract = true;
- strlcpy(symfs_filename, kce.extract_filename,
- sizeof(symfs_filename));
- }
- } else if (dso__needs_decompress(dso)) {
- char tmp[KMOD_DECOMP_LEN];
-
- if (dso__decompress_kmodule_path(dso, symfs_filename,
- tmp, sizeof(tmp)) < 0)
- return -1;
-
- decomp = true;
- strcpy(symfs_filename, tmp);
- }
-
- err = asprintf(&command,
- "%s %s%s --start-address=0x%016" PRIx64
- " --stop-address=0x%016" PRIx64
- " %s -d %s %s %s %c%s%c %s%s -C \"$1\"",
- opts->objdump_path ?: "objdump",
- opts->disassembler_style ? "-M " : "",
- opts->disassembler_style ?: "",
- map__rip_2objdump(map, sym->start),
- map__rip_2objdump(map, sym->end),
- opts->show_linenr ? "-l" : "",
- opts->show_asm_raw ? "" : "--no-show-raw-insn",
- opts->annotate_src ? "-S" : "",
- opts->prefix ? "--prefix " : "",
- opts->prefix ? '"' : ' ',
- opts->prefix ?: "",
- opts->prefix ? '"' : ' ',
- opts->prefix_strip ? "--prefix-strip=" : "",
- opts->prefix_strip ?: "");
-
- if (err < 0) {
- pr_err("Failure allocating memory for the command to run\n");
- goto out_remove_tmp;
- }
-
- pr_debug("Executing: %s\n", command);
-
- objdump_argv[2] = command;
- objdump_argv[4] = symfs_filename;
-
- /* Create a pipe to read from for stdout */
- memset(&objdump_process, 0, sizeof(objdump_process));
- objdump_process.argv = objdump_argv;
- objdump_process.out = -1;
- objdump_process.err = -1;
- objdump_process.no_stderr = 1;
- if (start_command(&objdump_process)) {
- pr_err("Failure starting to run %s\n", command);
- err = -1;
- goto out_free_command;
- }
-
- file = fdopen(objdump_process.out, "r");
- if (!file) {
- pr_err("Failure creating FILE stream for %s\n", command);
- /*
- * If we were using debug info should retry with
- * original binary.
- */
- err = -1;
- goto out_close_stdout;
- }
-
- /* Storage for getline. */
- line = NULL;
- line_len = 0;
-
- nline = 0;
- while (!feof(file)) {
- const char *match;
- char *expanded_line;
-
- if (getline(&line, &line_len, file) < 0 || !line)
- break;
-
- /* Skip lines containing "filename:" */
- match = strstr(line, symfs_filename);
- if (match && match[strlen(symfs_filename)] == ':')
- continue;
-
- expanded_line = strim(line);
- expanded_line = expand_tabs(expanded_line, &line, &line_len);
- if (!expanded_line)
- break;
-
- /*
- * The source code line number (lineno) needs to be kept
- * across calls to symbol__parse_objdump_line(), so that it
- * can be associated with the instructions till the next one.
- * See disasm_line__new() and struct disasm_line::line_nr.
- */
- if (symbol__parse_objdump_line(sym, args, expanded_line,
- &lineno, &fileloc) < 0)
- break;
- nline++;
- }
- free(line);
- free(fileloc);
-
- err = finish_command(&objdump_process);
- if (err)
- pr_err("Error running %s\n", command);
-
- if (nline == 0) {
- err = -1;
- pr_err("No output from %s\n", command);
- }
-
- /*
- * kallsyms does not have symbol sizes, so there may be a nop at the end.
- * Remove it.
- */
- if (dso__is_kcore(dso))
- delete_last_nop(sym);
-
- fclose(file);
-
-out_close_stdout:
- close(objdump_process.out);
-
-out_free_command:
- free(command);
-
-out_remove_tmp:
- if (decomp)
- unlink(symfs_filename);
-
- if (delete_extract)
- kcore_extract__delete(&kce);
-
- return err;
-}
-
static void calc_percent(struct annotation *notes,
struct evsel *evsel,
struct annotation_data *data,
@@ -2429,8 +872,10 @@ static int evsel__get_arch(struct evsel *evsel, struct arch **parch)
struct arch *arch;
int err;
- if (!arch_name)
+ if (!arch_name) {
+ *parch = NULL;
return errno;
+ }
*parch = arch = arch__find(arch_name);
if (arch == NULL) {
@@ -2468,15 +913,22 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
if (parch)
*parch = arch;
- if (!list_empty(&notes->src->source))
+ if (notes->src && !list_empty(&notes->src->source))
return 0;
args.arch = arch;
args.ms = *ms;
+
+ if (notes->src == NULL) {
+ notes->src = annotated_source__new();
+ if (notes->src == NULL)
+ return -1;
+ }
+
if (annotate_opts.full_addr)
- notes->start = map__objdump_2mem(ms->map, ms->sym->start);
+ notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
else
- notes->start = map__rip_2objdump(ms->map, ms->sym->start);
+ notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
return symbol__disassemble(sym, &args);
}
@@ -2658,7 +1110,7 @@ int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel)
int graph_dotted_len;
char buf[512];
- filename = strdup(dso->long_name);
+ filename = strdup(dso__long_name(dso));
if (!filename)
return -ENOMEM;
@@ -2823,7 +1275,7 @@ int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel)
}
fprintf(fp, "%s() %s\nEvent: %s\n\n",
- ms->sym->name, map__dso(ms->map)->long_name, ev_name);
+ ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name);
symbol__annotate_fprintf2(ms->sym, fp);
fclose(fp);
@@ -2845,13 +1297,16 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
{
struct annotation *notes = symbol__annotation(sym);
struct sym_hist *h = annotation__histogram(notes, evidx);
- int len = symbol__size(sym), offset;
+ struct annotation_line *al;
h->nr_samples = 0;
- for (offset = 0; offset < len; ++offset) {
+ list_for_each_entry(al, &notes->src->source, node) {
struct sym_hist_entry *entry;
- entry = annotated_source__hist_entry(notes->src, evidx, offset);
+ if (al->offset == -1)
+ continue;
+
+ entry = annotated_source__hist_entry(notes->src, evidx, al->offset);
if (entry == NULL)
continue;
@@ -2908,64 +1363,56 @@ bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym
return true;
}
-void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
+static void
+annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
{
- u64 offset, size = symbol__size(sym);
+ struct annotation_line *al;
/* PLT symbols contain external offsets */
if (strstr(sym->name, "@plt"))
return;
- for (offset = 0; offset < size; ++offset) {
- struct annotation_line *al = notes->src->offsets[offset];
+ list_for_each_entry(al, &notes->src->source, node) {
struct disasm_line *dl;
+ struct annotation_line *target;
dl = disasm_line(al);
if (!disasm_line__is_valid_local_jump(dl, sym))
continue;
- al = notes->src->offsets[dl->ops.target.offset];
-
+ target = annotated_source__get_line(notes->src,
+ dl->ops.target.offset);
/*
* FIXME: Oops, no jump target? Buggy disassembler? Or do we
* have to adjust to the previous offset?
*/
- if (al == NULL)
+ if (target == NULL)
continue;
- if (++al->jump_sources > notes->max_jump_sources)
- notes->max_jump_sources = al->jump_sources;
+ if (++target->jump_sources > notes->src->max_jump_sources)
+ notes->src->max_jump_sources = target->jump_sources;
}
}
-void annotation__set_offsets(struct annotation *notes, s64 size)
+static void annotation__set_index(struct annotation *notes)
{
struct annotation_line *al;
struct annotated_source *src = notes->src;
- src->max_line_len = 0;
+ src->widths.max_line_len = 0;
src->nr_entries = 0;
src->nr_asm_entries = 0;
list_for_each_entry(al, &src->source, node) {
size_t line_len = strlen(al->line);
- if (src->max_line_len < line_len)
- src->max_line_len = line_len;
+ if (src->widths.max_line_len < line_len)
+ src->widths.max_line_len = line_len;
al->idx = src->nr_entries++;
- if (al->offset != -1) {
+ if (al->offset != -1)
al->idx_asm = src->nr_asm_entries++;
- /*
- * FIXME: short-term band-aid to cope with assembly
- * routines that come with labels in the same column
- * as the address in objdump, sigh.
- *
- * E.g. copy_user_generic_unrolled
- */
- if (al->offset < size)
- notes->src->offsets[al->offset] = al;
- } else
+ else
al->idx_asm = -1;
}
}
@@ -2996,28 +1443,29 @@ static int annotation__max_ins_name(struct annotation *notes)
return max_name;
}
-void annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
+static void
+annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
- notes->widths.addr = notes->widths.target =
- notes->widths.min_addr = hex_width(symbol__size(sym));
- notes->widths.max_addr = hex_width(sym->end);
- notes->widths.jumps = width_jumps(notes->max_jump_sources);
- notes->widths.max_ins_name = annotation__max_ins_name(notes);
+ notes->src->widths.addr = notes->src->widths.target =
+ notes->src->widths.min_addr = hex_width(symbol__size(sym));
+ notes->src->widths.max_addr = hex_width(sym->end);
+ notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources);
+ notes->src->widths.max_ins_name = annotation__max_ins_name(notes);
}
void annotation__update_column_widths(struct annotation *notes)
{
if (annotate_opts.use_offset)
- notes->widths.target = notes->widths.min_addr;
+ notes->src->widths.target = notes->src->widths.min_addr;
else if (annotate_opts.full_addr)
- notes->widths.target = BITS_PER_LONG / 4;
+ notes->src->widths.target = BITS_PER_LONG / 4;
else
- notes->widths.target = notes->widths.max_addr;
+ notes->src->widths.target = notes->src->widths.max_addr;
- notes->widths.addr = notes->widths.target;
+ notes->src->widths.addr = notes->src->widths.target;
if (annotate_opts.show_nr_jumps)
- notes->widths.addr += notes->widths.jumps + 1;
+ notes->src->widths.addr += notes->src->widths.jumps + 1;
}
void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
@@ -3025,9 +1473,9 @@ void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *m
annotate_opts.full_addr = !annotate_opts.full_addr;
if (annotate_opts.full_addr)
- notes->start = map__objdump_2mem(ms->map, ms->sym->start);
+ notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
else
- notes->start = map__rip_2objdump(ms->map, ms->sym->start);
+ notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);
annotation__update_column_widths(notes);
}
@@ -3085,7 +1533,7 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
if (err) {
char msg[BUFSIZ];
- dso->annotate_warned = true;
+ dso__set_annotate_warned(dso);
symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
return -1;
@@ -3094,13 +1542,12 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
if (annotate_opts.print_lines) {
srcline_full_filename = annotate_opts.full_path;
symbol__calc_lines(ms, &source_line);
- print_summary(&source_line, dso->long_name);
+ print_summary(&source_line, dso__long_name(dso));
}
hists__scnprintf_title(hists, buf, sizeof(buf));
fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
- buf, percent_type_str(annotate_opts.percent_type), sym->name,
- dso->long_name);
+ buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso));
symbol__annotate_fprintf2(sym, stdout);
annotated_source__purge(symbol__annotation(sym)->src);
@@ -3119,7 +1566,7 @@ int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
if (err) {
char msg[BUFSIZ];
- dso->annotate_warned = true;
+ dso__set_annotate_warned(dso);
symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
return -1;
@@ -3130,7 +1577,7 @@ int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel)
if (annotate_opts.print_lines) {
srcline_full_filename = annotate_opts.full_path;
symbol__calc_lines(ms, &source_line);
- print_summary(&source_line, dso->long_name);
+ print_summary(&source_line, dso__long_name(dso));
}
symbol__annotate_printf(ms, evsel);
@@ -3153,7 +1600,7 @@ static double annotation_line__max_percent(struct annotation_line *al,
double percent_max = 0.0;
int i;
- for (i = 0; i < notes->nr_events; i++) {
+ for (i = 0; i < notes->src->nr_events; i++) {
double percent;
percent = annotation_data__percent(&al->data[i],
@@ -3194,7 +1641,8 @@ call_like:
obj__printf(obj, " ");
}
- disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset, notes->widths.max_ins_name);
+ disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
+ notes->src->widths.max_ins_name);
}
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
@@ -3242,7 +1690,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
if (al->offset != -1 && percent_max != 0.0) {
int i;
- for (i = 0; i < notes->nr_events; i++) {
+ for (i = 0; i < notes->src->nr_events; i++) {
double percent;
percent = annotation_data__percent(&al->data[i], percent_type);
@@ -3322,9 +1770,11 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " ");
else if (al->offset == -1) {
if (al->line_nr && annotate_opts.show_linenr)
- printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr);
+ printed = scnprintf(bf, sizeof(bf), "%-*d ",
+ notes->src->widths.addr + 1, al->line_nr);
else
- printed = scnprintf(bf, sizeof(bf), "%-*s ", notes->widths.addr, " ");
+ printed = scnprintf(bf, sizeof(bf), "%-*s ",
+ notes->src->widths.addr, " ");
obj__printf(obj, bf);
obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line);
} else {
@@ -3332,7 +1782,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
int color = -1;
if (!annotate_opts.use_offset)
- addr += notes->start;
+ addr += notes->src->start;
if (!annotate_opts.use_offset) {
printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
@@ -3342,7 +1792,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
if (annotate_opts.show_nr_jumps) {
int prev;
printed = scnprintf(bf, sizeof(bf), "%*d ",
- notes->widths.jumps,
+ notes->src->widths.jumps,
al->jump_sources);
prev = obj__set_jumps_percent_color(obj, al->jump_sources,
current_entry);
@@ -3351,7 +1801,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
}
print_addr:
printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
- notes->widths.target, addr);
+ notes->src->widths.target, addr);
} else if (ins__is_call(&disasm_line(al)->ins) &&
annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
goto print_addr;
@@ -3359,7 +1809,7 @@ print_addr:
goto print_addr;
} else {
printed = scnprintf(bf, sizeof(bf), "%-*s ",
- notes->widths.addr, " ");
+ notes->src->widths.addr, " ");
}
}
@@ -3395,37 +1845,29 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
size_t size = symbol__size(sym);
int nr_pcnt = 1, err;
- notes->src->offsets = zalloc(size * sizeof(struct annotation_line *));
- if (notes->src->offsets == NULL)
- return ENOMEM;
-
if (evsel__is_group_event(evsel))
nr_pcnt = evsel->core.nr_members;
err = symbol__annotate(ms, evsel, parch);
if (err)
- goto out_free_offsets;
+ return err;
symbol__calc_percent(sym, evsel);
- annotation__set_offsets(notes, size);
+ annotation__set_index(notes);
annotation__mark_jump_targets(notes, sym);
err = annotation__compute_ipc(notes, size);
if (err)
- goto out_free_offsets;
+ return err;
annotation__init_column_widths(notes, sym);
- notes->nr_events = nr_pcnt;
+ notes->src->nr_events = nr_pcnt;
annotation__update_column_widths(notes);
sym->annotate2 = 1;
return 0;
-
-out_free_offsets:
- zfree(&notes->src->offsets);
- return err;
}
static int annotation__config(const char *var, const char *value, void *data)
@@ -3597,6 +2039,12 @@ static int extract_reg_offset(struct arch *arch, const char *str,
* %gs:0x18(%rbx). In that case it should skip the part.
*/
if (*str == arch->objdump.register_char) {
+ if (arch__is(arch, "x86")) {
+ /* FIXME: Handle other segment registers */
+ if (!strncmp(str, "%gs:", 4))
+ op_loc->segment = INSN_SEG_X86_GS;
+ }
+
while (*str && !isdigit(*str) &&
*str != arch->objdump.memory_ref_char)
str++;
@@ -3651,7 +2099,7 @@ static int extract_reg_offset(struct arch *arch, const char *str,
* mov 0x18, %r8 # src_reg1 = -1, src_mem = 0
* # dst_reg1 = r8, dst_mem = 0
*
- * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, dst_multi_regs = 0
+ * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0
* # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
* # dst_multi_regs = 1, dst_offset = 8
*/
@@ -3662,7 +2110,7 @@ int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
struct annotated_op_loc *op_loc;
int i;
- if (!strcmp(dl->ins.name, "lock"))
+ if (ins__is_lock(&dl->ins))
ops = dl->ops.locked.ops;
else
ops = &dl->ops;
@@ -3693,40 +2141,40 @@ int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
op_loc->multi_regs = multi_regs;
extract_reg_offset(arch, insn_str, op_loc);
} else {
- char *s = strdup(insn_str);
+ char *s, *p = NULL;
+
+ if (arch__is(arch, "x86")) {
+ /* FIXME: Handle other segment registers */
+ if (!strncmp(insn_str, "%gs:", 4)) {
+ op_loc->segment = INSN_SEG_X86_GS;
+ op_loc->offset = strtol(insn_str + 4,
+ &p, 0);
+ if (p && p != insn_str + 4)
+ op_loc->imm = true;
+ continue;
+ }
+ }
+
+ s = strdup(insn_str);
+ if (s == NULL)
+ return -1;
- if (s) {
+ if (*s == arch->objdump.register_char)
op_loc->reg1 = get_dwarf_regnum(s, 0);
- free(s);
+ else if (*s == arch->objdump.imm_char) {
+ op_loc->offset = strtol(s + 1, &p, 0);
+ if (p && p != s + 1)
+ op_loc->imm = true;
}
+ free(s);
}
}
return 0;
}
-static void symbol__ensure_annotate(struct map_symbol *ms, struct evsel *evsel)
-{
- struct disasm_line *dl, *tmp_dl;
- struct annotation *notes;
-
- notes = symbol__annotation(ms->sym);
- if (!list_empty(&notes->src->source))
- return;
-
- if (symbol__annotate(ms, evsel, NULL) < 0)
- return;
-
- /* remove non-insn disasm lines for simplicity */
- list_for_each_entry_safe(dl, tmp_dl, &notes->src->source, al.node) {
- if (dl->al.offset == -1) {
- list_del(&dl->al.node);
- free(dl);
- }
- }
-}
-
-static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip)
+static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip,
+ bool allow_update)
{
struct disasm_line *dl;
struct annotation *notes;
@@ -3734,12 +2182,16 @@ static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip)
notes = symbol__annotation(sym);
list_for_each_entry(dl, &notes->src->source, al.node) {
+ if (dl->al.offset == -1)
+ continue;
+
if (sym->start + dl->al.offset == ip) {
/*
* llvm-objdump places "lock" in a separate line and
* in that case, we want to get the next line.
*/
- if (!strcmp(dl->ins.name, "lock") && *dl->ops.raw == '\0') {
+ if (ins__is_lock(&dl->ins) &&
+ *dl->ops.raw == '\0' && allow_update) {
ip++;
continue;
}
@@ -3785,6 +2237,58 @@ static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
return false;
}
+static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
+{
+ /* On x86_64, %gs:40 is used for stack canary */
+ if (arch__is(arch, "x86")) {
+ if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
+ loc->offset == 40)
+ return true;
+ }
+
+ return false;
+}
+
+static struct disasm_line *
+annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr)
+{
+ struct list_head *sources = &notes->src->source;
+ struct disasm_line *prev;
+
+ if (curr == list_first_entry(sources, struct disasm_line, al.node))
+ return NULL;
+
+ prev = list_prev_entry(curr, al.node);
+ while (prev->al.offset == -1 &&
+ prev != list_first_entry(sources, struct disasm_line, al.node))
+ prev = list_prev_entry(prev, al.node);
+
+ if (prev->al.offset == -1)
+ return NULL;
+
+ return prev;
+}
+
+static struct disasm_line *
+annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr)
+{
+ struct list_head *sources = &notes->src->source;
+ struct disasm_line *next;
+
+ if (curr == list_last_entry(sources, struct disasm_line, al.node))
+ return NULL;
+
+ next = list_next_entry(curr, al.node);
+ while (next->al.offset == -1 &&
+ next != list_last_entry(sources, struct disasm_line, al.node))
+ next = list_next_entry(next, al.node);
+
+ if (next->al.offset == -1)
+ return NULL;
+
+ return next;
+}
+
u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
struct disasm_line *dl)
{
@@ -3800,12 +2304,12 @@ u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
* disasm_line. If it's the last one, we can use symbol's end
* address directly.
*/
- if (&dl->al.node == notes->src->source.prev)
+ next = annotation__next_asm_line(notes, dl);
+ if (next == NULL)
addr = ms->sym->end + offset;
- else {
- next = list_next_entry(dl, al.node);
+ else
addr = ip + (next->al.offset - dl->al.offset) + offset;
- }
+
return map__rip_2objdump(ms->map, addr);
}
@@ -3828,9 +2332,7 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
struct annotated_op_loc *op_loc;
struct annotated_data_type *mem_type;
struct annotated_item_stat *istat;
- u64 ip = he->ip, addr = 0;
- const char *var_name = NULL;
- int var_offset;
+ u64 ip = he->ip;
int i;
ann_data_stat.total++;
@@ -3845,19 +2347,17 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
return NULL;
}
- if (evsel__get_arch(evsel, &arch) < 0) {
+ /* Make sure it has the disasm of the function */
+ if (symbol__annotate(ms, evsel, &arch) < 0) {
ann_data_stat.no_insn++;
return NULL;
}
- /* Make sure it runs objdump to get disasm of the function */
- symbol__ensure_annotate(ms, evsel);
-
/*
* Get a disasm to extract the location from the insn.
* This is too slow...
*/
- dl = find_disasm_line(ms->sym, ip);
+ dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true);
if (dl == NULL) {
ann_data_stat.no_insn++;
return NULL;
@@ -3883,51 +2383,55 @@ retry:
}
for_each_insn_op_loc(&loc, i, op_loc) {
- if (!op_loc->mem_ref)
+ struct data_loc_info dloc = {
+ .arch = arch,
+ .thread = he->thread,
+ .ms = ms,
+ /* Recalculate IP for LOCK prefix or insn fusion */
+ .ip = ms->sym->start + dl->al.offset,
+ .cpumode = he->cpumode,
+ .op = op_loc,
+ };
+
+ if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
continue;
/* Recalculate IP because of LOCK prefix or insn fusion */
ip = ms->sym->start + dl->al.offset;
- var_offset = op_loc->offset;
-
/* PC-relative addressing */
if (op_loc->reg1 == DWARF_REG_PC) {
- struct addr_location al;
- struct symbol *var;
- u64 map_addr;
-
- addr = annotate_calc_pcrel(ms, ip, op_loc->offset, dl);
- /* Kernel symbols might be relocated */
- map_addr = addr + map__reloc(ms->map);
-
- addr_location__init(&al);
- var = thread__find_symbol_fb(he->thread, he->cpumode,
- map_addr, &al);
- if (var) {
- var_name = var->name;
- /* Calculate type offset from the start of variable */
- var_offset = map_addr - map__unmap_ip(al.map, var->start);
- }
- addr_location__exit(&al);
+ dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip,
+ op_loc->offset, dl);
+ }
+
+ /* This CPU access in kernel - pretend PC-relative addressing */
+ if (dso__kernel(map__dso(ms->map)) && arch__is(arch, "x86") &&
+ op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) {
+ dloc.var_addr = op_loc->offset;
+ op_loc->reg1 = DWARF_REG_PC;
+ }
+
+ mem_type = find_data_type(&dloc);
+
+ if (mem_type == NULL && is_stack_canary(arch, op_loc)) {
+ istat->good++;
+ he->mem_type_off = 0;
+ return &canary_type;
}
- mem_type = find_data_type(ms, ip, op_loc, addr, var_name);
if (mem_type)
istat->good++;
else
istat->bad++;
- if (mem_type && var_name)
- op_loc->offset = var_offset;
-
if (symbol_conf.annotate_data_sample) {
annotated_data_type__update_samples(mem_type, evsel,
- op_loc->offset,
+ dloc.type_offset,
he->stat.nr_events,
he->stat.period);
}
- he->mem_type_off = op_loc->offset;
+ he->mem_type_off = dloc.type_offset;
return mem_type;
}
@@ -3936,10 +2440,13 @@ retry:
* from the previous instruction.
*/
if (dl->al.offset > 0) {
+ struct annotation *notes;
struct disasm_line *prev_dl;
- prev_dl = list_prev_entry(dl, al.node);
- if (ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
+ notes = symbol__annotation(ms->sym);
+ prev_dl = annotation__prev_asm_line(notes, dl);
+
+ if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
dl = prev_dl;
goto retry;
}
@@ -3949,3 +2456,227 @@ retry:
istat->bad++;
return NULL;
}
+
+/* Basic block traversal (BFS) data structure */
+struct basic_block_data {
+ struct list_head queue;
+ struct list_head visited;
+};
+
+/*
+ * During the traversal, it needs to know the parent block that the current
+ * block started from. Note that a single basic block can be the parent of
+ * two child basic blocks (in case of a conditional jump).
+ */
+struct basic_block_link {
+ struct list_head node;
+ struct basic_block_link *parent;
+ struct annotated_basic_block *bb;
+};
+
+/* Check whether any basic block in the list already contains the offset */
+static bool basic_block_has_offset(struct list_head *head, s64 offset)
+{
+ struct basic_block_link *link;
+
+ list_for_each_entry(link, head, node) {
+ s64 begin_offset = link->bb->begin->al.offset;
+ s64 end_offset = link->bb->end->al.offset;
+
+ if (begin_offset <= offset && offset <= end_offset)
+ return true;
+ }
+ return false;
+}
+
+static bool is_new_basic_block(struct basic_block_data *bb_data,
+ struct disasm_line *dl)
+{
+ s64 offset = dl->al.offset;
+
+ if (basic_block_has_offset(&bb_data->visited, offset))
+ return false;
+ if (basic_block_has_offset(&bb_data->queue, offset))
+ return false;
+ return true;
+}
+
+/* Add a basic block starting from dl and link it to the parent */
+static int add_basic_block(struct basic_block_data *bb_data,
+ struct basic_block_link *parent,
+ struct disasm_line *dl)
+{
+ struct annotated_basic_block *bb;
+ struct basic_block_link *link;
+
+ if (dl == NULL)
+ return -1;
+
+ if (!is_new_basic_block(bb_data, dl))
+ return 0;
+
+ bb = zalloc(sizeof(*bb));
+ if (bb == NULL)
+ return -1;
+
+ bb->begin = dl;
+ bb->end = dl;
+ INIT_LIST_HEAD(&bb->list);
+
+ link = malloc(sizeof(*link));
+ if (link == NULL) {
+ free(bb);
+ return -1;
+ }
+
+ link->bb = bb;
+ link->parent = parent;
+ list_add_tail(&link->node, &bb_data->queue);
+ return 0;
+}
+
+/* Returns true when it finds the target in the current basic block */
+static bool process_basic_block(struct basic_block_data *bb_data,
+ struct basic_block_link *link,
+ struct symbol *sym, u64 target)
+{
+ struct disasm_line *dl, *next_dl, *last_dl;
+ struct annotation *notes = symbol__annotation(sym);
+ bool found = false;
+
+ dl = link->bb->begin;
+ /* Check if it's already visited */
+ if (basic_block_has_offset(&bb_data->visited, dl->al.offset))
+ return false;
+
+ last_dl = list_last_entry(&notes->src->source,
+ struct disasm_line, al.node);
+ if (last_dl->al.offset == -1)
+ last_dl = annotation__prev_asm_line(notes, last_dl);
+
+ if (last_dl == NULL)
+ return false;
+
+ list_for_each_entry_from(dl, &notes->src->source, al.node) {
+ /* Skip comment or debug info line */
+ if (dl->al.offset == -1)
+ continue;
+ /* Found the target instruction */
+ if (sym->start + dl->al.offset == target) {
+ found = true;
+ break;
+ }
+ /* End of the function, finish the block */
+ if (dl == last_dl)
+ break;
+ /* 'return' instruction finishes the block */
+ if (ins__is_ret(&dl->ins))
+ break;
+ /* normal instructions are part of the basic block */
+ if (!ins__is_jump(&dl->ins))
+ continue;
+ /* jump to a different function, tail call or return */
+ if (dl->ops.target.outside)
+ break;
+ /* jump instruction creates new basic block(s) */
+ next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset,
+ /*allow_update=*/false);
+ if (next_dl)
+ add_basic_block(bb_data, link, next_dl);
+
+ /*
+ * FIXME: determine conditional jumps properly.
+ * Conditional jumps create another basic block with the
+ * next disasm line.
+ */
+ if (!strstr(dl->ins.name, "jmp")) {
+ next_dl = annotation__next_asm_line(notes, dl);
+ if (next_dl)
+ add_basic_block(bb_data, link, next_dl);
+ }
+ break;
+
+ }
+ link->bb->end = dl;
+ return found;
+}
+
+/*
+ * Once the target basic block has been found, build a proper linked list of
+ * basic blocks by walking the parent links back to the start.
+ */
+static void link_found_basic_blocks(struct basic_block_link *link,
+ struct list_head *head)
+{
+ while (link) {
+ struct basic_block_link *parent = link->parent;
+
+ list_move(&link->bb->list, head);
+ list_del(&link->node);
+ free(link);
+
+ link = parent;
+ }
+}
+
+static void delete_basic_blocks(struct basic_block_data *bb_data)
+{
+ struct basic_block_link *link, *tmp;
+
+ list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
+ list_del(&link->node);
+ zfree(&link->bb);
+ free(link);
+ }
+
+ list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
+ list_del(&link->node);
+ zfree(&link->bb);
+ free(link);
+ }
+}
+
+/**
+ * annotate_get_basic_blocks - Get basic blocks for given address range
+ * @sym: symbol to annotate
+ * @src: source address
+ * @dst: destination address
+ * @head: list head to save basic blocks
+ *
+ * This function traverses disasm_lines from @src to @dst and saves them in a
+ * list of annotated_basic_block on @head. It uses BFS to find the shortest
+ * path between the two. The basic_block_link maintains parent links so that
+ * the list of blocks can be built from the start.
+ */
+int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
+ struct list_head *head)
+{
+ struct basic_block_data bb_data = {
+ .queue = LIST_HEAD_INIT(bb_data.queue),
+ .visited = LIST_HEAD_INIT(bb_data.visited),
+ };
+ struct basic_block_link *link;
+ struct disasm_line *dl;
+ int ret = -1;
+
+ dl = find_disasm_line(sym, src, /*allow_update=*/false);
+ if (dl == NULL)
+ return -1;
+
+ if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0)
+ return -1;
+
+ /* Find shortest path from src to dst using BFS */
+ while (!list_empty(&bb_data.queue)) {
+ link = list_first_entry(&bb_data.queue, struct basic_block_link, node);
+
+ if (process_basic_block(&bb_data, link, sym, dst)) {
+ link_found_basic_blocks(link, head);
+ ret = 0;
+ break;
+ }
+ list_move(&link->node, &bb_data.visited);
+ }
+ delete_basic_blocks(&bb_data);
+ return ret;
+}
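
A minimal usage sketch of annotate_get_basic_blocks() may help here, since the patch only adds the producer side; sym, src_addr and dst_addr are hypothetical, and only the list/begin/end fields introduced by this patch are assumed:

	/* Illustrative only, not part of the patch. */
	LIST_HEAD(basic_blocks);
	struct annotated_basic_block *bb, *tmp;
	s64 src_addr = 0, dst_addr = 0;	/* hypothetical addresses inside sym */

	if (annotate_get_basic_blocks(sym, src_addr, dst_addr, &basic_blocks) == 0) {
		list_for_each_entry(bb, &basic_blocks, list) {
			/* bb->begin and bb->end bound the instructions in the block */
			pr_debug("block %#" PRIx64 "-%#" PRIx64 "\n",
				 sym->start + bb->begin->al.offset,
				 sym->start + bb->end->al.offset);
		}
		/* the caller owns the blocks and frees them when done */
		list_for_each_entry_safe(bb, tmp, &basic_blocks, list) {
			list_del(&bb->list);
			free(bb);
		}
	}
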
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 13cc659e50..d5c821c22f 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -13,10 +13,10 @@
#include "mutex.h"
#include "spark.h"
#include "hashmap.h"
+#include "disasm.h"
struct hist_browser_timer;
struct hist_entry;
-struct ins_ops;
struct map;
struct map_symbol;
struct addr_map_symbol;
@@ -26,59 +26,6 @@ struct evsel;
struct symbol;
struct annotated_data_type;
-struct ins {
- const char *name;
- struct ins_ops *ops;
-};
-
-struct ins_operands {
- char *raw;
- struct {
- char *raw;
- char *name;
- struct symbol *sym;
- u64 addr;
- s64 offset;
- bool offset_avail;
- bool outside;
- bool multi_regs;
- } target;
- union {
- struct {
- char *raw;
- char *name;
- u64 addr;
- bool multi_regs;
- } source;
- struct {
- struct ins ins;
- struct ins_operands *ops;
- } locked;
- struct {
- char *raw_comment;
- char *raw_func_start;
- } jump;
- };
-};
-
-struct arch;
-
-bool arch__is(struct arch *arch, const char *name);
-
-struct ins_ops {
- void (*free)(struct ins_operands *ops);
- int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms);
- int (*scnprintf)(struct ins *ins, char *bf, size_t size,
- struct ins_operands *ops, int max_ins_name);
-};
-
-bool ins__is_jump(const struct ins *ins);
-bool ins__is_call(const struct ins *ins);
-bool ins__is_ret(const struct ins *ins);
-bool ins__is_lock(const struct ins *ins);
-int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops, int max_ins_name);
-bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
-
#define ANNOTATION__IPC_WIDTH 6
#define ANNOTATION__CYCLES_WIDTH 6
#define ANNOTATION__MINMAX_CYCLES_WIDTH 19
@@ -171,6 +118,8 @@ struct disasm_line {
struct annotation_line al;
};
+void annotation_line__add(struct annotation_line *al, struct list_head *head);
+
static inline double annotation_data__percent(struct annotation_data *data,
unsigned int which)
{
@@ -212,7 +161,6 @@ static inline bool disasm_line__has_local_offset(const struct disasm_line *dl)
*/
bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym);
-void disasm_line__free(struct disasm_line *dl);
struct annotation_line *
annotation_line__next(struct annotation_line *pos, struct list_head *head);
@@ -235,7 +183,6 @@ int __annotation__scnprintf_samples_period(struct annotation *notes,
struct evsel *evsel,
bool show_freq);
-int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name);
size_t disasm__fprintf(struct list_head *head, FILE *fp);
void symbol__calc_percent(struct symbol *sym, struct evsel *evsel);
@@ -299,12 +246,14 @@ struct cyc_hist {
 * we have more than one group in an evlist, where we will want
* to see each group separately, that is why symbol__annotate2()
* sets src->nr_histograms to evsel->nr_members.
- * @offsets: Array of annotation_line to be accessed by offset.
* @samples: Hash map of sym_hist_entry. Keyed by event index and offset in symbol.
+ * @nr_events: Number of events in the current output.
* @nr_entries: Number of annotated_line in the source list.
* @nr_asm_entries: Number of annotated_line with actual asm instruction in the
* source list.
- * @max_line_len: Maximum length of objdump output in an annotated_line.
+ * @max_jump_sources: Maximum number of jump instructions targeting the same
+ * instruction.
+ * @widths: Precalculated width of each column in the TUI output.
*
* disasm_lines are allocated, percentages calculated and all sorted by percentage
* when the annotation is about to be presented, so the percentages are for
@@ -315,14 +264,27 @@ struct cyc_hist {
struct annotated_source {
struct list_head source;
struct sym_hist *histograms;
- struct annotation_line **offsets;
struct hashmap *samples;
int nr_histograms;
+ int nr_events;
int nr_entries;
int nr_asm_entries;
- u16 max_line_len;
+ int max_jump_sources;
+ u64 start;
+ struct {
+ u8 addr;
+ u8 jumps;
+ u8 target;
+ u8 min_addr;
+ u8 max_addr;
+ u8 max_ins_name;
+ u16 max_line_len;
+ } widths;
};
+struct annotation_line *annotated_source__get_line(struct annotated_source *src,
+ s64 offset);
+
/**
* struct annotated_branch - basic block and IPC information for a symbol.
*
@@ -351,17 +313,6 @@ struct annotated_branch {
};
struct LOCKABLE annotation {
- u64 start;
- int nr_events;
- int max_jump_sources;
- struct {
- u8 addr;
- u8 jumps;
- u8 target;
- u8 min_addr;
- u8 max_addr;
- u8 max_ins_name;
- } widths;
struct annotated_source *src;
struct annotated_branch *branch;
};
@@ -385,7 +336,7 @@ static inline int annotation__cycles_width(struct annotation *notes)
static inline int annotation__pcnt_width(struct annotation *notes)
{
- return (symbol_conf.show_total_period ? 12 : 7) * notes->nr_events;
+ return (symbol_conf.show_total_period ? 12 : 7) * notes->src->nr_events;
}
static inline bool annotation_line__filter(struct annotation_line *al)
@@ -393,10 +344,7 @@ static inline bool annotation_line__filter(struct annotation_line *al)
return annotate_opts.hide_src_code && al->offset == -1;
}
-void annotation__set_offsets(struct annotation *notes, s64 size);
-void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym);
void annotation__update_column_widths(struct annotation *notes);
-void annotation__init_column_widths(struct annotation *notes, struct symbol *sym);
void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms);
static inline struct sym_hist *annotated_source__histogram(struct annotated_source *src, int idx)
@@ -511,15 +459,19 @@ int annotate_check_args(void);
* @reg1: First register in the operand
* @reg2: Second register in the operand
* @offset: Memory access offset in the operand
+ * @segment: Segment selector register
* @mem_ref: Whether the operand accesses memory
* @multi_regs: Whether the second register is used
+ * @imm: Whether the operand is an immediate value (in offset)
*/
struct annotated_op_loc {
int reg1;
int reg2;
int offset;
+ u8 segment;
bool mem_ref;
bool multi_regs;
+ bool imm;
};
enum annotated_insn_ops {
@@ -529,6 +481,17 @@ enum annotated_insn_ops {
INSN_OP_MAX,
};
+enum annotated_x86_segment {
+ INSN_SEG_NONE = 0,
+
+ INSN_SEG_X86_CS,
+ INSN_SEG_X86_DS,
+ INSN_SEG_X86_ES,
+ INSN_SEG_X86_FS,
+ INSN_SEG_X86_GS,
+ INSN_SEG_X86_SS,
+};
+
/**
* struct annotated_insn_loc - Location info of instruction
* @ops: Array of location info for source and target operands
@@ -561,4 +524,20 @@ extern struct list_head ann_insn_stat;
u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
struct disasm_line *dl);
+/**
+ * struct annotated_basic_block - Basic block of instructions
+ * @list: List node
+ * @begin: start instruction in the block
+ * @end: end instruction in the block
+ */
+struct annotated_basic_block {
+ struct list_head list;
+ struct disasm_line *begin;
+ struct disasm_line *end;
+};
+
+/* Get a list of basic blocks from src to dst addresses */
+int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
+ struct list_head *head);
+
#endif /* __PERF_ANNOTATE_H */
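
To make the new @segment and @imm fields concrete, here is a sketch of how the memory operand of a canary load could end up encoded; the instruction and field values are illustrative, not taken from the patch:

	/* Hypothetical result of parsing "mov %gs:0x28,%rax" on x86_64. */
	struct annotated_op_loc loc = {
		.reg1    = -1,			/* no base register involved */
		.segment = INSN_SEG_X86_GS,	/* %gs segment prefix */
		.offset  = 0x28,		/* 40: the stack-protector canary slot */
		.imm     = true,		/* offset is a literal, not register-relative */
		.mem_ref = false,
	};

With values like these, is_stack_canary() in annotate.c above reports the access as the stack canary, since the segment is %gs, the offset is an immediate, and it equals 40.
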
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index ef314a5797..e2f317063e 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
struct evlist *evlist,
struct evsel *evsel, int idx)
{
- bool per_cpu = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
+ bool per_cpu = !perf_cpu_map__has_any_cpu(evlist->core.user_requested_cpus);
mp->mmap_needed = evsel->needs_auxtrace_mmap;
@@ -218,15 +218,20 @@ static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
return queue_array;
}
-int auxtrace_queues__init(struct auxtrace_queues *queues)
+int auxtrace_queues__init_nr(struct auxtrace_queues *queues, int nr_queues)
{
- queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
+ queues->nr_queues = nr_queues;
queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
if (!queues->queue_array)
return -ENOMEM;
return 0;
}
+int auxtrace_queues__init(struct auxtrace_queues *queues)
+{
+ return auxtrace_queues__init_nr(queues, AUXTRACE_INIT_NR_QUEUES);
+}
+
static int auxtrace_queues__grow(struct auxtrace_queues *queues,
unsigned int new_nr_queues)
{
@@ -648,7 +653,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
{
- bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus);
+ bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu(evlist->core.user_requested_cpus);
if (per_cpu_mmaps) {
struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
@@ -2654,7 +2659,7 @@ static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
}
filt->addr = 0;
- filt->size = dso->data.file_size;
+ filt->size = dso__data(dso)->file_size;
return 0;
}
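
auxtrace_queues__init() is now a thin wrapper over auxtrace_queues__init_nr(), so a caller that already knows how many queues it needs can size the array up front. A small sketch, with a hypothetical queue count:

	/* Illustrative only: e.g. one queue per traced CPU. */
	struct auxtrace_queues queues;
	int nr_queues = 8;

	if (auxtrace_queues__init_nr(&queues, nr_queues) < 0)
		return -ENOMEM;
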
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 55702215a8..8a6ec95658 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -521,6 +521,7 @@ int auxtrace_mmap__read_snapshot(struct mmap *map,
struct perf_tool *tool, process_auxtrace_t fn,
size_t snapshot_size);
+int auxtrace_queues__init_nr(struct auxtrace_queues *queues, int nr_queues);
int auxtrace_queues__init(struct auxtrace_queues *queues);
int auxtrace_queues__add_event(struct auxtrace_queues *queues,
struct perf_session *session,
diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c
index dec9109897..04068d4868 100644
--- a/tools/perf/util/block-info.c
+++ b/tools/perf/util/block-info.c
@@ -43,26 +43,14 @@ static struct block_header_column {
}
};
-struct block_info *block_info__get(struct block_info *bi)
-{
- if (bi)
- refcount_inc(&bi->refcnt);
- return bi;
-}
-
-void block_info__put(struct block_info *bi)
+struct block_info *block_info__new(void)
{
- if (bi && refcount_dec_and_test(&bi->refcnt))
- free(bi);
+ return zalloc(sizeof(struct block_info));
}
-struct block_info *block_info__new(void)
+void block_info__delete(struct block_info *bi)
{
- struct block_info *bi = zalloc(sizeof(*bi));
-
- if (bi)
- refcount_set(&bi->refcnt, 1);
- return bi;
+ free(bi);
}
int64_t __block_info__cmp(struct hist_entry *left, struct hist_entry *right)
@@ -148,7 +136,7 @@ int block_info__process_sym(struct hist_entry *he, struct block_hist *bh,
he_block = hists__add_entry_block(&bh->block_hists,
&al, bi);
if (!he_block) {
- block_info__put(bi);
+ block_info__delete(bi);
return -1;
}
}
@@ -319,7 +307,7 @@ static int block_dso_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
if (map && map__dso(map)) {
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
- map__dso(map)->short_name);
+ dso__short_name(map__dso(map)));
}
return scnprintf(hpp->buf, hpp->size, "%*s", block_fmt->width,
diff --git a/tools/perf/util/block-info.h b/tools/perf/util/block-info.h
index 96f53e8979..0b9e1aad4c 100644
--- a/tools/perf/util/block-info.h
+++ b/tools/perf/util/block-info.h
@@ -3,7 +3,6 @@
#define __PERF_BLOCK_H
#include <linux/types.h>
-#include <linux/refcount.h>
#include "hist.h"
#include "symbol.h"
#include "sort.h"
@@ -19,7 +18,6 @@ struct block_info {
u64 total_cycles;
int num;
int num_aggr;
- refcount_t refcnt;
};
struct block_fmt {
@@ -48,19 +46,8 @@ struct block_report {
int nr_fmts;
};
-struct block_hist;
-
struct block_info *block_info__new(void);
-struct block_info *block_info__get(struct block_info *bi);
-void block_info__put(struct block_info *bi);
-
-static inline void __block_info__zput(struct block_info **bi)
-{
- block_info__put(*bi);
- *bi = NULL;
-}
-
-#define block_info__zput(bi) __block_info__zput(&bi)
+void block_info__delete(struct block_info *bi);
int64_t __block_info__cmp(struct hist_entry *left, struct hist_entry *right);
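
With the refcount gone, block_info lifetime reduces to a plain new/delete pair. A minimal sketch of the intended ownership rules, mirroring the error path in block_info__process_sym() above (bh, al and the enclosing function are assumed from that caller's context):

	struct block_info *bi = block_info__new();
	struct hist_entry *he_block;

	if (bi == NULL)
		return -1;

	/* ownership moves to the hist entry on success */
	he_block = hists__add_entry_block(&bh->block_hists, &al, bi);
	if (he_block == NULL) {
		block_info__delete(bi);	/* failure: we still own bi, free it */
		return -1;
	}
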
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 83709146a4..827695cd04 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -59,10 +59,10 @@ static int machine__process_bpf_event_load(struct machine *machine,
if (map) {
struct dso *dso = map__dso(map);
- dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
- dso->bpf_prog.id = id;
- dso->bpf_prog.sub_id = i;
- dso->bpf_prog.env = env;
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_PROG_INFO);
+ dso__bpf_prog(dso)->id = id;
+ dso__bpf_prog(dso)->sub_id = i;
+ dso__bpf_prog(dso)->env = env;
map__put(map);
}
}
diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
index 1c82377ed7..ea29c372f3 100644
--- a/tools/perf/util/bpf_counter_cgroup.c
+++ b/tools/perf/util/bpf_counter_cgroup.c
@@ -136,9 +136,8 @@ static int bperf_load_program(struct evlist *evlist)
cgrp = evsel->cgrp;
if (read_cgroup_id(cgrp) < 0) {
- pr_err("Failed to get cgroup id\n");
- err = -1;
- goto out;
+ pr_debug("Failed to get cgroup id for %s\n", cgrp->name);
+ cgrp->id = 0;
}
map_fd = bpf_map__fd(skel->maps.cgrp_idx);
diff --git a/tools/perf/util/bpf_kwork.c b/tools/perf/util/bpf_kwork.c
index 6eb2c78fd7..44f0f708a1 100644
--- a/tools/perf/util/bpf_kwork.c
+++ b/tools/perf/util/bpf_kwork.c
@@ -147,12 +147,12 @@ static bool valid_kwork_class_type(enum kwork_class_type type)
static int setup_filters(struct perf_kwork *kwork)
{
- u8 val = 1;
- int i, nr_cpus, key, fd;
- struct perf_cpu_map *map;
-
if (kwork->cpu_list != NULL) {
- fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
+ int idx, nr_cpus;
+ struct perf_cpu_map *map;
+ struct perf_cpu cpu;
+ int fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
+
if (fd < 0) {
pr_debug("Invalid cpu filter fd\n");
return -1;
@@ -165,8 +165,8 @@ static int setup_filters(struct perf_kwork *kwork)
}
nr_cpus = libbpf_num_possible_cpus();
- for (i = 0; i < perf_cpu_map__nr(map); i++) {
- struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
+ perf_cpu_map__for_each_cpu(cpu, idx, map) {
+ u8 val = 1;
if (cpu.cpu >= nr_cpus) {
perf_cpu_map__put(map);
@@ -181,6 +181,8 @@ static int setup_filters(struct perf_kwork *kwork)
}
if (kwork->profile_name != NULL) {
+ int key, fd;
+
if (strlen(kwork->profile_name) >= MAX_KWORKNAME) {
pr_err("Requested name filter %s too large, limit to %d\n",
kwork->profile_name, MAX_KWORKNAME - 1);
diff --git a/tools/perf/util/bpf_kwork_top.c b/tools/perf/util/bpf_kwork_top.c
index 035e022727..22a3b00a1e 100644
--- a/tools/perf/util/bpf_kwork_top.c
+++ b/tools/perf/util/bpf_kwork_top.c
@@ -122,11 +122,11 @@ static bool valid_kwork_class_type(enum kwork_class_type type)
static int setup_filters(struct perf_kwork *kwork)
{
- u8 val = 1;
- int i, nr_cpus, fd;
- struct perf_cpu_map *map;
-
if (kwork->cpu_list) {
+ int idx, nr_cpus, fd;
+ struct perf_cpu_map *map;
+ struct perf_cpu cpu;
+
fd = bpf_map__fd(skel->maps.kwork_top_cpu_filter);
if (fd < 0) {
pr_debug("Invalid cpu filter fd\n");
@@ -140,8 +140,8 @@ static int setup_filters(struct perf_kwork *kwork)
}
nr_cpus = libbpf_num_possible_cpus();
- for (i = 0; i < perf_cpu_map__nr(map); i++) {
- struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
+ perf_cpu_map__for_each_cpu(cpu, idx, map) {
+ u8 val = 1;
if (cpu.cpu >= nr_cpus) {
perf_cpu_map__put(map);
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index 2872f9bc07..0acbd74e8c 100644
--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -341,6 +341,27 @@ failure:
return 1; /* Failure: don't filter */
}
+SEC("tp/syscalls/sys_enter_nanosleep")
+int sys_enter_nanosleep(struct syscall_enter_args *args)
+{
+ struct augmented_args_payload *augmented_args = augmented_args_payload();
+ const void *req_arg = (const void *)args->args[0];
+ unsigned int len = sizeof(augmented_args->args);
+ __u32 size = sizeof(struct timespec64);
+
+ if (augmented_args == NULL)
+ goto failure;
+
+ if (size > sizeof(augmented_args->__data))
+ goto failure;
+
+ bpf_probe_read_user(&augmented_args->__data, size, req_arg);
+
+ return augmented__output(args, augmented_args, len + size);
+failure:
+ return 1; /* Failure: don't filter */
+}
+
static pid_t getpid(void)
{
return bpf_get_current_pid_tgid();
diff --git a/tools/perf/util/bpf_skel/bench_uprobe.bpf.c b/tools/perf/util/bpf_skel/bench_uprobe.bpf.c
index 2c55896bb3..a01c7f791f 100644
--- a/tools/perf/util/bpf_skel/bench_uprobe.bpf.c
+++ b/tools/perf/util/bpf_skel/bench_uprobe.bpf.c
@@ -4,6 +4,7 @@
#include <bpf/bpf_tracing.h>
unsigned int nr_uprobes;
+unsigned int nr_uretprobes;
SEC("uprobe")
int BPF_UPROBE(empty)
@@ -20,4 +21,19 @@ int BPF_UPROBE(trace_printk)
return 0;
}
+SEC("uretprobe")
+int BPF_URETPROBE(empty_ret)
+{
+ return 0;
+}
+
+SEC("uretprobe")
+int BPF_URETPROBE(trace_printk_ret)
+{
+ char fmt[] = "perf bench uretprobe %u";
+
+ bpf_trace_printk(fmt, sizeof(fmt), ++nr_uretprobes);
+ return 0;
+}
+
char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 03c64b8538..83a1581e8c 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -60,7 +60,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
addr_location__init(&al);
if (thread__find_map(thread, sample->cpumode, sample->ip, &al))
- map__dso(al.map)->hit = 1;
+ dso__set_hit(map__dso(al.map));
addr_location__exit(&al);
thread__put(thread);
@@ -272,10 +272,10 @@ char *__dso__build_id_filename(const struct dso *dso, char *bf, size_t size,
bool alloc = (bf == NULL);
int ret;
- if (!dso->has_build_id)
+ if (!dso__has_build_id(dso))
return NULL;
- build_id__sprintf(&dso->bid, sbuild_id);
+ build_id__sprintf(dso__bid_const(dso), sbuild_id);
linkname = build_id_cache__linkname(sbuild_id, NULL, 0);
if (!linkname)
return NULL;
@@ -327,48 +327,56 @@ static int write_buildid(const char *name, size_t name_len, struct build_id *bid
return write_padded(fd, name, name_len + 1, len);
}
-static int machine__write_buildid_table(struct machine *machine,
- struct feat_fd *fd)
+struct machine__write_buildid_table_cb_args {
+ struct machine *machine;
+ struct feat_fd *fd;
+ u16 kmisc, umisc;
+};
+
+static int machine__write_buildid_table_cb(struct dso *dso, void *data)
{
- int err = 0;
- struct dso *pos;
- u16 kmisc = PERF_RECORD_MISC_KERNEL,
- umisc = PERF_RECORD_MISC_USER;
+ struct machine__write_buildid_table_cb_args *args = data;
+ const char *name;
+ size_t name_len;
+ bool in_kernel = false;
- if (!machine__is_host(machine)) {
- kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
- umisc = PERF_RECORD_MISC_GUEST_USER;
- }
+ if (!dso__has_build_id(dso))
+ return 0;
- dsos__for_each_with_build_id(pos, &machine->dsos.head) {
- const char *name;
- size_t name_len;
- bool in_kernel = false;
+ if (!dso__hit(dso) && !dso__is_vdso(dso))
+ return 0;
- if (!pos->hit && !dso__is_vdso(pos))
- continue;
+ if (dso__is_vdso(dso)) {
+ name = dso__short_name(dso);
+ name_len = dso__short_name_len(dso);
+ } else if (dso__is_kcore(dso)) {
+ name = args->machine->mmap_name;
+ name_len = strlen(name);
+ } else {
+ name = dso__long_name(dso);
+ name_len = dso__long_name_len(dso);
+ }
- if (dso__is_vdso(pos)) {
- name = pos->short_name;
- name_len = pos->short_name_len;
- } else if (dso__is_kcore(pos)) {
- name = machine->mmap_name;
- name_len = strlen(name);
- } else {
- name = pos->long_name;
- name_len = pos->long_name_len;
- }
+ in_kernel = dso__kernel(dso) || is_kernel_module(name, PERF_RECORD_MISC_CPUMODE_UNKNOWN);
+ return write_buildid(name, name_len, dso__bid(dso), args->machine->pid,
+ in_kernel ? args->kmisc : args->umisc, args->fd);
+}
- in_kernel = pos->kernel ||
- is_kernel_module(name,
- PERF_RECORD_MISC_CPUMODE_UNKNOWN);
- err = write_buildid(name, name_len, &pos->bid, machine->pid,
- in_kernel ? kmisc : umisc, fd);
- if (err)
- break;
+static int machine__write_buildid_table(struct machine *machine, struct feat_fd *fd)
+{
+ struct machine__write_buildid_table_cb_args args = {
+ .machine = machine,
+ .fd = fd,
+ .kmisc = PERF_RECORD_MISC_KERNEL,
+ .umisc = PERF_RECORD_MISC_USER,
+ };
+
+ if (!machine__is_host(machine)) {
+ args.kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
+ args.umisc = PERF_RECORD_MISC_GUEST_USER;
}
- return err;
+ return dsos__for_each_dso(&machine->dsos, machine__write_buildid_table_cb, &args);
}
int perf_session__write_buildid_table(struct perf_session *session,
@@ -390,42 +398,6 @@ int perf_session__write_buildid_table(struct perf_session *session,
return err;
}
-static int __dsos__hit_all(struct list_head *head)
-{
- struct dso *pos;
-
- list_for_each_entry(pos, head, node)
- pos->hit = true;
-
- return 0;
-}
-
-static int machine__hit_all_dsos(struct machine *machine)
-{
- return __dsos__hit_all(&machine->dsos.head);
-}
-
-int dsos__hit_all(struct perf_session *session)
-{
- struct rb_node *nd;
- int err;
-
- err = machine__hit_all_dsos(&session->machines.host);
- if (err)
- return err;
-
- for (nd = rb_first_cached(&session->machines.guests); nd;
- nd = rb_next(nd)) {
- struct machine *pos = rb_entry(nd, struct machine, rb_node);
-
- err = machine__hit_all_dsos(pos);
- if (err)
- return err;
- }
-
- return 0;
-}
-
void disable_buildid_cache(void)
{
no_buildid_cache = true;
@@ -904,11 +876,11 @@ static bool dso__build_id_mismatch(struct dso *dso, const char *name)
struct build_id bid;
bool ret = false;
- mutex_lock(&dso->lock);
- if (filename__read_build_id_ns(name, &bid, dso->nsinfo) >= 0)
+ mutex_lock(dso__lock(dso));
+ if (filename__read_build_id_ns(name, &bid, dso__nsinfo(dso)) >= 0)
ret = !dso__build_id_equal(dso, &bid);
- mutex_unlock(&dso->lock);
+ mutex_unlock(dso__lock(dso));
return ret;
}
@@ -918,13 +890,13 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
{
bool is_kallsyms = dso__is_kallsyms(dso);
bool is_vdso = dso__is_vdso(dso);
- const char *name = dso->long_name;
+ const char *name = dso__long_name(dso);
const char *proper_name = NULL;
const char *root_dir = NULL;
char *allocated_name = NULL;
int ret = 0;
- if (!dso->has_build_id)
+ if (!dso__has_build_id(dso))
return 0;
if (dso__is_kcore(dso)) {
@@ -949,10 +921,10 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
if (!is_kallsyms && dso__build_id_mismatch(dso, name))
goto out_free;
- mutex_lock(&dso->lock);
- ret = build_id_cache__add_b(&dso->bid, name, dso->nsinfo,
+ mutex_lock(dso__lock(dso));
+ ret = build_id_cache__add_b(dso__bid(dso), name, dso__nsinfo(dso),
is_kallsyms, is_vdso, proper_name, root_dir);
- mutex_unlock(&dso->lock);
+ mutex_unlock(dso__lock(dso));
out_free:
free(allocated_name);
return ret;
@@ -992,7 +964,7 @@ int perf_session__cache_build_ids(struct perf_session *session)
static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
- return __dsos__read_build_ids(&machine->dsos.head, with_hits);
+ return dsos__read_build_ids(&machine->dsos, with_hits);
}
bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
diff --git a/tools/perf/util/build-id.h b/tools/perf/util/build-id.h
index 4e3a116937..3fa8bffb07 100644
--- a/tools/perf/util/build-id.h
+++ b/tools/perf/util/build-id.h
@@ -39,8 +39,6 @@ int build_id__mark_dso_hit(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct evsel *evsel,
struct machine *machine);
-int dsos__hit_all(struct perf_session *session);
-
int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct evsel *evsel,
struct machine *machine);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 7517d16c02..1730b852a9 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -606,7 +606,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
call->brtype_stat = zalloc(sizeof(*call->brtype_stat));
if (!call->brtype_stat) {
perror("not enough memory for the code path branch statistics");
- free(call->brtype_stat);
+ zfree(&call->brtype_stat);
return -ENOMEM;
}
}
@@ -1205,7 +1205,7 @@ char *callchain_list__sym_name(struct callchain_list *cl,
if (show_dso)
scnprintf(bf + printed, bfsize - printed, " %s",
cl->ms.map ?
- map__dso(cl->ms.map)->short_name :
+ dso__short_name(map__dso(cl->ms.map)) :
"unknown");
return bf;
diff --git a/tools/perf/util/cgroup.c b/tools/perf/util/cgroup.c
index fcb5090584..0f759dd96d 100644
--- a/tools/perf/util/cgroup.c
+++ b/tools/perf/util/cgroup.c
@@ -465,9 +465,11 @@ int evlist__expand_cgroup(struct evlist *evlist, const char *str,
name = cn->name + prefix_len;
if (name[0] == '/' && name[1])
name++;
+
+ /* the cgroup can go away in the meantime */
cgrp = cgroup__new(name, open_cgroup);
if (cgrp == NULL)
- goto out_err;
+ continue;
leader = NULL;
evlist__for_each_entry(orig_list, pos) {
diff --git a/tools/perf/util/comm.c b/tools/perf/util/comm.c
index afb8d4fd26..49b79cf0c5 100644
--- a/tools/perf/util/comm.c
+++ b/tools/perf/util/comm.c
@@ -1,108 +1,188 @@
// SPDX-License-Identifier: GPL-2.0
#include "comm.h"
#include <errno.h>
-#include <stdlib.h>
-#include <stdio.h>
#include <string.h>
+#include <internal/rc_check.h>
#include <linux/refcount.h>
-#include <linux/rbtree.h>
#include <linux/zalloc.h>
#include "rwsem.h"
-struct comm_str {
- char *str;
- struct rb_node rb_node;
+DECLARE_RC_STRUCT(comm_str) {
refcount_t refcnt;
+ char str[];
};
-/* Should perhaps be moved to struct machine */
-static struct rb_root comm_str_root;
-static struct rw_semaphore comm_str_lock = {.lock = PTHREAD_RWLOCK_INITIALIZER,};
+static struct comm_strs {
+ struct rw_semaphore lock;
+ struct comm_str **strs;
+ int num_strs;
+ int capacity;
+} _comm_strs;
+
+static void comm_strs__remove_if_last(struct comm_str *cs);
+
+static void comm_strs__init(void)
+{
+ init_rwsem(&_comm_strs.lock);
+ _comm_strs.capacity = 16;
+ _comm_strs.num_strs = 0;
+ _comm_strs.strs = calloc(16, sizeof(*_comm_strs.strs));
+}
+
+static struct comm_strs *comm_strs__get(void)
+{
+ static pthread_once_t comm_strs_type_once = PTHREAD_ONCE_INIT;
+
+ pthread_once(&comm_strs_type_once, comm_strs__init);
+
+ return &_comm_strs;
+}
+
+static refcount_t *comm_str__refcnt(struct comm_str *cs)
+{
+ return &RC_CHK_ACCESS(cs)->refcnt;
+}
+
+static const char *comm_str__str(const struct comm_str *cs)
+{
+ return &RC_CHK_ACCESS(cs)->str[0];
+}
static struct comm_str *comm_str__get(struct comm_str *cs)
{
- if (cs && refcount_inc_not_zero(&cs->refcnt))
- return cs;
+ struct comm_str *result;
- return NULL;
+ if (RC_CHK_GET(result, cs))
+ refcount_inc_not_zero(comm_str__refcnt(cs));
+
+ return result;
}
static void comm_str__put(struct comm_str *cs)
{
- if (cs && refcount_dec_and_test(&cs->refcnt)) {
- down_write(&comm_str_lock);
- rb_erase(&cs->rb_node, &comm_str_root);
- up_write(&comm_str_lock);
- zfree(&cs->str);
- free(cs);
+ if (!cs)
+ return;
+
+ if (refcount_dec_and_test(comm_str__refcnt(cs))) {
+ RC_CHK_FREE(cs);
+ } else {
+ if (refcount_read(comm_str__refcnt(cs)) == 1)
+ comm_strs__remove_if_last(cs);
+
+ RC_CHK_PUT(cs);
}
}
-static struct comm_str *comm_str__alloc(const char *str)
+static struct comm_str *comm_str__new(const char *str)
{
- struct comm_str *cs;
-
- cs = zalloc(sizeof(*cs));
- if (!cs)
- return NULL;
+ struct comm_str *result = NULL;
+ RC_STRUCT(comm_str) *cs;
- cs->str = strdup(str);
- if (!cs->str) {
- free(cs);
- return NULL;
+ cs = malloc(sizeof(*cs) + strlen(str) + 1);
+ if (ADD_RC_CHK(result, cs)) {
+ refcount_set(comm_str__refcnt(result), 1);
+ strcpy(&cs->str[0], str);
}
+ return result;
+}
- refcount_set(&cs->refcnt, 1);
+static int comm_str__search(const void *_key, const void *_member)
+{
+ const char *key = _key;
+ const struct comm_str *member = *(const struct comm_str * const *)_member;
- return cs;
+ return strcmp(key, comm_str__str(member));
}
-static
-struct comm_str *__comm_str__findnew(const char *str, struct rb_root *root)
+static void comm_strs__remove_if_last(struct comm_str *cs)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct comm_str *iter, *new;
- int cmp;
-
- while (*p != NULL) {
- parent = *p;
- iter = rb_entry(parent, struct comm_str, rb_node);
-
- /*
- * If we race with comm_str__put, iter->refcnt is 0
- * and it will be removed within comm_str__put call
- * shortly, ignore it in this search.
- */
- cmp = strcmp(str, iter->str);
- if (!cmp && comm_str__get(iter))
- return iter;
-
- if (cmp < 0)
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
+ struct comm_strs *comm_strs = comm_strs__get();
+
+ down_write(&comm_strs->lock);
+ /*
+	 * If the only remaining reference is the one held by the array, drop
+	 * the array's reference under the write lock so that we don't race
+	 * with findnew.
+ */
+ if (refcount_read(comm_str__refcnt(cs)) == 1) {
+ struct comm_str **entry;
+
+ entry = bsearch(comm_str__str(cs), comm_strs->strs, comm_strs->num_strs,
+ sizeof(struct comm_str *), comm_str__search);
+ comm_str__put(*entry);
+ for (int i = entry - comm_strs->strs; i < comm_strs->num_strs - 1; i++)
+ comm_strs->strs[i] = comm_strs->strs[i + 1];
+ comm_strs->num_strs--;
}
+ up_write(&comm_strs->lock);
+}
- new = comm_str__alloc(str);
- if (!new)
- return NULL;
+static struct comm_str *__comm_strs__find(struct comm_strs *comm_strs, const char *str)
+{
+ struct comm_str **result;
- rb_link_node(&new->rb_node, parent, p);
- rb_insert_color(&new->rb_node, root);
+ result = bsearch(str, comm_strs->strs, comm_strs->num_strs, sizeof(struct comm_str *),
+ comm_str__search);
- return new;
+ if (!result)
+ return NULL;
+
+ return comm_str__get(*result);
}
-static struct comm_str *comm_str__findnew(const char *str, struct rb_root *root)
+static struct comm_str *comm_strs__findnew(const char *str)
{
- struct comm_str *cs;
+ struct comm_strs *comm_strs = comm_strs__get();
+ struct comm_str *result;
- down_write(&comm_str_lock);
- cs = __comm_str__findnew(str, root);
- up_write(&comm_str_lock);
+ if (!comm_strs)
+ return NULL;
- return cs;
+ down_read(&comm_strs->lock);
+ result = __comm_strs__find(comm_strs, str);
+ up_read(&comm_strs->lock);
+ if (result)
+ return result;
+
+ down_write(&comm_strs->lock);
+ result = __comm_strs__find(comm_strs, str);
+ if (!result) {
+ if (comm_strs->num_strs == comm_strs->capacity) {
+ struct comm_str **tmp;
+
+ tmp = reallocarray(comm_strs->strs,
+ comm_strs->capacity + 16,
+ sizeof(*comm_strs->strs));
+ if (!tmp) {
+ up_write(&comm_strs->lock);
+ return NULL;
+ }
+ comm_strs->strs = tmp;
+ comm_strs->capacity += 16;
+ }
+ result = comm_str__new(str);
+ if (result) {
+ int low = 0, high = comm_strs->num_strs - 1;
+ int insert = comm_strs->num_strs; /* Default to inserting at the end. */
+
+ while (low <= high) {
+ int mid = low + (high - low) / 2;
+ int cmp = strcmp(comm_str__str(comm_strs->strs[mid]), str);
+
+ if (cmp < 0) {
+ low = mid + 1;
+ } else {
+ high = mid - 1;
+ insert = mid;
+ }
+ }
+ memmove(&comm_strs->strs[insert + 1], &comm_strs->strs[insert],
+ (comm_strs->num_strs - insert) * sizeof(struct comm_str *));
+ comm_strs->num_strs++;
+ comm_strs->strs[insert] = result;
+ }
+ }
+ up_write(&comm_strs->lock);
+ return comm_str__get(result);
}
struct comm *comm__new(const char *str, u64 timestamp, bool exec)
@@ -115,7 +195,7 @@ struct comm *comm__new(const char *str, u64 timestamp, bool exec)
comm->start = timestamp;
comm->exec = exec;
- comm->comm_str = comm_str__findnew(str, &comm_str_root);
+ comm->comm_str = comm_strs__findnew(str);
if (!comm->comm_str) {
free(comm);
return NULL;
@@ -128,7 +208,7 @@ int comm__override(struct comm *comm, const char *str, u64 timestamp, bool exec)
{
struct comm_str *new, *old = comm->comm_str;
- new = comm_str__findnew(str, &comm_str_root);
+ new = comm_strs__findnew(str);
if (!new)
return -ENOMEM;
@@ -149,5 +229,5 @@ void comm__free(struct comm *comm)
const char *comm__str(const struct comm *comm)
{
- return comm->comm_str->str;
+ return comm_str__str(comm->comm_str);
}
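
The comm_str rework above replaces the global rbtree with a sorted array: lookups use bsearch() and new strings are inserted at the position found by a binary search. A stripped-down sketch of that core idea, with invented names and without the refcounting, locking and capacity growth of the real code:

	/* Illustrative only; needs <stdlib.h> and <string.h>. */
	static int str_search(const void *key, const void *member)
	{
		return strcmp(key, *(const char * const *)member);
	}

	static const char *strs__findnew(const char **strs, int *num, const char *str)
	{
		const char **found = bsearch(str, strs, *num, sizeof(*strs), str_search);
		int low = 0, high = *num - 1, insert = *num;

		if (found)
			return *found;

		/* binary search for the insertion point that keeps the array sorted */
		while (low <= high) {
			int mid = low + (high - low) / 2;

			if (strcmp(strs[mid], str) < 0) {
				low = mid + 1;
			} else {
				high = mid - 1;
				insert = mid;
			}
		}
		/* assumes spare capacity; growth and strdup() failure handling omitted */
		memmove(&strs[insert + 1], &strs[insert], (*num - insert) * sizeof(*strs));
		strs[insert] = strdup(str);
		(*num)++;
		return strs[insert];
	}
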
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 356e30c42c..27094211ed 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -180,8 +180,6 @@ struct cpu_aggr_map *cpu_aggr_map__empty_new(int nr)
cpus->nr = nr;
for (i = 0; i < nr; i++)
cpus->map[i] = aggr_cpu_id__empty();
-
- refcount_set(&cpus->refcnt, 1);
}
return cpus;
@@ -655,10 +653,10 @@ static char hex_char(unsigned char val)
size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
{
- int i, cpu;
+ int idx;
char *ptr = buf;
unsigned char *bitmap;
- struct perf_cpu last_cpu = perf_cpu_map__cpu(map, perf_cpu_map__nr(map) - 1);
+ struct perf_cpu c, last_cpu = perf_cpu_map__max(map);
if (buf == NULL)
return 0;
@@ -669,12 +667,10 @@ size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size)
return 0;
}
- for (i = 0; i < perf_cpu_map__nr(map); i++) {
- cpu = perf_cpu_map__cpu(map, i).cpu;
- bitmap[cpu / 8] |= 1 << (cpu % 8);
- }
+ perf_cpu_map__for_each_cpu(c, idx, map)
+ bitmap[c.cpu / 8] |= 1 << (c.cpu % 8);
- for (cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
+ for (int cpu = last_cpu.cpu / 4 * 4; cpu >= 0; cpu -= 4) {
unsigned char bits = bitmap[cpu / 8];
if (cpu % 8)
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 26cf76c693..ee0f6139b0 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -5,7 +5,6 @@
#include <stdbool.h>
#include <stdio.h>
#include <perf/cpumap.h>
-#include <linux/refcount.h>
/** Identify where counts are aggregated, -1 implies not to aggregate. */
struct aggr_cpu_id {
@@ -37,7 +36,6 @@ struct aggr_cpu_id {
/** A collection of aggr_cpu_id values, the "built" version is sorted and uniqued. */
struct cpu_aggr_map {
- refcount_t refcnt;
/** Number of valid entries. */
int nr;
/** The entries. */
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index d65d748588..5e9fbcfad7 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -335,8 +335,11 @@ static int cs_etm__process_aux_output_hw_id(struct perf_session *session,
trace_chan_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
/* check that we can handle this version */
- if (version > CS_AUX_HW_ID_CURR_VERSION)
+ if (version > CS_AUX_HW_ID_CURR_VERSION) {
+ pr_err("CS ETM Trace: PERF_RECORD_AUX_OUTPUT_HW_ID version %d not supported. Please update Perf.\n",
+ version);
return -EINVAL;
+ }
/* get access to the etm metadata */
etm = container_of(session->auxtrace, struct cs_etm_auxtrace, auxtrace);
@@ -1010,7 +1013,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
if (!dso)
goto out;
- if (dso->data.status == DSO_DATA_STATUS_ERROR &&
+ if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR &&
dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE))
goto out;
@@ -1024,11 +1027,11 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
if (len <= 0) {
ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n"
" Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n");
- if (!dso->auxtrace_warned) {
+ if (!dso__auxtrace_warned(dso)) {
pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n",
- address,
- dso->long_name ? dso->long_name : "Unknown");
- dso->auxtrace_warned = true;
+ address,
+ dso__long_name(dso) ? dso__long_name(dso) : "Unknown");
+ dso__set_auxtrace_warned(dso);
}
goto out;
}
diff --git a/tools/perf/util/data-convert-json.c b/tools/perf/util/data-convert-json.c
index 09d57efd2d..3cf64f5b23 100644
--- a/tools/perf/util/data-convert-json.c
+++ b/tools/perf/util/data-convert-json.c
@@ -134,7 +134,7 @@ static void output_sample_callchain_entry(struct perf_tool *tool,
output_json_key_string(out, false, 5, "symbol", al->sym->name);
if (dso) {
- const char *dso_name = dso->short_name;
+ const char *dso_name = dso__short_name(dso);
if (dso_name && strlen(dso_name) > 0) {
fputc(',', out);
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 106429155c..50f916374d 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -146,10 +146,10 @@ int db_export__comm_thread(struct db_export *dbe, struct comm *comm,
int db_export__dso(struct db_export *dbe, struct dso *dso,
struct machine *machine)
{
- if (dso->db_id)
+ if (dso__db_id(dso))
return 0;
- dso->db_id = ++dbe->dso_last_db_id;
+ dso__set_db_id(dso, ++dbe->dso_last_db_id);
if (dbe->export_dso)
return dbe->export_dso(dbe, dso, machine);
@@ -184,7 +184,7 @@ static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
err = db_export__dso(dbe, dso, maps__machine(al->maps));
if (err)
return err;
- *dso_db_id = dso->db_id;
+ *dso_db_id = dso__db_id(dso);
if (!al->sym) {
al->sym = symbol__new(al->addr, 0, 0, 0, "unknown");
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index c39ee0fcb8..d633d15329 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -41,6 +41,7 @@ static int redirect_to_stderr;
int debug_data_convert;
static FILE *_debug_file;
bool debug_display_time;
+int debug_type_profile;
FILE *debug_file(void)
{
@@ -231,6 +232,7 @@ static struct sublevel_option debug_opts[] = {
{ .name = "data-convert", .value_ptr = &debug_data_convert },
{ .name = "perf-event-open", .value_ptr = &debug_peo_args },
{ .name = "kmaps", .value_ptr = &debug_kmaps },
+ { .name = "type-profile", .value_ptr = &debug_type_profile },
{ .name = NULL, }
};
@@ -270,6 +272,7 @@ int perf_quiet_option(void)
redirect_to_stderr = 0;
debug_peo_args = 0;
debug_kmaps = 0;
+ debug_type_profile = 0;
return 0;
}
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 35a7a5ae76..a4026d1fd6 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -14,6 +14,7 @@ extern int debug_peo_args;
extern bool quiet, dump_trace;
extern int debug_ordered_events;
extern int debug_data_convert;
+extern int debug_type_profile;
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
new file mode 100644
index 0000000000..e10558b795
--- /dev/null
+++ b/tools/perf/util/disasm.c
@@ -0,0 +1,1837 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <libgen.h>
+#include <regex.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <linux/string.h>
+#include <subcmd/run-command.h>
+
+#include "annotate.h"
+#include "build-id.h"
+#include "debug.h"
+#include "disasm.h"
+#include "dso.h"
+#include "env.h"
+#include "evsel.h"
+#include "map.h"
+#include "maps.h"
+#include "namespaces.h"
+#include "srcline.h"
+#include "symbol.h"
+#include "util.h"
+
+static regex_t file_lineno;
+
+/* These can be referenced from the arch-dependent code */
+static struct ins_ops call_ops;
+static struct ins_ops dec_ops;
+static struct ins_ops jump_ops;
+static struct ins_ops mov_ops;
+static struct ins_ops nop_ops;
+static struct ins_ops lock_ops;
+static struct ins_ops ret_ops;
+
+static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name);
+static int call__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name);
+
+static void ins__sort(struct arch *arch);
+static int disasm_line__parse(char *line, const char **namep, char **rawp);
+
+static __attribute__((constructor)) void symbol__init_regexpr(void)
+{
+ regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
+}
+
+static int arch__grow_instructions(struct arch *arch)
+{
+ struct ins *new_instructions;
+ size_t new_nr_allocated;
+
+ if (arch->nr_instructions_allocated == 0 && arch->instructions)
+ goto grow_from_non_allocated_table;
+
+ new_nr_allocated = arch->nr_instructions_allocated + 128;
+ new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
+ if (new_instructions == NULL)
+ return -1;
+
+out_update_instructions:
+ arch->instructions = new_instructions;
+ arch->nr_instructions_allocated = new_nr_allocated;
+ return 0;
+
+grow_from_non_allocated_table:
+ new_nr_allocated = arch->nr_instructions + 128;
+ new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
+ if (new_instructions == NULL)
+ return -1;
+
+	memcpy(new_instructions, arch->instructions, arch->nr_instructions * sizeof(struct ins));
+ goto out_update_instructions;
+}
+
+static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
+{
+ struct ins *ins;
+
+ if (arch->nr_instructions == arch->nr_instructions_allocated &&
+ arch__grow_instructions(arch))
+ return -1;
+
+ ins = &arch->instructions[arch->nr_instructions];
+ ins->name = strdup(name);
+ if (!ins->name)
+ return -1;
+
+ ins->ops = ops;
+ arch->nr_instructions++;
+
+ ins__sort(arch);
+ return 0;
+}
+
+#include "arch/arc/annotate/instructions.c"
+#include "arch/arm/annotate/instructions.c"
+#include "arch/arm64/annotate/instructions.c"
+#include "arch/csky/annotate/instructions.c"
+#include "arch/loongarch/annotate/instructions.c"
+#include "arch/mips/annotate/instructions.c"
+#include "arch/x86/annotate/instructions.c"
+#include "arch/powerpc/annotate/instructions.c"
+#include "arch/riscv64/annotate/instructions.c"
+#include "arch/s390/annotate/instructions.c"
+#include "arch/sparc/annotate/instructions.c"
+
+static struct arch architectures[] = {
+ {
+ .name = "arc",
+ .init = arc__annotate_init,
+ },
+ {
+ .name = "arm",
+ .init = arm__annotate_init,
+ },
+ {
+ .name = "arm64",
+ .init = arm64__annotate_init,
+ },
+ {
+ .name = "csky",
+ .init = csky__annotate_init,
+ },
+ {
+ .name = "mips",
+ .init = mips__annotate_init,
+ .objdump = {
+ .comment_char = '#',
+ },
+ },
+ {
+ .name = "x86",
+ .init = x86__annotate_init,
+ .instructions = x86__instructions,
+ .nr_instructions = ARRAY_SIZE(x86__instructions),
+ .insn_suffix = "bwlq",
+ .objdump = {
+ .comment_char = '#',
+ .register_char = '%',
+ .memory_ref_char = '(',
+ .imm_char = '$',
+ },
+ },
+ {
+ .name = "powerpc",
+ .init = powerpc__annotate_init,
+ },
+ {
+ .name = "riscv64",
+ .init = riscv64__annotate_init,
+ },
+ {
+ .name = "s390",
+ .init = s390__annotate_init,
+ .objdump = {
+ .comment_char = '#',
+ },
+ },
+ {
+ .name = "sparc",
+ .init = sparc__annotate_init,
+ .objdump = {
+ .comment_char = '#',
+ },
+ },
+ {
+ .name = "loongarch",
+ .init = loongarch__annotate_init,
+ .objdump = {
+ .comment_char = '#',
+ },
+ },
+};
+
+static int arch__key_cmp(const void *name, const void *archp)
+{
+ const struct arch *arch = archp;
+
+ return strcmp(name, arch->name);
+}
+
+static int arch__cmp(const void *a, const void *b)
+{
+ const struct arch *aa = a;
+ const struct arch *ab = b;
+
+ return strcmp(aa->name, ab->name);
+}
+
+static void arch__sort(void)
+{
+ const int nmemb = ARRAY_SIZE(architectures);
+
+ qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
+}
+
+struct arch *arch__find(const char *name)
+{
+ const int nmemb = ARRAY_SIZE(architectures);
+ static bool sorted;
+
+ if (!sorted) {
+ arch__sort();
+ sorted = true;
+ }
+
+ return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
+}
+
+bool arch__is(struct arch *arch, const char *name)
+{
+ return !strcmp(arch->name, name);
+}
+
+static void ins_ops__delete(struct ins_operands *ops)
+{
+ if (ops == NULL)
+ return;
+ zfree(&ops->source.raw);
+ zfree(&ops->source.name);
+ zfree(&ops->target.raw);
+ zfree(&ops->target.name);
+}
+
+static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name)
+{
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw);
+}
+
+int ins__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name)
+{
+ if (ins->ops->scnprintf)
+ return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name);
+
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
+}
+
+bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2)
+{
+ if (!arch || !arch->ins_is_fused)
+ return false;
+
+ return arch->ins_is_fused(arch, ins1, ins2);
+}
+
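+/*
+ * Parse the operands of a call instruction as emitted by objdump, e.g.:
+ *
+ *   callq  416e70 <__gmon_start__@plt>   -> target.addr and target.name
+ *   callq  *0x8(%rbx)                    -> indirect call, kept as raw text
+ *
+ * and try to resolve the target address back to a symbol via the maps.
+ */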
+static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
+{
+ char *endptr, *tok, *name;
+ struct map *map = ms->map;
+ struct addr_map_symbol target = {
+ .ms = { .map = map, },
+ };
+
+ ops->target.addr = strtoull(ops->raw, &endptr, 16);
+
+ name = strchr(endptr, '<');
+ if (name == NULL)
+ goto indirect_call;
+
+ name++;
+
+ if (arch->objdump.skip_functions_char &&
+ strchr(name, arch->objdump.skip_functions_char))
+ return -1;
+
+ tok = strchr(name, '>');
+ if (tok == NULL)
+ return -1;
+
+ *tok = '\0';
+ ops->target.name = strdup(name);
+ *tok = '>';
+
+ if (ops->target.name == NULL)
+ return -1;
+find_target:
+ target.addr = map__objdump_2mem(map, ops->target.addr);
+
+ if (maps__find_ams(ms->maps, &target) == 0 &&
+ map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
+ ops->target.sym = target.ms.sym;
+
+ return 0;
+
+indirect_call:
+ tok = strchr(endptr, '*');
+ if (tok != NULL) {
+ endptr++;
+
+ /*
+ * An indirect call can use a non-RIP register and offset, e.g. callq *0x8(%rbx).
+ * Do not parse such instructions.
+ */
+ if (strstr(endptr, "(%r") == NULL)
+ ops->target.addr = strtoull(endptr, NULL, 16);
+ }
+ goto find_target;
+}
+
+static int call__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name)
+{
+ if (ops->target.sym)
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
+
+ if (ops->target.addr == 0)
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
+
+ if (ops->target.name)
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name);
+
+ return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr);
+}
+
+static struct ins_ops call_ops = {
+ .parse = call__parse,
+ .scnprintf = call__scnprintf,
+};
+
+bool ins__is_call(const struct ins *ins)
+{
+ return ins->ops == &call_ops || ins->ops == &s390_call_ops || ins->ops == &loongarch_call_ops;
+}
+
+/*
+ * Prevents matching commas in the comment section, e.g.:
+ * ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast
+ *
+ * and skips commas that are part of function arguments, e.g.:
+ * 1d8b4ac <linemap_lookup(line_maps const*, unsigned int)+0xcc>
+ */
+static inline const char *validate_comma(const char *c, struct ins_operands *ops)
+{
+ if (ops->jump.raw_comment && c > ops->jump.raw_comment)
+ return NULL;
+
+ if (ops->jump.raw_func_start && c > ops->jump.raw_func_start)
+ return NULL;
+
+ return c;
+}
+
+static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
+{
+ struct map *map = ms->map;
+ struct symbol *sym = ms->sym;
+ struct addr_map_symbol target = {
+ .ms = { .map = map, },
+ };
+ const char *c = strchr(ops->raw, ',');
+ u64 start, end;
+
+ ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char);
+ ops->jump.raw_func_start = strchr(ops->raw, '<');
+
+ c = validate_comma(c, ops);
+
+ /*
+ * Examples of lines to parse for the _cpp_lex_token@@Base
+ * function:
+ *
+ * 1159e6c: jne 115aa32 <_cpp_lex_token@@Base+0xf92>
+ * 1159e8b: jne c469be <cpp_named_operator2name@@Base+0xa72>
+ *
+ * The first is a jump to an offset inside the same function,
+ * the second is to another function, i.e. that 0xa72 is an
+ * offset in the cpp_named_operator2name@@base function.
+ */
+ /*
+ * Skip over up to two possible operands to get to the address, e.g.:
+ * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0>
+ */
+ if (c++ != NULL) {
+ ops->target.addr = strtoull(c, NULL, 16);
+ if (!ops->target.addr) {
+ c = strchr(c, ',');
+ c = validate_comma(c, ops);
+ if (c++ != NULL)
+ ops->target.addr = strtoull(c, NULL, 16);
+ }
+ } else {
+ ops->target.addr = strtoull(ops->raw, NULL, 16);
+ }
+
+ target.addr = map__objdump_2mem(map, ops->target.addr);
+ start = map__unmap_ip(map, sym->start);
+ end = map__unmap_ip(map, sym->end);
+
+ ops->target.outside = target.addr < start || target.addr > end;
+
+ /*
+ * FIXME: things like this in _cpp_lex_token (gcc's cc1 program):
+
+ cpp_named_operator2name@@Base+0xa72
+
+ * Point to a place that is after the cpp_named_operator2name
+ * boundaries, i.e. in the ELF symbol table for cc1
+ * cpp_named_operator2name is marked as being 32 bytes long, but in
+ * fact it is much larger than that, so we seem to need a symbols__find()
+ * routine that looks for >= current->start and < next_symbol->start,
+ * possibly just for C++ objects?
+ *
+ * For now let's just make some progress by marking jumps to outside the
+ * current function as call-like.
+ *
+ * Actual navigation will come next, with further understanding of how
+ * the symbol searching and disassembly should be done.
+ */
+ if (maps__find_ams(ms->maps, &target) == 0 &&
+ map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
+ ops->target.sym = target.ms.sym;
+
+ if (!ops->target.outside) {
+ ops->target.offset = target.addr - start;
+ ops->target.offset_avail = true;
+ } else {
+ ops->target.offset_avail = false;
+ }
+
+ return 0;
+}
+
+static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name)
+{
+ const char *c;
+
+ if (!ops->target.addr || ops->target.offset < 0)
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
+
+ if (ops->target.outside && ops->target.sym != NULL)
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name);
+
+ c = strchr(ops->raw, ',');
+ c = validate_comma(c, ops);
+
+ if (c != NULL) {
+ const char *c2 = strchr(c + 1, ',');
+
+ c2 = validate_comma(c2, ops);
+ /* check for 3-op insn */
+ if (c2 != NULL)
+ c = c2;
+ c++;
+
+ /* mirror arch objdump's space-after-comma style */
+ if (*c == ' ')
+ c++;
+ }
+
+ return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name,
+ ins->name, c ? c - ops->raw : 0, ops->raw,
+ ops->target.offset);
+}
+
+static void jump__delete(struct ins_operands *ops __maybe_unused)
+{
+ /*
+ * The ops->jump.raw_comment and ops->jump.raw_func_start belong to the
+ * raw string, don't free them.
+ */
+}
+
+static struct ins_ops jump_ops = {
+ .free = jump__delete,
+ .parse = jump__parse,
+ .scnprintf = jump__scnprintf,
+};
+
+bool ins__is_jump(const struct ins *ins)
+{
+ return ins->ops == &jump_ops || ins->ops == &loongarch_jump_ops;
+}
+
+static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
+{
+ char *endptr, *name, *t;
+
+ if (strstr(raw, "(%rip)") == NULL)
+ return 0;
+
+ *addrp = strtoull(comment, &endptr, 16);
+ if (endptr == comment)
+ return 0;
+ name = strchr(endptr, '<');
+ if (name == NULL)
+ return -1;
+
+ name++;
+
+ t = strchr(name, '>');
+ if (t == NULL)
+ return 0;
+
+ *t = '\0';
+ *namep = strdup(name);
+ *t = '>';
+
+ return 0;
+}
+
+static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
+{
+ ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
+ if (ops->locked.ops == NULL)
+ return 0;
+
+ if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
+ goto out_free_ops;
+
+ ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);
+
+ if (ops->locked.ins.ops == NULL)
+ goto out_free_ops;
+
+ if (ops->locked.ins.ops->parse &&
+ ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0)
+ goto out_free_ops;
+
+ return 0;
+
+out_free_ops:
+ zfree(&ops->locked.ops);
+ return 0;
+}
+
+static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name)
+{
+ int printed;
+
+ if (ops->locked.ins.ops == NULL)
+ return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name);
+
+ printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name);
+ return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
+ size - printed, ops->locked.ops, max_ins_name);
+}
+
+static void lock__delete(struct ins_operands *ops)
+{
+ struct ins *ins = &ops->locked.ins;
+
+ if (ins->ops && ins->ops->free)
+ ins->ops->free(ops->locked.ops);
+ else
+ ins_ops__delete(ops->locked.ops);
+
+ zfree(&ops->locked.ops);
+ zfree(&ops->target.raw);
+ zfree(&ops->target.name);
+}
+
+static struct ins_ops lock_ops = {
+ .free = lock__delete,
+ .parse = lock__parse,
+ .scnprintf = lock__scnprintf,
+};
+
+/*
+ * Check if the operand has more than one register, as in x86 SIB addressing:
+ * 0x1234(%rax, %rbx, 8)
+ *
+ * It doesn't care about segment selectors like %gs:0x5678(%rcx), so just check
+ * the input string after 'memory_ref_char', if present.
+ */
+static bool check_multi_regs(struct arch *arch, const char *op)
+{
+ int count = 0;
+
+ if (arch->objdump.register_char == 0)
+ return false;
+
+ if (arch->objdump.memory_ref_char) {
+ op = strchr(op, arch->objdump.memory_ref_char);
+ if (op == NULL)
+ return false;
+ }
+
+ while ((op = strchr(op, arch->objdump.register_char)) != NULL) {
+ count++;
+ op++;
+ }
+
+ return count > 1;
+}
+
+static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
+{
+ char *s = strchr(ops->raw, ','), *target, *comment, prev;
+
+ if (s == NULL)
+ return -1;
+
+ *s = '\0';
+
+ /*
+ * x86 SIB addressing looks like 0x8(%rax, %rcx, 1), so the source
+ * operand must extend to the closing parenthesis.
+ */
+ if (strchr(ops->raw, '(')) {
+ *s = ',';
+ s = strchr(ops->raw, ')');
+ if (s == NULL || s[1] != ',')
+ return -1;
+ *++s = '\0';
+ }
+
+ ops->source.raw = strdup(ops->raw);
+ *s = ',';
+
+ if (ops->source.raw == NULL)
+ return -1;
+
+ ops->source.multi_regs = check_multi_regs(arch, ops->source.raw);
+
+ target = skip_spaces(++s);
+ comment = strchr(s, arch->objdump.comment_char);
+
+ if (comment != NULL)
+ s = comment - 1;
+ else
+ s = strchr(s, '\0') - 1;
+
+ while (s > target && isspace(s[0]))
+ --s;
+ s++;
+ prev = *s;
+ *s = '\0';
+
+ ops->target.raw = strdup(target);
+ *s = prev;
+
+ if (ops->target.raw == NULL)
+ goto out_free_source;
+
+ ops->target.multi_regs = check_multi_regs(arch, ops->target.raw);
+
+ if (comment == NULL)
+ return 0;
+
+ comment = skip_spaces(comment);
+ comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name);
+ comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
+
+ return 0;
+
+out_free_source:
+ zfree(&ops->source.raw);
+ return -1;
+}
+
+static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name)
+{
+ return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name,
+ ops->source.name ?: ops->source.raw,
+ ops->target.name ?: ops->target.raw);
+}
+
+static struct ins_ops mov_ops = {
+ .parse = mov__parse,
+ .scnprintf = mov__scnprintf,
+};
+
+static int dec__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused)
+{
+ char *target, *comment, *s, prev;
+
+ target = s = ops->raw;
+
+ while (s[0] != '\0' && !isspace(s[0]))
+ ++s;
+ prev = *s;
+ *s = '\0';
+
+ ops->target.raw = strdup(target);
+ *s = prev;
+
+ if (ops->target.raw == NULL)
+ return -1;
+
+ comment = strchr(s, arch->objdump.comment_char);
+ if (comment == NULL)
+ return 0;
+
+ comment = skip_spaces(comment);
+ comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name);
+
+ return 0;
+}
+
+static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name)
+{
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name,
+ ops->target.name ?: ops->target.raw);
+}
+
+static struct ins_ops dec_ops = {
+ .parse = dec__parse,
+ .scnprintf = dec__scnprintf,
+};
+
+static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
+ struct ins_operands *ops __maybe_unused, int max_ins_name)
+{
+ return scnprintf(bf, size, "%-*s", max_ins_name, "nop");
+}
+
+static struct ins_ops nop_ops = {
+ .scnprintf = nop__scnprintf,
+};
+
+static struct ins_ops ret_ops = {
+ .scnprintf = ins__raw_scnprintf,
+};
+
+bool ins__is_nop(const struct ins *ins)
+{
+ return ins->ops == &nop_ops;
+}
+
+bool ins__is_ret(const struct ins *ins)
+{
+ return ins->ops == &ret_ops;
+}
+
+bool ins__is_lock(const struct ins *ins)
+{
+ return ins->ops == &lock_ops;
+}
+
+static int ins__key_cmp(const void *name, const void *insp)
+{
+ const struct ins *ins = insp;
+
+ return strcmp(name, ins->name);
+}
+
+static int ins__cmp(const void *a, const void *b)
+{
+ const struct ins *ia = a;
+ const struct ins *ib = b;
+
+ return strcmp(ia->name, ib->name);
+}
+
+static void ins__sort(struct arch *arch)
+{
+ const int nmemb = arch->nr_instructions;
+
+ qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
+}
+
+static struct ins_ops *__ins__find(struct arch *arch, const char *name)
+{
+ struct ins *ins;
+ const int nmemb = arch->nr_instructions;
+
+ if (!arch->sorted_instructions) {
+ ins__sort(arch);
+ arch->sorted_instructions = true;
+ }
+
+ ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
+ if (ins)
+ return ins->ops;
+
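+ /*
+ * Not found: if the arch defines size suffixes (x86: "bwlq"), strip a
+ * trailing suffix, e.g. "movl" -> "mov", and retry the lookup.
+ */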
+ if (arch->insn_suffix) {
+ char tmp[32];
+ char suffix;
+ size_t len = strlen(name);
+
+ if (len == 0 || len >= sizeof(tmp))
+ return NULL;
+
+ suffix = name[len - 1];
+ if (strchr(arch->insn_suffix, suffix) == NULL)
+ return NULL;
+
+ strcpy(tmp, name);
+ tmp[len - 1] = '\0'; /* remove the suffix and check again */
+
+ ins = bsearch(tmp, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
+ }
+ return ins ? ins->ops : NULL;
+}
+
+struct ins_ops *ins__find(struct arch *arch, const char *name)
+{
+ struct ins_ops *ops = __ins__find(arch, name);
+
+ if (!ops && arch->associate_instruction_ops)
+ ops = arch->associate_instruction_ops(arch, name);
+
+ return ops;
+}
+
+static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms)
+{
+ dl->ins.ops = ins__find(arch, dl->ins.name);
+
+ if (!dl->ins.ops)
+ return;
+
+ if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0)
+ dl->ins.ops = NULL;
+}
+
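+/*
+ * Split a disassembled line into the instruction name and its raw operands,
+ * e.g. "mov    0x2f5ad5(%rip),%rax" -> name "mov", raw "0x2f5ad5(%rip),%rax".
+ * The name is duplicated, while the raw pointer points into the original line.
+ */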
+static int disasm_line__parse(char *line, const char **namep, char **rawp)
+{
+ char tmp, *name = skip_spaces(line);
+
+ if (name[0] == '\0')
+ return -1;
+
+ *rawp = name + 1;
+
+ while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
+ ++*rawp;
+
+ tmp = (*rawp)[0];
+ (*rawp)[0] = '\0';
+ *namep = strdup(name);
+
+ if (*namep == NULL)
+ goto out;
+
+ (*rawp)[0] = tmp;
+ *rawp = strim(*rawp);
+
+ return 0;
+
+out:
+ return -1;
+}
+
+static void annotation_line__init(struct annotation_line *al,
+ struct annotate_args *args,
+ int nr)
+{
+ al->offset = args->offset;
+ al->line = strdup(args->line);
+ al->line_nr = args->line_nr;
+ al->fileloc = args->fileloc;
+ al->data_nr = nr;
+}
+
+static void annotation_line__exit(struct annotation_line *al)
+{
+ zfree_srcline(&al->path);
+ zfree(&al->line);
+ zfree(&al->cycles);
+}
+
+static size_t disasm_line_size(int nr)
+{
+ struct annotation_line *al;
+
+ return (sizeof(struct disasm_line) + (sizeof(al->data[0]) * nr));
+}
+
+/*
+ * Allocate the disasm annotation line data with the
+ * following layout:
+ *
+ * -------------------------------------------
+ * struct disasm_line | struct annotation_line
+ * -------------------------------------------
+ *
+ * 'struct annotation_line' is the last member of 'struct disasm_line' for
+ * easy access, and its per-event data[] entries (one per group member)
+ * are allocated right behind it.
+ */
+struct disasm_line *disasm_line__new(struct annotate_args *args)
+{
+ struct disasm_line *dl = NULL;
+ int nr = 1;
+
+ if (evsel__is_group_event(args->evsel))
+ nr = args->evsel->core.nr_members;
+
+ dl = zalloc(disasm_line_size(nr));
+ if (!dl)
+ return NULL;
+
+ annotation_line__init(&dl->al, args, nr);
+ if (dl->al.line == NULL)
+ goto out_delete;
+
+ if (args->offset != -1) {
+ if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
+ goto out_free_line;
+
+ disasm_line__init_ins(dl, args->arch, &args->ms);
+ }
+
+ return dl;
+
+out_free_line:
+ zfree(&dl->al.line);
+out_delete:
+ free(dl);
+ return NULL;
+}
+
+void disasm_line__free(struct disasm_line *dl)
+{
+ if (dl->ins.ops && dl->ins.ops->free)
+ dl->ins.ops->free(&dl->ops);
+ else
+ ins_ops__delete(&dl->ops);
+ zfree(&dl->ins.name);
+ annotation_line__exit(&dl->al);
+ free(dl);
+}
+
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
+{
+ if (raw || !dl->ins.ops)
+ return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw);
+
+ return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name);
+}
+
+/*
+ * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw-insn)
+ * which looks like the following:
+ *
+ * 0000000000415500 <_init>:
+ * 415500: sub $0x8,%rsp
+ * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
+ * 41550b: test %rax,%rax
+ * 41550e: je 415515 <_init+0x15>
+ * 415510: callq 416e70 <__gmon_start__@plt>
+ * 415515: add $0x8,%rsp
+ * 415519: retq
+ *
+ * it will be parsed and saved into struct disasm_line as
+ * <offset> <name> <ops.raw>
+ *
+ * The offset will be a relative offset from the start of the symbol and -1
+ * means that it's not a disassembly line, so it should be treated differently.
+ * The ops.raw part will be parsed further according to the type of the instruction.
+ */
+static int symbol__parse_objdump_line(struct symbol *sym,
+ struct annotate_args *args,
+ char *parsed_line, int *line_nr, char **fileloc)
+{
+ struct map *map = args->ms.map;
+ struct annotation *notes = symbol__annotation(sym);
+ struct disasm_line *dl;
+ char *tmp;
+ s64 line_ip, offset = -1;
+ regmatch_t match[2];
+
+ /* /filename:linenr ? Save line number and ignore. */
+ if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
+ *line_nr = atoi(parsed_line + match[1].rm_so);
+ free(*fileloc);
+ *fileloc = strdup(parsed_line);
+ return 0;
+ }
+
+ /* Process hex address followed by ':'. */
+ line_ip = strtoull(parsed_line, &tmp, 16);
+ if (parsed_line != tmp && tmp[0] == ':' && tmp[1] != '\0') {
+ u64 start = map__rip_2objdump(map, sym->start),
+ end = map__rip_2objdump(map, sym->end);
+
+ offset = line_ip - start;
+ if ((u64)line_ip < start || (u64)line_ip >= end)
+ offset = -1;
+ else
+ parsed_line = tmp + 1;
+ }
+
+ args->offset = offset;
+ args->line = parsed_line;
+ args->line_nr = *line_nr;
+ args->fileloc = *fileloc;
+ args->ms.sym = sym;
+
+ dl = disasm_line__new(args);
+ (*line_nr)++;
+
+ if (dl == NULL)
+ return -1;
+
+ if (!disasm_line__has_local_offset(dl)) {
+ dl->ops.target.offset = dl->ops.target.addr -
+ map__rip_2objdump(map, sym->start);
+ dl->ops.target.offset_avail = true;
+ }
+
+ /* kcore has no symbols, so add the call target symbol */
+ if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.sym) {
+ struct addr_map_symbol target = {
+ .addr = dl->ops.target.addr,
+ .ms = { .map = map, },
+ };
+
+ if (!maps__find_ams(args->ms.maps, &target) &&
+ target.ms.sym->start == target.al_addr)
+ dl->ops.target.sym = target.ms.sym;
+ }
+
+ annotation_line__add(&dl->al, &notes->src->source);
+ return 0;
+}
+
+static void delete_last_nop(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct list_head *list = &notes->src->source;
+ struct disasm_line *dl;
+
+ while (!list_empty(list)) {
+ dl = list_entry(list->prev, struct disasm_line, al.node);
+
+ if (dl->ins.ops) {
+ if (!ins__is_nop(&dl->ins))
+ return;
+ } else {
+ if (!strstr(dl->al.line, " nop ") &&
+ !strstr(dl->al.line, " nopl ") &&
+ !strstr(dl->al.line, " nopw "))
+ return;
+ }
+
+ list_del_init(&dl->al.node);
+ disasm_line__free(dl);
+ }
+}
+
+int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen)
+{
+ struct dso *dso = map__dso(ms->map);
+
+ BUG_ON(buflen == 0);
+
+ if (errnum >= 0) {
+ str_error_r(errnum, buf, buflen);
+ return 0;
+ }
+
+ switch (errnum) {
+ case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
+ char bf[SBUILD_ID_SIZE + 15] = " with build id ";
+ char *build_id_msg = NULL;
+
+ if (dso__has_build_id(dso)) {
+ build_id__sprintf(dso__bid(dso), bf + 15);
+ build_id_msg = bf;
+ }
+ scnprintf(buf, buflen,
+ "No vmlinux file%s\nwas found in the path.\n\n"
+ "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
+ "Please use:\n\n"
+ " perf buildid-cache -vu vmlinux\n\n"
+ "or:\n\n"
+ " --vmlinux vmlinux\n", build_id_msg ?: "");
+ }
+ break;
+ case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
+ scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
+ break;
+ case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP:
+ scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions.");
+ break;
+ case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING:
+ scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization.");
+ break;
+ case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE:
+ scnprintf(buf, buflen, "Invalid BPF file: %s.", dso__long_name(dso));
+ break;
+ case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF:
+ scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.",
+ dso__long_name(dso));
+ break;
+ default:
+ scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
+ break;
+ }
+
+ return 0;
+}
+
+static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
+{
+ char linkname[PATH_MAX];
+ char *build_id_filename;
+ char *build_id_path = NULL;
+ char *pos;
+ int len;
+
+ if (dso__symtab_type(dso) == DSO_BINARY_TYPE__KALLSYMS &&
+ !dso__is_kcore(dso))
+ return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;
+
+ build_id_filename = dso__build_id_filename(dso, NULL, 0, false);
+ if (build_id_filename) {
+ __symbol__join_symfs(filename, filename_size, build_id_filename);
+ free(build_id_filename);
+ } else {
+ if (dso__has_build_id(dso))
+ return ENOMEM;
+ goto fallback;
+ }
+
+ build_id_path = strdup(filename);
+ if (!build_id_path)
+ return ENOMEM;
+
+ /*
+ * The old-style build-id cache has a name of the form XX/XXXXXXX.. while
+ * the new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
+ * Extract the build-id part of the dirname in the new style only.
+ */
+ pos = strrchr(build_id_path, '/');
+ if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
+ dirname(build_id_path);
+
+ if (dso__is_kcore(dso))
+ goto fallback;
+
+ len = readlink(build_id_path, linkname, sizeof(linkname) - 1);
+ if (len < 0)
+ goto fallback;
+
+ linkname[len] = '\0';
+ if (strstr(linkname, DSO__NAME_KALLSYMS) ||
+ access(filename, R_OK)) {
+fallback:
+ /*
+ * If we don't have build-ids or the build-id file isn't in the
+ * cache, or is just a kallsyms file, well, let's hope that this
+ * DSO is the same as when 'perf record' ran.
+ */
+ if (dso__kernel(dso) && dso__long_name(dso)[0] == '/')
+ snprintf(filename, filename_size, "%s", dso__long_name(dso));
+ else
+ __symbol__join_symfs(filename, filename_size, dso__long_name(dso));
+
+ mutex_lock(dso__lock(dso));
+ if (access(filename, R_OK) && errno == ENOENT && dso__nsinfo(dso)) {
+ char *new_name = dso__filename_with_chroot(dso, filename);
+ if (new_name) {
+ strlcpy(filename, new_name, filename_size);
+ free(new_name);
+ }
+ }
+ mutex_unlock(dso__lock(dso));
+ } else if (dso__binary_type(dso) == DSO_BINARY_TYPE__NOT_FOUND) {
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__BUILD_ID_CACHE);
+ }
+
+ free(build_id_path);
+ return 0;
+}
+
+#if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+#define PACKAGE "perf"
+#include <bfd.h>
+#include <dis-asm.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <linux/btf.h>
+#include <tools/dis-asm-compat.h>
+
+#include "bpf-event.h"
+#include "bpf-utils.h"
+
+static int symbol__disassemble_bpf(struct symbol *sym,
+ struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct bpf_prog_linfo *prog_linfo = NULL;
+ struct bpf_prog_info_node *info_node;
+ int len = sym->end - sym->start;
+ disassembler_ftype disassemble;
+ struct map *map = args->ms.map;
+ struct perf_bpil *info_linear;
+ struct disassemble_info info;
+ struct dso *dso = map__dso(map);
+ int pc = 0, count, sub_id;
+ struct btf *btf = NULL;
+ char tpath[PATH_MAX];
+ size_t buf_size;
+ int nr_skip = 0;
+ char *buf;
+ bfd *bfdf;
+ int ret;
+ FILE *s;
+
+ if (dso__binary_type(dso) != DSO_BINARY_TYPE__BPF_PROG_INFO)
+ return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
+
+ pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
+ sym->name, sym->start, sym->end - sym->start);
+
+ memset(tpath, 0, sizeof(tpath));
+ perf_exe(tpath, sizeof(tpath));
+
+ bfdf = bfd_openr(tpath, NULL);
+ if (bfdf == NULL)
+ abort();
+
+ if (!bfd_check_format(bfdf, bfd_object))
+ abort();
+
+ s = open_memstream(&buf, &buf_size);
+ if (!s) {
+ ret = errno;
+ goto out;
+ }
+ init_disassemble_info_compat(&info, s,
+ (fprintf_ftype) fprintf,
+ fprintf_styled);
+ info.arch = bfd_get_arch(bfdf);
+ info.mach = bfd_get_mach(bfdf);
+
+ info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env,
+ dso__bpf_prog(dso)->id);
+ if (!info_node) {
+ ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
+ goto out;
+ }
+ info_linear = info_node->info_linear;
+ sub_id = dso__bpf_prog(dso)->sub_id;
+
+ info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns);
+ info.buffer_length = info_linear->info.jited_prog_len;
+
+ if (info_linear->info.nr_line_info)
+ prog_linfo = bpf_prog_linfo__new(&info_linear->info);
+
+ if (info_linear->info.btf_id) {
+ struct btf_node *node;
+
+ node = perf_env__find_btf(dso__bpf_prog(dso)->env,
+ info_linear->info.btf_id);
+ if (node)
+ btf = btf__new((__u8 *)(node->data),
+ node->data_size);
+ }
+
+ disassemble_init_for_target(&info);
+
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
+ disassemble = disassembler(bfdf);
+#endif
+ if (disassemble == NULL)
+ abort();
+
+ fflush(s);
+ do {
+ const struct bpf_line_info *linfo = NULL;
+ struct disasm_line *dl;
+ size_t prev_buf_size;
+ const char *srcline;
+ u64 addr;
+
+ addr = pc + ((u64 *)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id];
+ count = disassemble(pc, &info);
+
+ if (prog_linfo)
+ linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
+ addr, sub_id,
+ nr_skip);
+
+ if (linfo && btf) {
+ srcline = btf__name_by_offset(btf, linfo->line_off);
+ nr_skip++;
+ } else
+ srcline = NULL;
+
+ fprintf(s, "\n");
+ prev_buf_size = buf_size;
+ fflush(s);
+
+ if (!annotate_opts.hide_src_code && srcline) {
+ args->offset = -1;
+ args->line = strdup(srcline);
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl) {
+ annotation_line__add(&dl->al,
+ &notes->src->source);
+ }
+ }
+
+ args->offset = pc;
+ args->line = buf + prev_buf_size;
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+ dl = disasm_line__new(args);
+ if (dl)
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ pc += count;
+ } while (count > 0 && pc < len);
+
+ ret = 0;
+out:
+ free(prog_linfo);
+ btf__free(btf);
+ fclose(s);
+ bfd_close(bfdf);
+ return ret;
+}
+#else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused,
+ struct annotate_args *args __maybe_unused)
+{
+ return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF;
+}
+#endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT)
+
+static int
+symbol__disassemble_bpf_image(struct symbol *sym,
+ struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct disasm_line *dl;
+
+ args->offset = -1;
+ args->line = strdup("to be implemented");
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ dl = disasm_line__new(args);
+ if (dl)
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ zfree(&args->line);
+ return 0;
+}
+
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+#include <capstone/capstone.h>
+
+static int open_capstone_handle(struct annotate_args *args, bool is_64bit,
+ csh *handle)
+{
+ struct annotation_options *opt = args->options;
+ cs_mode mode = is_64bit ? CS_MODE_64 : CS_MODE_32;
+
+ /* TODO: support more architectures */
+ if (!arch__is(args->arch, "x86"))
+ return -1;
+
+ if (cs_open(CS_ARCH_X86, mode, handle) != CS_ERR_OK)
+ return -1;
+
+ if (!opt->disassembler_style ||
+ !strcmp(opt->disassembler_style, "att"))
+ cs_option(*handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT);
+
+ /*
+ * Resolving address operands to symbols is implemented
+ * on x86 by investigating instruction details.
+ */
+ cs_option(*handle, CS_OPT_DETAIL, CS_OPT_ON);
+
+ return 0;
+}
+
+struct find_file_offset_data {
+ u64 ip;
+ u64 offset;
+};
+
+/* This will be called for each PHDR in an ELF binary */
+static int find_file_offset(u64 start, u64 len, u64 pgoff, void *arg)
+{
+ struct find_file_offset_data *data = arg;
+
+ if (start <= data->ip && data->ip < start + len) {
+ data->offset = pgoff + data->ip - start;
+ return 1;
+ }
+ return 0;
+}
+
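+/*
+ * For RIP-relative memory operands, append a symbolic comment in the style
+ * objdump uses, e.g.:
+ *
+ *   mov 0x2f5ad5(%rip),%rax   # 70afe0 <_DYNAMIC+0x2f8>
+ */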
+static void print_capstone_detail(cs_insn *insn, char *buf, size_t len,
+ struct annotate_args *args, u64 addr)
+{
+ int i;
+ struct map *map = args->ms.map;
+ struct symbol *sym;
+
+ /* TODO: support more architectures */
+ if (!arch__is(args->arch, "x86"))
+ return;
+
+ if (insn->detail == NULL)
+ return;
+
+ for (i = 0; i < insn->detail->x86.op_count; i++) {
+ cs_x86_op *op = &insn->detail->x86.operands[i];
+ u64 orig_addr;
+
+ if (op->type != X86_OP_MEM)
+ continue;
+
+ /* only print RIP-based global symbols for now */
+ if (op->mem.base != X86_REG_RIP)
+ continue;
+
+ /* get the target address */
+ orig_addr = addr + insn->size + op->mem.disp;
+ addr = map__objdump_2mem(map, orig_addr);
+
+ if (dso__kernel(map__dso(map))) {
+ /*
+ * The kernel maps can be split into sections,
+ * so find the map first and then search for the symbol.
+ */
+ map = maps__find(map__kmaps(map), addr);
+ if (map == NULL)
+ continue;
+ }
+
+ /* convert it to map-relative address for search */
+ addr = map__map_ip(map, addr);
+
+ sym = map__find_symbol(map, addr);
+ if (sym == NULL)
+ continue;
+
+ if (addr == sym->start) {
+ scnprintf(buf, len, "\t# %"PRIx64" <%s>",
+ orig_addr, sym->name);
+ } else {
+ scnprintf(buf, len, "\t# %"PRIx64" <%s+%#"PRIx64">",
+ orig_addr, sym->name, addr - sym->start);
+ }
+ break;
+ }
+}
+
+static int symbol__disassemble_capstone(char *filename, struct symbol *sym,
+ struct annotate_args *args)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct map *map = args->ms.map;
+ struct dso *dso = map__dso(map);
+ struct nscookie nsc;
+ u64 start = map__rip_2objdump(map, sym->start);
+ u64 end = map__rip_2objdump(map, sym->end);
+ u64 len = end - start;
+ u64 offset;
+ int i, fd, count;
+ bool is_64bit = false;
+ bool needs_cs_close = false;
+ u8 *buf = NULL;
+ struct find_file_offset_data data = {
+ .ip = start,
+ };
+ csh handle;
+ cs_insn *insn;
+ char disasm_buf[512];
+ struct disasm_line *dl;
+
+ if (args->options->objdump_path)
+ return -1;
+
+ nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
+ fd = open(filename, O_RDONLY);
+ nsinfo__mountns_exit(&nsc);
+ if (fd < 0)
+ return -1;
+
+ if (file__read_maps(fd, /*exe=*/true, find_file_offset, &data,
+ &is_64bit) == 0)
+ goto err;
+
+ if (open_capstone_handle(args, is_64bit, &handle) < 0)
+ goto err;
+
+ needs_cs_close = true;
+
+ buf = malloc(len);
+ if (buf == NULL)
+ goto err;
+
+ count = pread(fd, buf, len, data.offset);
+ close(fd);
+ fd = -1;
+
+ if ((u64)count != len)
+ goto err;
+
+ /* add the function address and name */
+ scnprintf(disasm_buf, sizeof(disasm_buf), "%#"PRIx64" <%s>:",
+ start, sym->name);
+
+ args->offset = -1;
+ args->line = disasm_buf;
+ args->line_nr = 0;
+ args->fileloc = NULL;
+ args->ms.sym = sym;
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ count = cs_disasm(handle, buf, len, start, len, &insn);
+ for (i = 0, offset = 0; i < count; i++) {
+ int printed;
+
+ printed = scnprintf(disasm_buf, sizeof(disasm_buf),
+ " %-7s %s",
+ insn[i].mnemonic, insn[i].op_str);
+ print_capstone_detail(&insn[i], disasm_buf + printed,
+ sizeof(disasm_buf) - printed, args,
+ start + offset);
+
+ args->offset = offset;
+ args->line = disasm_buf;
+
+ dl = disasm_line__new(args);
+ if (dl == NULL)
+ goto err;
+
+ annotation_line__add(&dl->al, &notes->src->source);
+
+ offset += insn[i].size;
+ }
+
+ /* It failed in the middle: probably due to unknown instructions */
+ if (offset != len) {
+ struct list_head *list = &notes->src->source;
+
+ /* Discard all lines and fallback to objdump */
+ while (!list_empty(list)) {
+ dl = list_first_entry(list, struct disasm_line, al.node);
+
+ list_del_init(&dl->al.node);
+ disasm_line__free(dl);
+ }
+ count = -1;
+ }
+
+out:
+ if (needs_cs_close)
+ cs_close(&handle);
+ free(buf);
+ return count < 0 ? count : 0;
+
+err:
+ if (fd >= 0)
+ close(fd);
+ if (needs_cs_close) {
+ struct disasm_line *tmp;
+
+ /*
+ * It probably failed in the middle of the above loop.
+ * Release any resources it might have added.
+ */
+ list_for_each_entry_safe(dl, tmp, &notes->src->source, al.node) {
+ list_del(&dl->al.node);
+ free(dl);
+ }
+ }
+ count = -1;
+ goto out;
+}
+#endif
+
+/*
+ * Possibly create a new version of line with tabs expanded. Returns the
+ * existing or new line, storage is updated if a new line is allocated. If
+ * allocation fails then NULL is returned.
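+ * For example, "a\tbc" becomes "a" followed by seven spaces and then "bc",
+ * i.e. each tab is replaced by spaces up to the next multiple-of-8 column.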
+ */
+static char *expand_tabs(char *line, char **storage, size_t *storage_len)
+{
+ size_t i, src, dst, len, new_storage_len, num_tabs;
+ char *new_line;
+ size_t line_len = strlen(line);
+
+ for (num_tabs = 0, i = 0; i < line_len; i++)
+ if (line[i] == '\t')
+ num_tabs++;
+
+ if (num_tabs == 0)
+ return line;
+
+ /*
+ * Space for the line and '\0'. Each tab may introduce
+ * up to 7 additional spaces.
+ */
+ new_storage_len = line_len + 1 + (num_tabs * 7);
+
+ new_line = malloc(new_storage_len);
+ if (new_line == NULL) {
+ pr_err("Failure allocating memory for tab expansion\n");
+ return NULL;
+ }
+
+ /*
+ * Copy regions starting at src and expand tabs. If there are two
+ * adjacent tabs then 'src == i', the memcpy is of size 0 and the spaces
+ * are inserted.
+ */
+ for (i = 0, src = 0, dst = 0; i < line_len && num_tabs; i++) {
+ if (line[i] == '\t') {
+ len = i - src;
+ memcpy(&new_line[dst], &line[src], len);
+ dst += len;
+ new_line[dst++] = ' ';
+ while (dst % 8 != 0)
+ new_line[dst++] = ' ';
+ src = i + 1;
+ num_tabs--;
+ }
+ }
+
+ /* Expand the last region. */
+ len = line_len - src;
+ memcpy(&new_line[dst], &line[src], len);
+ dst += len;
+ new_line[dst] = '\0';
+
+ free(*storage);
+ *storage = new_line;
+ *storage_len = new_storage_len;
+ return new_line;
+}
+
+int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
+{
+ struct annotation_options *opts = &annotate_opts;
+ struct map *map = args->ms.map;
+ struct dso *dso = map__dso(map);
+ char *command;
+ FILE *file;
+ char symfs_filename[PATH_MAX];
+ struct kcore_extract kce;
+ bool delete_extract = false;
+ bool decomp = false;
+ int lineno = 0;
+ char *fileloc = NULL;
+ int nline;
+ char *line;
+ size_t line_len;
+ const char *objdump_argv[] = {
+ "/bin/sh",
+ "-c",
+ NULL, /* Will be the objdump command to run. */
+ "--",
+ NULL, /* Will be the symfs path. */
+ NULL,
+ };
+ struct child_process objdump_process;
+ int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));
+
+ if (err)
+ return err;
+
+ pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
+ symfs_filename, sym->name, map__unmap_ip(map, sym->start),
+ map__unmap_ip(map, sym->end));
+
+ pr_debug("annotating [%p] %30s : [%p] %30s\n",
+ dso, dso__long_name(dso), sym, sym->name);
+
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) {
+ return symbol__disassemble_bpf(sym, args);
+ } else if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE) {
+ return symbol__disassemble_bpf_image(sym, args);
+ } else if (dso__binary_type(dso) == DSO_BINARY_TYPE__NOT_FOUND) {
+ return -1;
+ } else if (dso__is_kcore(dso)) {
+ kce.kcore_filename = symfs_filename;
+ kce.addr = map__rip_2objdump(map, sym->start);
+ kce.offs = sym->start;
+ kce.len = sym->end - sym->start;
+ if (!kcore_extract__create(&kce)) {
+ delete_extract = true;
+ strlcpy(symfs_filename, kce.extract_filename,
+ sizeof(symfs_filename));
+ }
+ } else if (dso__needs_decompress(dso)) {
+ char tmp[KMOD_DECOMP_LEN];
+
+ if (dso__decompress_kmodule_path(dso, symfs_filename,
+ tmp, sizeof(tmp)) < 0)
+ return -1;
+
+ decomp = true;
+ strcpy(symfs_filename, tmp);
+ }
+
+#ifdef HAVE_LIBCAPSTONE_SUPPORT
+ err = symbol__disassemble_capstone(symfs_filename, sym, args);
+ if (err == 0)
+ goto out_remove_tmp;
+#endif
+
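+ /*
+ * Fall back to running objdump through a shell; the resulting command is
+ * roughly of the form:
+ *
+ *   objdump --start-address=0x... --stop-address=0x... \
+ *           -d --no-show-raw-insn -S -C "$1"
+ *
+ * In the argv below "--" becomes $0 and symfs_filename becomes $1 of the
+ * "sh -c" invocation, so the file name is substituted for "$1".
+ */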
+ err = asprintf(&command,
+ "%s %s%s --start-address=0x%016" PRIx64
+ " --stop-address=0x%016" PRIx64
+ " %s -d %s %s %s %c%s%c %s%s -C \"$1\"",
+ opts->objdump_path ?: "objdump",
+ opts->disassembler_style ? "-M " : "",
+ opts->disassembler_style ?: "",
+ map__rip_2objdump(map, sym->start),
+ map__rip_2objdump(map, sym->end),
+ opts->show_linenr ? "-l" : "",
+ opts->show_asm_raw ? "" : "--no-show-raw-insn",
+ opts->annotate_src ? "-S" : "",
+ opts->prefix ? "--prefix " : "",
+ opts->prefix ? '"' : ' ',
+ opts->prefix ?: "",
+ opts->prefix ? '"' : ' ',
+ opts->prefix_strip ? "--prefix-strip=" : "",
+ opts->prefix_strip ?: "");
+
+ if (err < 0) {
+ pr_err("Failure allocating memory for the command to run\n");
+ goto out_remove_tmp;
+ }
+
+ pr_debug("Executing: %s\n", command);
+
+ objdump_argv[2] = command;
+ objdump_argv[4] = symfs_filename;
+
+ /* Create a pipe to read from for stdout */
+ memset(&objdump_process, 0, sizeof(objdump_process));
+ objdump_process.argv = objdump_argv;
+ objdump_process.out = -1;
+ objdump_process.err = -1;
+ objdump_process.no_stderr = 1;
+ if (start_command(&objdump_process)) {
+ pr_err("Failure starting to run %s\n", command);
+ err = -1;
+ goto out_free_command;
+ }
+
+ file = fdopen(objdump_process.out, "r");
+ if (!file) {
+ pr_err("Failure creating FILE stream for %s\n", command);
+ /*
+ * If we were using debug info we should retry with the
+ * original binary.
+ */
+ err = -1;
+ goto out_close_stdout;
+ }
+
+ /* Storage for getline. */
+ line = NULL;
+ line_len = 0;
+
+ nline = 0;
+ while (!feof(file)) {
+ const char *match;
+ char *expanded_line;
+
+ if (getline(&line, &line_len, file) < 0 || !line)
+ break;
+
+ /* Skip lines containing "filename:" */
+ match = strstr(line, symfs_filename);
+ if (match && match[strlen(symfs_filename)] == ':')
+ continue;
+
+ expanded_line = strim(line);
+ expanded_line = expand_tabs(expanded_line, &line, &line_len);
+ if (!expanded_line)
+ break;
+
+ /*
+ * The source code line number (lineno) needs to be kept
+ * across calls to symbol__parse_objdump_line(), so that it
+ * can be associated with the instructions until the next line number appears.
+ * See disasm_line__new() and struct disasm_line::line_nr.
+ */
+ if (symbol__parse_objdump_line(sym, args, expanded_line,
+ &lineno, &fileloc) < 0)
+ break;
+ nline++;
+ }
+ free(line);
+ free(fileloc);
+
+ err = finish_command(&objdump_process);
+ if (err)
+ pr_err("Error running %s\n", command);
+
+ if (nline == 0) {
+ err = -1;
+ pr_err("No output from %s\n", command);
+ }
+
+ /*
+ * kallsyms does not have symbol sizes, so there may be a nop at the end.
+ * Remove it.
+ */
+ if (dso__is_kcore(dso))
+ delete_last_nop(sym);
+
+ fclose(file);
+
+out_close_stdout:
+ close(objdump_process.out);
+
+out_free_command:
+ free(command);
+
+out_remove_tmp:
+ if (decomp)
+ unlink(symfs_filename);
+
+ if (delete_extract)
+ kcore_extract__delete(&kce);
+
+ return err;
+}
diff --git a/tools/perf/util/disasm.h b/tools/perf/util/disasm.h
new file mode 100644
index 0000000000..3d381a0435
--- /dev/null
+++ b/tools/perf/util/disasm.h
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __PERF_UTIL_DISASM_H
+#define __PERF_UTIL_DISASM_H
+
+#include "map_symbol.h"
+
+struct annotation_options;
+struct disasm_line;
+struct ins;
+struct evsel;
+struct symbol;
+
+struct arch {
+ const char *name;
+ struct ins *instructions;
+ size_t nr_instructions;
+ size_t nr_instructions_allocated;
+ struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name);
+ bool sorted_instructions;
+ bool initialized;
+ const char *insn_suffix;
+ void *priv;
+ unsigned int model;
+ unsigned int family;
+ int (*init)(struct arch *arch, char *cpuid);
+ bool (*ins_is_fused)(struct arch *arch, const char *ins1,
+ const char *ins2);
+ struct {
+ char comment_char;
+ char skip_functions_char;
+ char register_char;
+ char memory_ref_char;
+ char imm_char;
+ } objdump;
+};
+
+struct ins {
+ const char *name;
+ struct ins_ops *ops;
+};
+
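+/*
+ * Parsed operands of a disassembled instruction. Which union member is valid
+ * depends on the ins_ops that parsed the line: mov-style ops fill in
+ * 'source' and 'target', the "lock" prefix keeps the wrapped instruction in
+ * 'locked', and jump ops record comment/function markers in 'jump'.
+ */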
+struct ins_operands {
+ char *raw;
+ struct {
+ char *raw;
+ char *name;
+ struct symbol *sym;
+ u64 addr;
+ s64 offset;
+ bool offset_avail;
+ bool outside;
+ bool multi_regs;
+ } target;
+ union {
+ struct {
+ char *raw;
+ char *name;
+ u64 addr;
+ bool multi_regs;
+ } source;
+ struct {
+ struct ins ins;
+ struct ins_operands *ops;
+ } locked;
+ struct {
+ char *raw_comment;
+ char *raw_func_start;
+ } jump;
+ };
+};
+
+struct ins_ops {
+ void (*free)(struct ins_operands *ops);
+ int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms);
+ int (*scnprintf)(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name);
+};
+
+struct annotate_args {
+ struct arch *arch;
+ struct map_symbol ms;
+ struct evsel *evsel;
+ struct annotation_options *options;
+ s64 offset;
+ char *line;
+ int line_nr;
+ char *fileloc;
+};
+
+struct arch *arch__find(const char *name);
+bool arch__is(struct arch *arch, const char *name);
+
+struct ins_ops *ins__find(struct arch *arch, const char *name);
+int ins__scnprintf(struct ins *ins, char *bf, size_t size,
+ struct ins_operands *ops, int max_ins_name);
+
+bool ins__is_call(const struct ins *ins);
+bool ins__is_jump(const struct ins *ins);
+bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
+bool ins__is_nop(const struct ins *ins);
+bool ins__is_ret(const struct ins *ins);
+bool ins__is_lock(const struct ins *ins);
+
+struct disasm_line *disasm_line__new(struct annotate_args *args);
+void disasm_line__free(struct disasm_line *dl);
+
+int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size,
+ bool raw, int max_ins_name);
+
+int symbol__disassemble(struct symbol *sym, struct annotate_args *args);
+
+#endif /* __PERF_UTIL_DISASM_H */
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
index 908e168137..7d180bdaed 100644
--- a/tools/perf/util/dlfilter.c
+++ b/tools/perf/util/dlfilter.c
@@ -33,13 +33,13 @@ static void al_to_d_al(struct addr_location *al, struct perf_dlfilter_al *d_al)
if (al->map) {
struct dso *dso = map__dso(al->map);
- if (symbol_conf.show_kernel_path && dso->long_name)
- d_al->dso = dso->long_name;
+ if (symbol_conf.show_kernel_path && dso__long_name(dso))
+ d_al->dso = dso__long_name(dso);
else
- d_al->dso = dso->name;
- d_al->is_64_bit = dso->is_64_bit;
- d_al->buildid_size = dso->bid.size;
- d_al->buildid = dso->bid.data;
+ d_al->dso = dso__name(dso);
+ d_al->is_64_bit = dso__is_64_bit(dso);
+ d_al->buildid_size = dso__bid(dso)->size;
+ d_al->buildid = dso__bid(dso)->data;
} else {
d_al->dso = NULL;
d_al->is_64_bit = 0;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 22fd5fa806..67414944f2 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -40,6 +40,12 @@ static const char * const debuglink_paths[] = {
"/usr/lib/debug%s/%s"
};
+void dso__set_nsinfo(struct dso *dso, struct nsinfo *nsi)
+{
+ nsinfo__put(RC_CHK_ACCESS(dso)->nsinfo);
+ RC_CHK_ACCESS(dso)->nsinfo = nsi;
+}
+
char dso__symtab_origin(const struct dso *dso)
{
static const char origin[] = {
@@ -63,14 +69,14 @@ char dso__symtab_origin(const struct dso *dso)
[DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V',
};
- if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
+ if (dso == NULL || dso__symtab_type(dso) == DSO_BINARY_TYPE__NOT_FOUND)
return '!';
- return origin[dso->symtab_type];
+ return origin[dso__symtab_type(dso)];
}
bool dso__is_object_file(const struct dso *dso)
{
- switch (dso->binary_type) {
+ switch (dso__binary_type(dso)) {
case DSO_BINARY_TYPE__KALLSYMS:
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
case DSO_BINARY_TYPE__JAVA_JIT:
@@ -117,7 +123,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
char symfile[PATH_MAX];
unsigned int i;
- len = __symbol__join_symfs(filename, size, dso->long_name);
+ len = __symbol__join_symfs(filename, size, dso__long_name(dso));
last_slash = filename + len;
while (last_slash != filename && *last_slash != '/')
last_slash--;
@@ -159,12 +165,12 @@ int dso__read_binary_type_filename(const struct dso *dso,
case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
- snprintf(filename + len, size - len, "%s.debug", dso->long_name);
+ snprintf(filename + len, size - len, "%s.debug", dso__long_name(dso));
break;
case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
- snprintf(filename + len, size - len, "%s", dso->long_name);
+ snprintf(filename + len, size - len, "%s", dso__long_name(dso));
break;
case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
@@ -173,13 +179,13 @@ int dso__read_binary_type_filename(const struct dso *dso,
* /usr/lib/debug/lib when it is expected to be in
* /usr/lib/debug/usr/lib
*/
- if (strlen(dso->long_name) < 9 ||
- strncmp(dso->long_name, "/usr/lib/", 9)) {
+ if (strlen(dso__long_name(dso)) < 9 ||
+ strncmp(dso__long_name(dso), "/usr/lib/", 9)) {
ret = -1;
break;
}
len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
- snprintf(filename + len, size - len, "%s", dso->long_name + 4);
+ snprintf(filename + len, size - len, "%s", dso__long_name(dso) + 4);
break;
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
@@ -187,29 +193,29 @@ int dso__read_binary_type_filename(const struct dso *dso,
const char *last_slash;
size_t dir_size;
- last_slash = dso->long_name + dso->long_name_len;
- while (last_slash != dso->long_name && *last_slash != '/')
+ last_slash = dso__long_name(dso) + dso__long_name_len(dso);
+ while (last_slash != dso__long_name(dso) && *last_slash != '/')
last_slash--;
len = __symbol__join_symfs(filename, size, "");
- dir_size = last_slash - dso->long_name + 2;
+ dir_size = last_slash - dso__long_name(dso) + 2;
if (dir_size > (size - len)) {
ret = -1;
break;
}
- len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
+ len += scnprintf(filename + len, dir_size, "%s", dso__long_name(dso));
len += scnprintf(filename + len , size - len, ".debug%s",
last_slash);
break;
}
case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
- if (!dso->has_build_id) {
+ if (!dso__has_build_id(dso)) {
ret = -1;
break;
}
- build_id__sprintf(&dso->bid, build_id_hex);
+ build_id__sprintf(dso__bid_const(dso), build_id_hex);
len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
snprintf(filename + len, size - len, "%.2s/%s.debug",
build_id_hex, build_id_hex + 2);
@@ -218,23 +224,23 @@ int dso__read_binary_type_filename(const struct dso *dso,
case DSO_BINARY_TYPE__VMLINUX:
case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
- __symbol__join_symfs(filename, size, dso->long_name);
+ __symbol__join_symfs(filename, size, dso__long_name(dso));
break;
case DSO_BINARY_TYPE__GUEST_KMODULE:
case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
path__join3(filename, size, symbol_conf.symfs,
- root_dir, dso->long_name);
+ root_dir, dso__long_name(dso));
break;
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
- __symbol__join_symfs(filename, size, dso->long_name);
+ __symbol__join_symfs(filename, size, dso__long_name(dso));
break;
case DSO_BINARY_TYPE__KCORE:
case DSO_BINARY_TYPE__GUEST_KCORE:
- snprintf(filename, size, "%s", dso->long_name);
+ snprintf(filename, size, "%s", dso__long_name(dso));
break;
default:
@@ -310,8 +316,8 @@ bool is_kernel_module(const char *pathname, int cpumode)
bool dso__needs_decompress(struct dso *dso)
{
- return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
- dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
+ return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
+ dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}
int filename__decompress(const char *name, char *pathname,
@@ -363,11 +369,10 @@ static int decompress_kmodule(struct dso *dso, const char *name,
if (!dso__needs_decompress(dso))
return -1;
- if (dso->comp == COMP_ID__NONE)
+ if (dso__comp(dso) == COMP_ID__NONE)
return -1;
- return filename__decompress(name, pathname, len, dso->comp,
- &dso->load_errno);
+ return filename__decompress(name, pathname, len, dso__comp(dso), dso__load_errno(dso));
}
int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
@@ -468,17 +473,17 @@ void dso__set_module_info(struct dso *dso, struct kmod_path *m,
struct machine *machine)
{
if (machine__is_host(machine))
- dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
+ dso__set_symtab_type(dso, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE);
else
- dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
+ dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KMODULE);
/* _KMODULE_COMP should be next to _KMODULE */
if (m->kmod && m->comp) {
- dso->symtab_type++;
- dso->comp = m->comp;
+ dso__set_symtab_type(dso, dso__symtab_type(dso) + 1);
+ dso__set_comp(dso, m->comp);
}
- dso->is_kmod = 1;
+ dso__set_is_kmod(dso);
dso__set_short_name(dso, strdup(m->name), true);
}
@@ -491,13 +496,21 @@ static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
static void dso__list_add(struct dso *dso)
{
- list_add_tail(&dso->data.open_entry, &dso__data_open);
+ list_add_tail(&dso__data(dso)->open_entry, &dso__data_open);
+#ifdef REFCNT_CHECKING
+ dso__data(dso)->dso = dso__get(dso);
+#endif
+ /* Assume the dso is part of dsos, hence the optional reference count above. */
+ assert(dso__dsos(dso));
dso__data_open_cnt++;
}
static void dso__list_del(struct dso *dso)
{
- list_del_init(&dso->data.open_entry);
+ list_del_init(&dso__data(dso)->open_entry);
+#ifdef REFCNT_CHECKING
+ dso__put(dso__data(dso)->dso);
+#endif
WARN_ONCE(dso__data_open_cnt <= 0,
"DSO data fd counter out of bounds.");
dso__data_open_cnt--;
@@ -528,7 +541,7 @@ static int do_open(char *name)
char *dso__filename_with_chroot(const struct dso *dso, const char *filename)
{
- return filename_with_chroot(nsinfo__pid(dso->nsinfo), filename);
+ return filename_with_chroot(nsinfo__pid(dso__nsinfo_const(dso)), filename);
}
static int __open_dso(struct dso *dso, struct machine *machine)
@@ -541,18 +554,18 @@ static int __open_dso(struct dso *dso, struct machine *machine)
if (!name)
return -ENOMEM;
- mutex_lock(&dso->lock);
+ mutex_lock(dso__lock(dso));
if (machine)
root_dir = machine->root_dir;
- if (dso__read_binary_type_filename(dso, dso->binary_type,
+ if (dso__read_binary_type_filename(dso, dso__binary_type(dso),
root_dir, name, PATH_MAX))
goto out;
if (!is_regular_file(name)) {
char *new_name;
- if (errno != ENOENT || dso->nsinfo == NULL)
+ if (errno != ENOENT || dso__nsinfo(dso) == NULL)
goto out;
new_name = dso__filename_with_chroot(dso, name);
@@ -568,7 +581,7 @@ static int __open_dso(struct dso *dso, struct machine *machine)
size_t len = sizeof(newpath);
if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
- fd = -dso->load_errno;
+ fd = -(*dso__load_errno(dso));
goto out;
}
@@ -582,7 +595,7 @@ static int __open_dso(struct dso *dso, struct machine *machine)
unlink(name);
out:
- mutex_unlock(&dso->lock);
+ mutex_unlock(dso__lock(dso));
free(name);
return fd;
}
@@ -601,13 +614,13 @@ static int open_dso(struct dso *dso, struct machine *machine)
int fd;
struct nscookie nsc;
- if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
- mutex_lock(&dso->lock);
- nsinfo__mountns_enter(dso->nsinfo, &nsc);
- mutex_unlock(&dso->lock);
+ if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
+ mutex_lock(dso__lock(dso));
+ nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
+ mutex_unlock(dso__lock(dso));
}
fd = __open_dso(dso, machine);
- if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
+ if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE)
nsinfo__mountns_exit(&nsc);
if (fd >= 0) {
@@ -624,10 +637,10 @@ static int open_dso(struct dso *dso, struct machine *machine)
static void close_data_fd(struct dso *dso)
{
- if (dso->data.fd >= 0) {
- close(dso->data.fd);
- dso->data.fd = -1;
- dso->data.file_size = 0;
+ if (dso__data(dso)->fd >= 0) {
+ close(dso__data(dso)->fd);
+ dso__data(dso)->fd = -1;
+ dso__data(dso)->file_size = 0;
dso__list_del(dso);
}
}
@@ -646,9 +659,15 @@ static void close_dso(struct dso *dso)
static void close_first_dso(void)
{
+ struct dso_data *dso_data;
struct dso *dso;
- dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
+ dso_data = list_first_entry(&dso__data_open, struct dso_data, open_entry);
+#ifdef REFCNT_CHECKING
+ dso = dso_data->dso;
+#else
+ dso = container_of(dso_data, struct dso, data);
+#endif
close_dso(dso);
}
@@ -728,28 +747,29 @@ static void try_to_open_dso(struct dso *dso, struct machine *machine)
DSO_BINARY_TYPE__NOT_FOUND,
};
int i = 0;
+ struct dso_data *dso_data = dso__data(dso);
- if (dso->data.fd >= 0)
+ if (dso_data->fd >= 0)
return;
- if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
- dso->data.fd = open_dso(dso, machine);
+ if (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND) {
+ dso_data->fd = open_dso(dso, machine);
goto out;
}
do {
- dso->binary_type = binary_type_data[i++];
+ dso__set_binary_type(dso, binary_type_data[i++]);
- dso->data.fd = open_dso(dso, machine);
- if (dso->data.fd >= 0)
+ dso_data->fd = open_dso(dso, machine);
+ if (dso_data->fd >= 0)
goto out;
- } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
+ } while (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND);
out:
- if (dso->data.fd >= 0)
- dso->data.status = DSO_DATA_STATUS_OK;
+ if (dso_data->fd >= 0)
+ dso_data->status = DSO_DATA_STATUS_OK;
else
- dso->data.status = DSO_DATA_STATUS_ERROR;
+ dso_data->status = DSO_DATA_STATUS_ERROR;
}
/**
@@ -763,7 +783,7 @@ out:
*/
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
- if (dso->data.status == DSO_DATA_STATUS_ERROR)
+ if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
return -1;
if (pthread_mutex_lock(&dso__data_open_lock) < 0)
@@ -771,10 +791,10 @@ int dso__data_get_fd(struct dso *dso, struct machine *machine)
try_to_open_dso(dso, machine);
- if (dso->data.fd < 0)
+ if (dso__data(dso)->fd < 0)
pthread_mutex_unlock(&dso__data_open_lock);
- return dso->data.fd;
+ return dso__data(dso)->fd;
}
void dso__data_put_fd(struct dso *dso __maybe_unused)
@@ -786,10 +806,10 @@ bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
u32 flag = 1 << by;
- if (dso->data.status_seen & flag)
+ if (dso__data(dso)->status_seen & flag)
return true;
- dso->data.status_seen |= flag;
+ dso__data(dso)->status_seen |= flag;
return false;
}
@@ -799,12 +819,13 @@ static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
struct bpf_prog_info_node *node;
ssize_t size = DSO__DATA_CACHE_SIZE;
+ struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);
u64 len;
u8 *buf;
- node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
+ node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
if (!node || !node->info_linear) {
- dso->data.status = DSO_DATA_STATUS_ERROR;
+ dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
return -1;
}
@@ -822,14 +843,15 @@ static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
static int bpf_size(struct dso *dso)
{
struct bpf_prog_info_node *node;
+ struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);
- node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
+ node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
if (!node || !node->info_linear) {
- dso->data.status = DSO_DATA_STATUS_ERROR;
+ dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
return -1;
}
- dso->data.file_size = node->info_linear->info.jited_prog_len;
+ dso__data(dso)->file_size = node->info_linear->info.jited_prog_len;
return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
@@ -837,10 +859,10 @@ static int bpf_size(struct dso *dso)
static void
dso_cache__free(struct dso *dso)
{
- struct rb_root *root = &dso->data.cache;
+ struct rb_root *root = &dso__data(dso)->cache;
struct rb_node *next = rb_first(root);
- mutex_lock(&dso->lock);
+ mutex_lock(dso__lock(dso));
while (next) {
struct dso_cache *cache;
@@ -849,12 +871,12 @@ dso_cache__free(struct dso *dso)
rb_erase(&cache->rb_node, root);
free(cache);
}
- mutex_unlock(&dso->lock);
+ mutex_unlock(dso__lock(dso));
}
static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
- const struct rb_root *root = &dso->data.cache;
+ const struct rb_root *root = &dso__data(dso)->cache;
struct rb_node * const *p = &root->rb_node;
const struct rb_node *parent = NULL;
struct dso_cache *cache;
@@ -880,13 +902,13 @@ static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
- struct rb_root *root = &dso->data.cache;
+ struct rb_root *root = &dso__data(dso)->cache;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct dso_cache *cache;
u64 offset = new->offset;
- mutex_lock(&dso->lock);
+ mutex_lock(dso__lock(dso));
while (*p != NULL) {
u64 end;
@@ -907,7 +929,7 @@ dso_cache__insert(struct dso *dso, struct dso_cache *new)
cache = NULL;
out:
- mutex_unlock(&dso->lock);
+ mutex_unlock(dso__lock(dso));
return cache;
}
@@ -932,18 +954,18 @@ static ssize_t file_read(struct dso *dso, struct machine *machine,
pthread_mutex_lock(&dso__data_open_lock);
/*
- * dso->data.fd might be closed if other thread opened another
+ * dso__data(dso)->fd might be closed if other thread opened another
* file (dso) due to open file limit (RLIMIT_NOFILE).
*/
try_to_open_dso(dso, machine);
- if (dso->data.fd < 0) {
- dso->data.status = DSO_DATA_STATUS_ERROR;
+ if (dso__data(dso)->fd < 0) {
+ dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
ret = -errno;
goto out;
}
- ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
+ ret = pread(dso__data(dso)->fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
pthread_mutex_unlock(&dso__data_open_lock);
return ret;
@@ -963,11 +985,11 @@ static struct dso_cache *dso_cache__populate(struct dso *dso,
return NULL;
}
#ifdef HAVE_LIBBPF_SUPPORT
- if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
*ret = bpf_read(dso, cache_offset, cache->data);
else
#endif
- if (dso->binary_type == DSO_BINARY_TYPE__OOL)
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__OOL)
*ret = DSO__DATA_CACHE_SIZE;
else
*ret = file_read(dso, machine, cache_offset, cache->data);
@@ -1056,25 +1078,25 @@ static int file_size(struct dso *dso, struct machine *machine)
pthread_mutex_lock(&dso__data_open_lock);
/*
- * dso->data.fd might be closed if other thread opened another
+ * dso__data(dso)->fd might be closed if other thread opened another
* file (dso) due to open file limit (RLIMIT_NOFILE).
*/
try_to_open_dso(dso, machine);
- if (dso->data.fd < 0) {
+ if (dso__data(dso)->fd < 0) {
ret = -errno;
- dso->data.status = DSO_DATA_STATUS_ERROR;
+ dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
goto out;
}
- if (fstat(dso->data.fd, &st) < 0) {
+ if (fstat(dso__data(dso)->fd, &st) < 0) {
ret = -errno;
pr_err("dso cache fstat failed: %s\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
- dso->data.status = DSO_DATA_STATUS_ERROR;
+ dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
goto out;
}
- dso->data.file_size = st.st_size;
+ dso__data(dso)->file_size = st.st_size;
out:
pthread_mutex_unlock(&dso__data_open_lock);
@@ -1083,13 +1105,13 @@ out:
int dso__data_file_size(struct dso *dso, struct machine *machine)
{
- if (dso->data.file_size)
+ if (dso__data(dso)->file_size)
return 0;
- if (dso->data.status == DSO_DATA_STATUS_ERROR)
+ if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
return -1;
#ifdef HAVE_LIBBPF_SUPPORT
- if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
return bpf_size(dso);
#endif
return file_size(dso, machine);
@@ -1108,7 +1130,7 @@ off_t dso__data_size(struct dso *dso, struct machine *machine)
return -1;
/* For now just estimate dso data size is close to file size */
- return dso->data.file_size;
+ return dso__data(dso)->file_size;
}
static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
@@ -1119,7 +1141,7 @@ static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
return -1;
/* Check the offset sanity. */
- if (offset > dso->data.file_size)
+ if (offset > dso__data(dso)->file_size)
return -1;
if (offset + size < offset)
@@ -1142,7 +1164,7 @@ static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size)
{
- if (dso->data.status == DSO_DATA_STATUS_ERROR)
+ if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
return -1;
return data_read_write_offset(dso, machine, offset, data, size, true);
@@ -1182,7 +1204,7 @@ ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
{
u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */
- if (dso->data.status == DSO_DATA_STATUS_ERROR)
+ if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
return -1;
return data_read_write_offset(dso, machine, offset, data, size, false);
@@ -1235,56 +1257,139 @@ struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
*/
if (dso != NULL) {
dso__set_short_name(dso, short_name, false);
- dso->kernel = dso_type;
+ dso__set_kernel(dso, dso_type);
}
return dso;
}
-static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
+static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
{
- struct rb_root *root = dso->root;
+ struct dsos *dsos = dso__dsos(dso);
if (name == NULL)
return;
- if (dso->long_name_allocated)
- free((char *)dso->long_name);
-
- if (root) {
- rb_erase(&dso->rb_node, root);
+ if (dsos) {
/*
- * __dsos__findnew_link_by_longname_id() isn't guaranteed to
- * add it back, so a clean removal is required here.
+	 * Need to avoid breaking the sorted dsos array by renaming
+	 * the dso non-atomically.
*/
- RB_CLEAR_NODE(&dso->rb_node);
- dso->root = NULL;
+ down_write(&dsos->lock);
+ }
+
+ if (dso__long_name_allocated(dso))
+ free((char *)dso__long_name(dso));
+
+ RC_CHK_ACCESS(dso)->long_name = name;
+ RC_CHK_ACCESS(dso)->long_name_len = strlen(name);
+ dso__set_long_name_allocated(dso, name_allocated);
+
+ if (dsos) {
+ dsos->sorted = false;
+ up_write(&dsos->lock);
+ }
+}
+
+static int __dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
+{
+ if (a->maj > b->maj) return -1;
+ if (a->maj < b->maj) return 1;
+
+ if (a->min > b->min) return -1;
+ if (a->min < b->min) return 1;
+
+ if (a->ino > b->ino) return -1;
+ if (a->ino < b->ino) return 1;
+
+ /*
+ * Synthesized MMAP events have zero ino_generation, avoid comparing
+ * them with MMAP events with actual ino_generation.
+ *
+ * I found it harmful because the mismatch resulted in a new
+ * dso that did not have a build ID whereas the original dso did have a
+ * build ID. The build ID was essential because the object was not found
+ * otherwise. - Adrian
+ */
+ if (a->ino_generation && b->ino_generation) {
+ if (a->ino_generation > b->ino_generation) return -1;
+ if (a->ino_generation < b->ino_generation) return 1;
}
- dso->long_name = name;
- dso->long_name_len = strlen(name);
- dso->long_name_allocated = name_allocated;
+ return 0;
+}
+
+bool dso_id__empty(const struct dso_id *id)
+{
+ if (!id)
+ return true;
+
+ return !id->maj && !id->min && !id->ino && !id->ino_generation;
+}
+
+void __dso__inject_id(struct dso *dso, struct dso_id *id)
+{
+ struct dsos *dsos = dso__dsos(dso);
+ struct dso_id *dso_id = dso__id(dso);
+
+ /* dsos write lock held by caller. */
+
+ dso_id->maj = id->maj;
+ dso_id->min = id->min;
+ dso_id->ino = id->ino;
+ dso_id->ino_generation = id->ino_generation;
+
+ if (dsos)
+ dsos->sorted = false;
+}
- if (root)
- __dsos__findnew_link_by_longname_id(root, dso, NULL, id);
+int dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
+{
+ /*
+	 * The second argument is always dso->id, so it is zeroed if not set;
+	 * assume that passing NULL for a also means a zeroed id.
+ */
+ if (dso_id__empty(a) || dso_id__empty(b))
+ return 0;
+
+ return __dso_id__cmp(a, b);
+}
+
+int dso__cmp_id(struct dso *a, struct dso *b)
+{
+ return __dso_id__cmp(dso__id(a), dso__id(b));
}
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
- dso__set_long_name_id(dso, name, NULL, name_allocated);
+ dso__set_long_name_id(dso, name, name_allocated);
}
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
+ struct dsos *dsos = dso__dsos(dso);
+
if (name == NULL)
return;
- if (dso->short_name_allocated)
- free((char *)dso->short_name);
+ if (dsos) {
+ /*
+	 * Need to avoid breaking the sorted dsos array by renaming
+	 * the dso non-atomically.
+ */
+ down_write(&dsos->lock);
+ }
+ if (dso__short_name_allocated(dso))
+ free((char *)dso__short_name(dso));
+
+ RC_CHK_ACCESS(dso)->short_name = name;
+ RC_CHK_ACCESS(dso)->short_name_len = strlen(name);
+ dso__set_short_name_allocated(dso, name_allocated);
- dso->short_name = name;
- dso->short_name_len = strlen(name);
- dso->short_name_allocated = name_allocated;
+ if (dsos) {
+ dsos->sorted = false;
+ up_write(&dsos->lock);
+ }
}
int dso__name_len(const struct dso *dso)
@@ -1292,43 +1397,48 @@ int dso__name_len(const struct dso *dso)
if (!dso)
return strlen("[unknown]");
if (verbose > 0)
- return dso->long_name_len;
+ return dso__long_name_len(dso);
- return dso->short_name_len;
+ return dso__short_name_len(dso);
}
bool dso__loaded(const struct dso *dso)
{
- return dso->loaded;
+ return RC_CHK_ACCESS(dso)->loaded;
}
bool dso__sorted_by_name(const struct dso *dso)
{
- return dso->sorted_by_name;
+ return RC_CHK_ACCESS(dso)->sorted_by_name;
}
void dso__set_sorted_by_name(struct dso *dso)
{
- dso->sorted_by_name = true;
+ RC_CHK_ACCESS(dso)->sorted_by_name = true;
}
struct dso *dso__new_id(const char *name, struct dso_id *id)
{
- struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
+ RC_STRUCT(dso) *dso = zalloc(sizeof(*dso) + strlen(name) + 1);
+ struct dso *res;
+ struct dso_data *data;
- if (dso != NULL) {
+ if (!dso)
+ return NULL;
+
+ if (ADD_RC_CHK(res, dso)) {
strcpy(dso->name, name);
if (id)
dso->id = *id;
- dso__set_long_name_id(dso, dso->name, id, false);
- dso__set_short_name(dso, dso->name, false);
+ dso__set_long_name_id(res, dso->name, false);
+ dso__set_short_name(res, dso->name, false);
dso->symbols = RB_ROOT_CACHED;
dso->symbol_names = NULL;
dso->symbol_names_len = 0;
- dso->data.cache = RB_ROOT;
dso->inlined_nodes = RB_ROOT_CACHED;
dso->srclines = RB_ROOT_CACHED;
dso->data_types = RB_ROOT;
+ dso->global_vars = RB_ROOT;
dso->data.fd = -1;
dso->data.status = DSO_DATA_STATUS_UNKNOWN;
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
@@ -1344,15 +1454,18 @@ struct dso *dso__new_id(const char *name, struct dso_id *id)
dso->is_kmod = 0;
dso->needs_swap = DSO_SWAP__UNSET;
dso->comp = COMP_ID__NONE;
- RB_CLEAR_NODE(&dso->rb_node);
- dso->root = NULL;
- INIT_LIST_HEAD(&dso->node);
- INIT_LIST_HEAD(&dso->data.open_entry);
mutex_init(&dso->lock);
refcount_set(&dso->refcnt, 1);
+ data = &dso->data;
+ data->cache = RB_ROOT;
+ data->fd = -1;
+ data->status = DSO_DATA_STATUS_UNKNOWN;
+ INIT_LIST_HEAD(&data->open_entry);
+#ifdef REFCNT_CHECKING
+ data->dso = NULL; /* Set when on the open_entry list. */
+#endif
}
-
- return dso;
+ return res;
}
struct dso *dso__new(const char *name)
@@ -1362,71 +1475,78 @@ struct dso *dso__new(const char *name)
void dso__delete(struct dso *dso)
{
- if (!RB_EMPTY_NODE(&dso->rb_node))
- pr_err("DSO %s is still in rbtree when being deleted!\n",
- dso->long_name);
+ if (dso__dsos(dso))
+ pr_err("DSO %s is still in rbtree when being deleted!\n", dso__long_name(dso));
/* free inlines first, as they reference symbols */
- inlines__tree_delete(&dso->inlined_nodes);
- srcline__tree_delete(&dso->srclines);
- symbols__delete(&dso->symbols);
- dso->symbol_names_len = 0;
- zfree(&dso->symbol_names);
- annotated_data_type__tree_delete(&dso->data_types);
-
- if (dso->short_name_allocated) {
- zfree((char **)&dso->short_name);
- dso->short_name_allocated = false;
+ inlines__tree_delete(&RC_CHK_ACCESS(dso)->inlined_nodes);
+ srcline__tree_delete(&RC_CHK_ACCESS(dso)->srclines);
+ symbols__delete(&RC_CHK_ACCESS(dso)->symbols);
+ RC_CHK_ACCESS(dso)->symbol_names_len = 0;
+ zfree(&RC_CHK_ACCESS(dso)->symbol_names);
+ annotated_data_type__tree_delete(dso__data_types(dso));
+ global_var_type__tree_delete(dso__global_vars(dso));
+
+ if (RC_CHK_ACCESS(dso)->short_name_allocated) {
+ zfree((char **)&RC_CHK_ACCESS(dso)->short_name);
+ RC_CHK_ACCESS(dso)->short_name_allocated = false;
}
- if (dso->long_name_allocated) {
- zfree((char **)&dso->long_name);
- dso->long_name_allocated = false;
+ if (RC_CHK_ACCESS(dso)->long_name_allocated) {
+ zfree((char **)&RC_CHK_ACCESS(dso)->long_name);
+ RC_CHK_ACCESS(dso)->long_name_allocated = false;
}
dso__data_close(dso);
- auxtrace_cache__free(dso->auxtrace_cache);
+ auxtrace_cache__free(RC_CHK_ACCESS(dso)->auxtrace_cache);
dso_cache__free(dso);
dso__free_a2l(dso);
- zfree(&dso->symsrc_filename);
- nsinfo__zput(dso->nsinfo);
- mutex_destroy(&dso->lock);
- free(dso);
+ dso__free_symsrc_filename(dso);
+ nsinfo__zput(RC_CHK_ACCESS(dso)->nsinfo);
+ mutex_destroy(dso__lock(dso));
+ RC_CHK_FREE(dso);
}
struct dso *dso__get(struct dso *dso)
{
- if (dso)
- refcount_inc(&dso->refcnt);
- return dso;
+ struct dso *result;
+
+ if (RC_CHK_GET(result, dso))
+ refcount_inc(&RC_CHK_ACCESS(dso)->refcnt);
+
+ return result;
}
void dso__put(struct dso *dso)
{
- if (dso && refcount_dec_and_test(&dso->refcnt))
+ if (dso && refcount_dec_and_test(&RC_CHK_ACCESS(dso)->refcnt))
dso__delete(dso);
+ else
+ RC_CHK_PUT(dso);
}
void dso__set_build_id(struct dso *dso, struct build_id *bid)
{
- dso->bid = *bid;
- dso->has_build_id = 1;
+ RC_CHK_ACCESS(dso)->bid = *bid;
+ RC_CHK_ACCESS(dso)->has_build_id = 1;
}
bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
{
- if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
+ const struct build_id *dso_bid = dso__bid_const(dso);
+
+ if (dso_bid->size > bid->size && dso_bid->size == BUILD_ID_SIZE) {
/*
* For the backward compatibility, it allows a build-id has
* trailing zeros.
*/
- return !memcmp(dso->bid.data, bid->data, bid->size) &&
- !memchr_inv(&dso->bid.data[bid->size], 0,
- dso->bid.size - bid->size);
+ return !memcmp(dso_bid->data, bid->data, bid->size) &&
+ !memchr_inv(&dso_bid->data[bid->size], 0,
+ dso_bid->size - bid->size);
}
- return dso->bid.size == bid->size &&
- memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
+ return dso_bid->size == bid->size &&
+ memcmp(dso_bid->data, bid->data, dso_bid->size) == 0;
}
void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
@@ -1436,8 +1556,8 @@ void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
if (machine__is_default_guest(machine))
return;
sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
- if (sysfs__read_build_id(path, &dso->bid) == 0)
- dso->has_build_id = true;
+ if (sysfs__read_build_id(path, dso__bid(dso)) == 0)
+ dso__set_has_build_id(dso);
}
int dso__kernel_module_get_build_id(struct dso *dso,
@@ -1448,14 +1568,14 @@ int dso__kernel_module_get_build_id(struct dso *dso,
* kernel module short names are of the form "[module]" and
* we need just "module" here.
*/
- const char *name = dso->short_name + 1;
+ const char *name = dso__short_name(dso) + 1;
snprintf(filename, sizeof(filename),
"%s/sys/module/%.*s/notes/.note.gnu.build-id",
root_dir, (int)strlen(name) - 1, name);
- if (sysfs__read_build_id(filename, &dso->bid) == 0)
- dso->has_build_id = true;
+ if (sysfs__read_build_id(filename, dso__bid(dso)) == 0)
+ dso__set_has_build_id(dso);
return 0;
}
@@ -1464,21 +1584,21 @@ static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
char sbuild_id[SBUILD_ID_SIZE];
- build_id__sprintf(&dso->bid, sbuild_id);
+ build_id__sprintf(dso__bid(dso), sbuild_id);
return fprintf(fp, "%s", sbuild_id);
}
size_t dso__fprintf(struct dso *dso, FILE *fp)
{
struct rb_node *nd;
- size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
+ size_t ret = fprintf(fp, "dso: %s (", dso__short_name(dso));
- if (dso->short_name != dso->long_name)
- ret += fprintf(fp, "%s, ", dso->long_name);
+ if (dso__short_name(dso) != dso__long_name(dso))
+ ret += fprintf(fp, "%s, ", dso__long_name(dso));
ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
ret += dso__fprintf_buildid(dso, fp);
ret += fprintf(fp, ")\n");
- for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
+ for (nd = rb_first_cached(dso__symbols(dso)); nd; nd = rb_next(nd)) {
struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
ret += symbol__fprintf(pos, fp);
}
@@ -1502,7 +1622,7 @@ enum dso_type dso__type(struct dso *dso, struct machine *machine)
int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
- int idx, errnum = dso->load_errno;
+ int idx, errnum = *dso__load_errno(dso);
/*
* This must have a same ordering as the enum dso_load_errno.
*/
@@ -1532,3 +1652,15 @@ int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
return 0;
}
+
+bool perf_pid_map_tid(const char *dso_name, int *tid)
+{
+ return sscanf(dso_name, "/tmp/perf-%d.map", tid) == 1;
+}
+
+bool is_perf_pid_map_name(const char *dso_name)
+{
+ int tid;
+
+ return perf_pid_map_tid(dso_name, &tid);
+}
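
The dso.c hunks above convert direct field access on struct dso into the accessor API that the dso.h changes below introduce. A minimal sketch of how a call site changes under this convention; report_dso_fd() is an illustrative helper, not part of the patch:

    #include <stdio.h>
    #include "dso.h"

    /* Old style: struct dso fields were dereferenced directly. */
    static int report_dso_fd_old(struct dso *dso)
    {
            return fprintf(stdout, "%s: fd=%d\n", dso->long_name, dso->data.fd);
    }

    /* New style: go through the RC_CHK-backed accessors. */
    static int report_dso_fd(struct dso *dso)
    {
            return fprintf(stdout, "%s: fd=%d\n",
                           dso__long_name(dso), dso__data(dso)->fd);
    }
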
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index ce9f3849a7..ed0068251c 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -11,6 +11,7 @@
#include <linux/bitops.h>
#include "build-id.h"
#include "mutex.h"
+#include <internal/rc_check.h>
struct machine;
struct map;
@@ -100,26 +101,27 @@ enum dso_load_errno {
__DSO_LOAD_ERRNO__END,
};
-#define DSO__SWAP(dso, type, val) \
-({ \
- type ____r = val; \
- BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \
- if (dso->needs_swap == DSO_SWAP__YES) { \
- switch (sizeof(____r)) { \
- case 2: \
- ____r = bswap_16(val); \
- break; \
- case 4: \
- ____r = bswap_32(val); \
- break; \
- case 8: \
- ____r = bswap_64(val); \
- break; \
- default: \
- BUG_ON(1); \
- } \
- } \
- ____r; \
+#define DSO__SWAP(dso, type, val) \
+({ \
+ type ____r = val; \
+ enum dso_swap_type ___dst = dso__needs_swap(dso); \
+ BUG_ON(___dst == DSO_SWAP__UNSET); \
+ if (___dst == DSO_SWAP__YES) { \
+ switch (sizeof(____r)) { \
+ case 2: \
+ ____r = bswap_16(val); \
+ break; \
+ case 4: \
+ ____r = bswap_32(val); \
+ break; \
+ case 8: \
+ ____r = bswap_64(val); \
+ break; \
+ default: \
+ BUG_ON(1); \
+ } \
+ } \
+ ____r; \
})
#define DSO__DATA_CACHE_SIZE 4096
@@ -142,33 +144,77 @@ struct dso_cache {
char data[];
};
+struct dso_data {
+ struct rb_root cache;
+ struct list_head open_entry;
+#ifdef REFCNT_CHECKING
+ struct dso *dso;
+#endif
+ int fd;
+ int status;
+ u32 status_seen;
+ u64 file_size;
+ u64 elf_base_addr;
+ u64 debug_frame_offset;
+ u64 eh_frame_hdr_addr;
+ u64 eh_frame_hdr_offset;
+};
+
+struct dso_bpf_prog {
+ u32 id;
+ u32 sub_id;
+ struct perf_env *env;
+};
+
struct auxtrace_cache;
-struct dso {
+DECLARE_RC_STRUCT(dso) {
struct mutex lock;
- struct list_head node;
- struct rb_node rb_node; /* rbtree node sorted by long name */
- struct rb_root *root; /* root of rbtree that rb_node is in */
+ struct dsos *dsos;
struct rb_root_cached symbols;
struct symbol **symbol_names;
size_t symbol_names_len;
struct rb_root_cached inlined_nodes;
struct rb_root_cached srclines;
- struct rb_root data_types;
+ struct rb_root data_types;
+ struct rb_root global_vars;
struct {
u64 addr;
struct symbol *symbol;
} last_find_result;
+ struct build_id bid;
+ u64 text_offset;
+ u64 text_end;
+ const char *short_name;
+ const char *long_name;
void *a2l;
char *symsrc_filename;
+#if defined(__powerpc__)
+ void *dwfl; /* DWARF debug info */
+#endif
+ struct nsinfo *nsinfo;
+ struct auxtrace_cache *auxtrace_cache;
+ union { /* Tool specific area */
+ void *priv;
+ u64 db_id;
+ };
+ /* bpf prog information */
+ struct dso_bpf_prog bpf_prog;
+ /* dso data file */
+ struct dso_data data;
+ struct dso_id id;
unsigned int a2l_fails;
- enum dso_space_type kernel;
- bool is_kmod;
- enum dso_swap_type needs_swap;
- enum dso_binary_type symtab_type;
- enum dso_binary_type binary_type;
+ int comp;
+ refcount_t refcnt;
enum dso_load_errno load_errno;
+ u16 long_name_len;
+ u16 short_name_len;
+ enum dso_binary_type symtab_type:8;
+ enum dso_binary_type binary_type:8;
+ enum dso_space_type kernel:2;
+ enum dso_swap_type needs_swap:2;
+ bool is_kmod:1;
u8 adjust_symbols:1;
u8 has_build_id:1;
u8 header_build_id:1;
@@ -182,44 +228,6 @@ struct dso {
bool sorted_by_name;
bool loaded;
u8 rel;
- struct build_id bid;
- u64 text_offset;
- u64 text_end;
- const char *short_name;
- const char *long_name;
- u16 long_name_len;
- u16 short_name_len;
- void *dwfl; /* DWARF debug info */
- struct auxtrace_cache *auxtrace_cache;
- int comp;
-
- /* dso data file */
- struct {
- struct rb_root cache;
- int fd;
- int status;
- u32 status_seen;
- u64 file_size;
- struct list_head open_entry;
- u64 elf_base_addr;
- u64 debug_frame_offset;
- u64 eh_frame_hdr_addr;
- u64 eh_frame_hdr_offset;
- } data;
- /* bpf prog information */
- struct {
- u32 id;
- u32 sub_id;
- struct perf_env *env;
- } bpf_prog;
-
- union { /* Tool specific area */
- void *priv;
- u64 db_id;
- };
- struct nsinfo *nsinfo;
- struct dso_id id;
- refcount_t refcnt;
char name[];
};
@@ -230,19 +238,408 @@ struct dso {
* @n: the 'struct rb_node *' to use as a temporary storage
*/
#define dso__for_each_symbol(dso, pos, n) \
- symbols__for_each_entry(&(dso)->symbols, pos, n)
+ symbols__for_each_entry(dso__symbols(dso), pos, n)
+
+static inline void *dso__a2l(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->a2l;
+}
+
+static inline void dso__set_a2l(struct dso *dso, void *val)
+{
+ RC_CHK_ACCESS(dso)->a2l = val;
+}
+
+static inline unsigned int dso__a2l_fails(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->a2l_fails;
+}
+
+static inline void dso__set_a2l_fails(struct dso *dso, unsigned int val)
+{
+ RC_CHK_ACCESS(dso)->a2l_fails = val;
+}
+
+static inline bool dso__adjust_symbols(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->adjust_symbols;
+}
+
+static inline void dso__set_adjust_symbols(struct dso *dso, bool val)
+{
+ RC_CHK_ACCESS(dso)->adjust_symbols = val;
+}
+
+static inline bool dso__annotate_warned(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->annotate_warned;
+}
+
+static inline void dso__set_annotate_warned(struct dso *dso)
+{
+ RC_CHK_ACCESS(dso)->annotate_warned = 1;
+}
+
+static inline bool dso__auxtrace_warned(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->auxtrace_warned;
+}
-#define dsos__for_each_with_build_id(pos, head) \
- list_for_each_entry(pos, head, node) \
- if (!pos->has_build_id) \
- continue; \
- else
+static inline void dso__set_auxtrace_warned(struct dso *dso)
+{
+ RC_CHK_ACCESS(dso)->auxtrace_warned = 1;
+}
+
+static inline struct auxtrace_cache *dso__auxtrace_cache(struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->auxtrace_cache;
+}
+
+static inline void dso__set_auxtrace_cache(struct dso *dso, struct auxtrace_cache *cache)
+{
+ RC_CHK_ACCESS(dso)->auxtrace_cache = cache;
+}
+
+static inline struct build_id *dso__bid(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->bid;
+}
+
+static inline const struct build_id *dso__bid_const(const struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->bid;
+}
+
+static inline struct dso_bpf_prog *dso__bpf_prog(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->bpf_prog;
+}
+
+static inline bool dso__has_build_id(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->has_build_id;
+}
+
+static inline void dso__set_has_build_id(struct dso *dso)
+{
+ RC_CHK_ACCESS(dso)->has_build_id = true;
+}
+
+static inline bool dso__has_srcline(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->has_srcline;
+}
+
+static inline void dso__set_has_srcline(struct dso *dso, bool val)
+{
+ RC_CHK_ACCESS(dso)->has_srcline = val;
+}
+
+static inline int dso__comp(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->comp;
+}
+
+static inline void dso__set_comp(struct dso *dso, int comp)
+{
+ RC_CHK_ACCESS(dso)->comp = comp;
+}
+
+static inline struct dso_data *dso__data(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->data;
+}
+
+static inline u64 dso__db_id(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->db_id;
+}
+
+static inline void dso__set_db_id(struct dso *dso, u64 db_id)
+{
+ RC_CHK_ACCESS(dso)->db_id = db_id;
+}
+
+static inline struct dsos *dso__dsos(struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->dsos;
+}
+
+static inline void dso__set_dsos(struct dso *dso, struct dsos *dsos)
+{
+ RC_CHK_ACCESS(dso)->dsos = dsos;
+}
+
+static inline bool dso__header_build_id(struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->header_build_id;
+}
+
+static inline void dso__set_header_build_id(struct dso *dso, bool val)
+{
+ RC_CHK_ACCESS(dso)->header_build_id = val;
+}
+
+static inline bool dso__hit(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->hit;
+}
+
+static inline void dso__set_hit(struct dso *dso)
+{
+ RC_CHK_ACCESS(dso)->hit = 1;
+}
+
+static inline struct dso_id *dso__id(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->id;
+}
+
+static inline const struct dso_id *dso__id_const(const struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->id;
+}
+
+static inline struct rb_root_cached *dso__inlined_nodes(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->inlined_nodes;
+}
+
+static inline bool dso__is_64_bit(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->is_64_bit;
+}
+
+static inline void dso__set_is_64_bit(struct dso *dso, bool is)
+{
+ RC_CHK_ACCESS(dso)->is_64_bit = is;
+}
+
+static inline bool dso__is_kmod(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->is_kmod;
+}
+
+static inline void dso__set_is_kmod(struct dso *dso)
+{
+ RC_CHK_ACCESS(dso)->is_kmod = 1;
+}
+
+static inline enum dso_space_type dso__kernel(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->kernel;
+}
+
+static inline void dso__set_kernel(struct dso *dso, enum dso_space_type kernel)
+{
+ RC_CHK_ACCESS(dso)->kernel = kernel;
+}
+
+static inline u64 dso__last_find_result_addr(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->last_find_result.addr;
+}
+
+static inline void dso__set_last_find_result_addr(struct dso *dso, u64 addr)
+{
+ RC_CHK_ACCESS(dso)->last_find_result.addr = addr;
+}
+
+static inline struct symbol *dso__last_find_result_symbol(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->last_find_result.symbol;
+}
+
+static inline void dso__set_last_find_result_symbol(struct dso *dso, struct symbol *symbol)
+{
+ RC_CHK_ACCESS(dso)->last_find_result.symbol = symbol;
+}
+
+static inline enum dso_load_errno *dso__load_errno(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->load_errno;
+}
static inline void dso__set_loaded(struct dso *dso)
{
- dso->loaded = true;
+ RC_CHK_ACCESS(dso)->loaded = true;
+}
+
+static inline struct mutex *dso__lock(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->lock;
+}
+
+static inline const char *dso__long_name(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->long_name;
}
+static inline bool dso__long_name_allocated(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->long_name_allocated;
+}
+
+static inline void dso__set_long_name_allocated(struct dso *dso, bool allocated)
+{
+ RC_CHK_ACCESS(dso)->long_name_allocated = allocated;
+}
+
+static inline u16 dso__long_name_len(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->long_name_len;
+}
+
+static inline const char *dso__name(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->name;
+}
+
+static inline enum dso_swap_type dso__needs_swap(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->needs_swap;
+}
+
+static inline void dso__set_needs_swap(struct dso *dso, enum dso_swap_type type)
+{
+ RC_CHK_ACCESS(dso)->needs_swap = type;
+}
+
+static inline struct nsinfo *dso__nsinfo(struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->nsinfo;
+}
+
+static inline const struct nsinfo *dso__nsinfo_const(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->nsinfo;
+}
+
+static inline struct nsinfo **dso__nsinfo_ptr(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->nsinfo;
+}
+
+void dso__set_nsinfo(struct dso *dso, struct nsinfo *nsi);
+
+static inline u8 dso__rel(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->rel;
+}
+
+static inline void dso__set_rel(struct dso *dso, u8 rel)
+{
+ RC_CHK_ACCESS(dso)->rel = rel;
+}
+
+static inline const char *dso__short_name(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->short_name;
+}
+
+static inline bool dso__short_name_allocated(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->short_name_allocated;
+}
+
+static inline void dso__set_short_name_allocated(struct dso *dso, bool allocated)
+{
+ RC_CHK_ACCESS(dso)->short_name_allocated = allocated;
+}
+
+static inline u16 dso__short_name_len(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->short_name_len;
+}
+
+static inline struct rb_root_cached *dso__srclines(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->srclines;
+}
+
+static inline struct rb_root *dso__data_types(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->data_types;
+}
+
+static inline struct rb_root *dso__global_vars(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->global_vars;
+}
+
+static inline struct rb_root_cached *dso__symbols(struct dso *dso)
+{
+ return &RC_CHK_ACCESS(dso)->symbols;
+}
+
+static inline struct symbol **dso__symbol_names(struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->symbol_names;
+}
+
+static inline void dso__set_symbol_names(struct dso *dso, struct symbol **names)
+{
+ RC_CHK_ACCESS(dso)->symbol_names = names;
+}
+
+static inline size_t dso__symbol_names_len(struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->symbol_names_len;
+}
+
+static inline void dso__set_symbol_names_len(struct dso *dso, size_t len)
+{
+ RC_CHK_ACCESS(dso)->symbol_names_len = len;
+}
+
+static inline const char *dso__symsrc_filename(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->symsrc_filename;
+}
+
+static inline void dso__set_symsrc_filename(struct dso *dso, char *val)
+{
+ RC_CHK_ACCESS(dso)->symsrc_filename = val;
+}
+
+static inline void dso__free_symsrc_filename(struct dso *dso)
+{
+ zfree(&RC_CHK_ACCESS(dso)->symsrc_filename);
+}
+
+static inline enum dso_binary_type dso__symtab_type(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->symtab_type;
+}
+
+static inline void dso__set_symtab_type(struct dso *dso, enum dso_binary_type bt)
+{
+ RC_CHK_ACCESS(dso)->symtab_type = bt;
+}
+
+static inline u64 dso__text_end(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->text_end;
+}
+
+static inline void dso__set_text_end(struct dso *dso, u64 val)
+{
+ RC_CHK_ACCESS(dso)->text_end = val;
+}
+
+static inline u64 dso__text_offset(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->text_offset;
+}
+
+static inline void dso__set_text_offset(struct dso *dso, u64 val)
+{
+ RC_CHK_ACCESS(dso)->text_offset = val;
+}
+
+int dso_id__cmp(const struct dso_id *a, const struct dso_id *b);
+bool dso_id__empty(const struct dso_id *id);
+
struct dso *dso__new_id(const char *name, struct dso_id *id);
struct dso *dso__new(const char *name);
void dso__delete(struct dso *dso);
@@ -250,6 +647,7 @@ void dso__delete(struct dso *dso);
int dso__cmp_id(struct dso *a, struct dso *b);
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated);
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated);
+void __dso__inject_id(struct dso *dso, struct dso_id *id);
int dso__name_len(const struct dso *dso);
@@ -268,7 +666,7 @@ bool dso__loaded(const struct dso *dso);
static inline bool dso__has_symbols(const struct dso *dso)
{
- return !RB_EMPTY_ROOT(&dso->symbols.rb_root);
+ return !RB_EMPTY_ROOT(&RC_CHK_ACCESS(dso)->symbols.rb_root);
}
char *dso__filename_with_chroot(const struct dso *dso, const char *filename);
@@ -384,21 +782,33 @@ void dso__reset_find_symbol_cache(struct dso *dso);
size_t dso__fprintf_symbols_by_name(struct dso *dso, FILE *fp);
size_t dso__fprintf(struct dso *dso, FILE *fp);
+static inline enum dso_binary_type dso__binary_type(const struct dso *dso)
+{
+ return RC_CHK_ACCESS(dso)->binary_type;
+}
+
+static inline void dso__set_binary_type(struct dso *dso, enum dso_binary_type bt)
+{
+ RC_CHK_ACCESS(dso)->binary_type = bt;
+}
+
static inline bool dso__is_vmlinux(const struct dso *dso)
{
- return dso->binary_type == DSO_BINARY_TYPE__VMLINUX ||
- dso->binary_type == DSO_BINARY_TYPE__GUEST_VMLINUX;
+ enum dso_binary_type bt = dso__binary_type(dso);
+
+ return bt == DSO_BINARY_TYPE__VMLINUX || bt == DSO_BINARY_TYPE__GUEST_VMLINUX;
}
static inline bool dso__is_kcore(const struct dso *dso)
{
- return dso->binary_type == DSO_BINARY_TYPE__KCORE ||
- dso->binary_type == DSO_BINARY_TYPE__GUEST_KCORE;
+ enum dso_binary_type bt = dso__binary_type(dso);
+
+ return bt == DSO_BINARY_TYPE__KCORE || bt == DSO_BINARY_TYPE__GUEST_KCORE;
}
static inline bool dso__is_kallsyms(const struct dso *dso)
{
- return dso->kernel && dso->long_name[0] != '/';
+ return RC_CHK_ACCESS(dso)->kernel && RC_CHK_ACCESS(dso)->long_name[0] != '/';
}
bool dso__is_object_file(const struct dso *dso);
@@ -411,4 +821,11 @@ int dso__strerror_load(struct dso *dso, char *buf, size_t buflen);
void reset_fd_limit(void);
+u64 dso__find_global_type(struct dso *dso, u64 addr);
+u64 dso__findnew_global_type(struct dso *dso, u64 addr, u64 offset);
+
+/* Check if dso name is of format "/tmp/perf-%d.map" */
+bool perf_pid_map_tid(const char *dso_name, int *tid);
+bool is_perf_pid_map_name(const char *dso_name);
+
#endif /* __PERF_DSO */
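
perf_pid_map_tid() and is_perf_pid_map_name(), declared just above, recognise the "/tmp/perf-%d.map" naming convention for JIT map files. A hedged usage sketch, assuming dso is an in-scope struct dso pointer and that the map path literal is only an example:

    int tid;

    if (perf_pid_map_tid("/tmp/perf-1234.map", &tid))
            pr_debug("JIT map for tid %d\n", tid);  /* tid == 1234 */

    if (is_perf_pid_map_name(dso__name(dso)))
            pr_debug("%s looks like a perf JIT map\n", dso__name(dso));
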
diff --git a/tools/perf/util/dsos.c b/tools/perf/util/dsos.c
index cf80aa42dd..a69a9c6612 100644
--- a/tools/perf/util/dsos.c
+++ b/tools/perf/util/dsos.c
@@ -12,115 +12,140 @@
#include <symbol.h> // filename__read_build_id
#include <unistd.h>
-static int __dso_id__cmp(struct dso_id *a, struct dso_id *b)
+void dsos__init(struct dsos *dsos)
{
- if (a->maj > b->maj) return -1;
- if (a->maj < b->maj) return 1;
+ init_rwsem(&dsos->lock);
- if (a->min > b->min) return -1;
- if (a->min < b->min) return 1;
+ dsos->cnt = 0;
+ dsos->allocated = 0;
+ dsos->dsos = NULL;
+ dsos->sorted = true;
+}
- if (a->ino > b->ino) return -1;
- if (a->ino < b->ino) return 1;
+static void dsos__purge(struct dsos *dsos)
+{
+ down_write(&dsos->lock);
- /*
- * Synthesized MMAP events have zero ino_generation, avoid comparing
- * them with MMAP events with actual ino_generation.
- *
- * I found it harmful because the mismatch resulted in a new
- * dso that did not have a build ID whereas the original dso did have a
- * build ID. The build ID was essential because the object was not found
- * otherwise. - Adrian
- */
- if (a->ino_generation && b->ino_generation) {
- if (a->ino_generation > b->ino_generation) return -1;
- if (a->ino_generation < b->ino_generation) return 1;
- }
+ for (unsigned int i = 0; i < dsos->cnt; i++) {
+ struct dso *dso = dsos->dsos[i];
- return 0;
-}
+ dso__set_dsos(dso, NULL);
+ dso__put(dso);
+ }
-static bool dso_id__empty(struct dso_id *id)
-{
- if (!id)
- return true;
+ zfree(&dsos->dsos);
+ dsos->cnt = 0;
+ dsos->allocated = 0;
+ dsos->sorted = true;
- return !id->maj && !id->min && !id->ino && !id->ino_generation;
+ up_write(&dsos->lock);
}
-static void dso__inject_id(struct dso *dso, struct dso_id *id)
+void dsos__exit(struct dsos *dsos)
{
- dso->id.maj = id->maj;
- dso->id.min = id->min;
- dso->id.ino = id->ino;
- dso->id.ino_generation = id->ino_generation;
+ dsos__purge(dsos);
+ exit_rwsem(&dsos->lock);
}
-static int dso_id__cmp(struct dso_id *a, struct dso_id *b)
+
+static int __dsos__for_each_dso(struct dsos *dsos,
+ int (*cb)(struct dso *dso, void *data),
+ void *data)
{
- /*
- * The second is always dso->id, so zeroes if not set, assume passing
- * NULL for a means a zeroed id
- */
- if (dso_id__empty(a) || dso_id__empty(b))
- return 0;
+ for (unsigned int i = 0; i < dsos->cnt; i++) {
+ struct dso *dso = dsos->dsos[i];
+ int err;
- return __dso_id__cmp(a, b);
+ err = cb(dso, data);
+ if (err)
+ return err;
+ }
+ return 0;
}
-int dso__cmp_id(struct dso *a, struct dso *b)
-{
- return __dso_id__cmp(&a->id, &b->id);
-}
+struct dsos__read_build_ids_cb_args {
+ bool with_hits;
+ bool have_build_id;
+};
-bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
+static int dsos__read_build_ids_cb(struct dso *dso, void *data)
{
- bool have_build_id = false;
- struct dso *pos;
+ struct dsos__read_build_ids_cb_args *args = data;
struct nscookie nsc;
- list_for_each_entry(pos, head, node) {
- if (with_hits && !pos->hit && !dso__is_vdso(pos))
- continue;
- if (pos->has_build_id) {
- have_build_id = true;
- continue;
- }
- nsinfo__mountns_enter(pos->nsinfo, &nsc);
- if (filename__read_build_id(pos->long_name, &pos->bid) > 0) {
- have_build_id = true;
- pos->has_build_id = true;
- } else if (errno == ENOENT && pos->nsinfo) {
- char *new_name = dso__filename_with_chroot(pos, pos->long_name);
-
- if (new_name && filename__read_build_id(new_name,
- &pos->bid) > 0) {
- have_build_id = true;
- pos->has_build_id = true;
- }
- free(new_name);
+ if (args->with_hits && !dso__hit(dso) && !dso__is_vdso(dso))
+ return 0;
+ if (dso__has_build_id(dso)) {
+ args->have_build_id = true;
+ return 0;
+ }
+ nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
+ if (filename__read_build_id(dso__long_name(dso), dso__bid(dso)) > 0) {
+ args->have_build_id = true;
+ dso__set_has_build_id(dso);
+ } else if (errno == ENOENT && dso__nsinfo(dso)) {
+ char *new_name = dso__filename_with_chroot(dso, dso__long_name(dso));
+
+ if (new_name && filename__read_build_id(new_name, dso__bid(dso)) > 0) {
+ args->have_build_id = true;
+ dso__set_has_build_id(dso);
}
- nsinfo__mountns_exit(&nsc);
+ free(new_name);
}
+ nsinfo__mountns_exit(&nsc);
+ return 0;
+}
- return have_build_id;
+bool dsos__read_build_ids(struct dsos *dsos, bool with_hits)
+{
+ struct dsos__read_build_ids_cb_args args = {
+ .with_hits = with_hits,
+ .have_build_id = false,
+ };
+
+ dsos__for_each_dso(dsos, dsos__read_build_ids_cb, &args);
+ return args.have_build_id;
}
-static int __dso__cmp_long_name(const char *long_name, struct dso_id *id, struct dso *b)
+static int __dso__cmp_long_name(const char *long_name, const struct dso_id *id,
+ const struct dso *b)
{
- int rc = strcmp(long_name, b->long_name);
- return rc ?: dso_id__cmp(id, &b->id);
+ int rc = strcmp(long_name, dso__long_name(b));
+ return rc ?: dso_id__cmp(id, dso__id_const(b));
}
-static int __dso__cmp_short_name(const char *short_name, struct dso_id *id, struct dso *b)
+static int __dso__cmp_short_name(const char *short_name, const struct dso_id *id,
+ const struct dso *b)
{
- int rc = strcmp(short_name, b->short_name);
- return rc ?: dso_id__cmp(id, &b->id);
+ int rc = strcmp(short_name, dso__short_name(b));
+ return rc ?: dso_id__cmp(id, dso__id_const(b));
}
-static int dso__cmp_short_name(struct dso *a, struct dso *b)
+static int dsos__cmp_long_name_id_short_name(const void *va, const void *vb)
{
- return __dso__cmp_short_name(a->short_name, &a->id, b);
+ const struct dso *a = *((const struct dso **)va);
+ const struct dso *b = *((const struct dso **)vb);
+ int rc = strcmp(dso__long_name(a), dso__long_name(b));
+
+ if (!rc) {
+ rc = dso_id__cmp(dso__id_const(a), dso__id_const(b));
+ if (!rc)
+ rc = strcmp(dso__short_name(a), dso__short_name(b));
+ }
+ return rc;
+}
+
+struct dsos__key {
+ const char *long_name;
+ const struct dso_id *id;
+};
+
+static int dsos__cmp_key_long_name_id(const void *vkey, const void *vdso)
+{
+ const struct dsos__key *key = vkey;
+ const struct dso *dso = *((const struct dso **)vdso);
+
+ return __dso__cmp_long_name(key->long_name, key->id, dso);
}
/*
@@ -128,110 +153,137 @@ static int dso__cmp_short_name(struct dso *a, struct dso *b)
* Either one of the dso or name parameter must be non-NULL or the
* function will not work.
*/
-struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso *dso,
- const char *name, struct dso_id *id)
+static struct dso *__dsos__find_by_longname_id(struct dsos *dsos,
+ const char *name,
+ struct dso_id *id,
+ bool write_locked)
{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
-
- if (!name)
- name = dso->long_name;
- /*
- * Find node with the matching name
- */
- while (*p) {
- struct dso *this = rb_entry(*p, struct dso, rb_node);
- int rc = __dso__cmp_long_name(name, id, this);
-
- parent = *p;
- if (rc == 0) {
- /*
- * In case the new DSO is a duplicate of an existing
- * one, print a one-time warning & put the new entry
- * at the end of the list of duplicates.
- */
- if (!dso || (dso == this))
- return this; /* Find matching dso */
- /*
- * The core kernel DSOs may have duplicated long name.
- * In this case, the short name should be different.
- * Comparing the short names to differentiate the DSOs.
- */
- rc = dso__cmp_short_name(dso, this);
- if (rc == 0) {
- pr_err("Duplicated dso name: %s\n", name);
- return NULL;
- }
+ struct dsos__key key = {
+ .long_name = name,
+ .id = id,
+ };
+ struct dso **res;
+
+ if (!dsos->sorted) {
+ if (!write_locked) {
+ struct dso *dso;
+
+ up_read(&dsos->lock);
+ down_write(&dsos->lock);
+ dso = __dsos__find_by_longname_id(dsos, name, id,
+ /*write_locked=*/true);
+ up_write(&dsos->lock);
+ down_read(&dsos->lock);
+ return dso;
}
- if (rc < 0)
- p = &parent->rb_left;
- else
- p = &parent->rb_right;
+ qsort(dsos->dsos, dsos->cnt, sizeof(struct dso *),
+ dsos__cmp_long_name_id_short_name);
+ dsos->sorted = true;
}
- if (dso) {
- /* Add new node and rebalance tree */
- rb_link_node(&dso->rb_node, parent, p);
- rb_insert_color(&dso->rb_node, root);
- dso->root = root;
- }
- return NULL;
+
+ res = bsearch(&key, dsos->dsos, dsos->cnt, sizeof(struct dso *),
+ dsos__cmp_key_long_name_id);
+ if (!res)
+ return NULL;
+
+ return dso__get(*res);
}
-void __dsos__add(struct dsos *dsos, struct dso *dso)
+int __dsos__add(struct dsos *dsos, struct dso *dso)
{
- list_add_tail(&dso->node, &dsos->head);
- __dsos__findnew_link_by_longname_id(&dsos->root, dso, NULL, &dso->id);
- /*
- * It is now in the linked list, grab a reference, then garbage collect
- * this when needing memory, by looking at LRU dso instances in the
- * list with atomic_read(&dso->refcnt) == 1, i.e. no references
- * anywhere besides the one for the list, do, under a lock for the
- * list: remove it from the list, then a dso__put(), that probably will
- * be the last and will then call dso__delete(), end of life.
- *
- * That, or at the end of the 'struct machine' lifetime, when all
- * 'struct dso' instances will be removed from the list, in
- * dsos__exit(), if they have no other reference from some other data
- * structure.
- *
- * E.g.: after processing a 'perf.data' file and storing references
- * to objects instantiated while processing events, we will have
- * references to the 'thread', 'map', 'dso' structs all from 'struct
- * hist_entry' instances, but we may not need anything not referenced,
- * so we might as well call machines__exit()/machines__delete() and
- * garbage collect it.
- */
- dso__get(dso);
+ if (dsos->cnt == dsos->allocated) {
+ unsigned int to_allocate = 2;
+ struct dso **temp;
+
+ if (dsos->allocated > 0)
+ to_allocate = dsos->allocated * 2;
+ temp = realloc(dsos->dsos, sizeof(struct dso *) * to_allocate);
+ if (!temp)
+ return -ENOMEM;
+ dsos->dsos = temp;
+ dsos->allocated = to_allocate;
+ }
+ if (!dsos->sorted) {
+ dsos->dsos[dsos->cnt++] = dso__get(dso);
+ } else {
+ int low = 0, high = dsos->cnt - 1;
+ int insert = dsos->cnt; /* Default to inserting at the end. */
+
+ while (low <= high) {
+ int mid = low + (high - low) / 2;
+ int cmp = dsos__cmp_long_name_id_short_name(&dsos->dsos[mid], &dso);
+
+ if (cmp < 0) {
+ low = mid + 1;
+ } else {
+ high = mid - 1;
+ insert = mid;
+ }
+ }
+ memmove(&dsos->dsos[insert + 1], &dsos->dsos[insert],
+ (dsos->cnt - insert) * sizeof(struct dso *));
+ dsos->cnt++;
+ dsos->dsos[insert] = dso__get(dso);
+ }
+ dso__set_dsos(dso, dsos);
+ return 0;
}
-void dsos__add(struct dsos *dsos, struct dso *dso)
+int dsos__add(struct dsos *dsos, struct dso *dso)
{
+ int ret;
+
down_write(&dsos->lock);
- __dsos__add(dsos, dso);
+ ret = __dsos__add(dsos, dso);
up_write(&dsos->lock);
+ return ret;
}
-static struct dso *__dsos__findnew_by_longname_id(struct rb_root *root, const char *name, struct dso_id *id)
+struct dsos__find_id_cb_args {
+ const char *name;
+ struct dso_id *id;
+ struct dso *res;
+};
+
+static int dsos__find_id_cb(struct dso *dso, void *data)
{
- return __dsos__findnew_link_by_longname_id(root, NULL, name, id);
+ struct dsos__find_id_cb_args *args = data;
+
+ if (__dso__cmp_short_name(args->name, args->id, dso) == 0) {
+ args->res = dso__get(dso);
+ return 1;
+ }
+ return 0;
}
-static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct dso_id *id, bool cmp_short)
+static struct dso *__dsos__find_id(struct dsos *dsos, const char *name, struct dso_id *id,
+ bool cmp_short, bool write_locked)
{
- struct dso *pos;
+ struct dso *res;
if (cmp_short) {
- list_for_each_entry(pos, &dsos->head, node)
- if (__dso__cmp_short_name(name, id, pos) == 0)
- return pos;
- return NULL;
+ struct dsos__find_id_cb_args args = {
+ .name = name,
+ .id = id,
+ .res = NULL,
+ };
+
+ __dsos__for_each_dso(dsos, dsos__find_id_cb, &args);
+ return args.res;
}
- return __dsos__findnew_by_longname_id(&dsos->root, name, id);
+ res = __dsos__find_by_longname_id(dsos, name, id, write_locked);
+ return res;
}
-struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
+struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
- return __dsos__find_id(dsos, name, NULL, cmp_short);
+ struct dso *res;
+
+ down_read(&dsos->lock);
+ res = __dsos__find_id(dsos, name, NULL, cmp_short, /*write_locked=*/false);
+ up_read(&dsos->lock);
+ return res;
}
static void dso__set_basename(struct dso *dso)
@@ -239,7 +291,7 @@ static void dso__set_basename(struct dso *dso)
char *base, *lname;
int tid;
- if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
+ if (sscanf(dso__long_name(dso), "/tmp/perf-%d.map", &tid) == 1) {
if (asprintf(&base, "[JIT] tid %d", tid) < 0)
return;
} else {
@@ -247,7 +299,7 @@ static void dso__set_basename(struct dso *dso)
* basename() may modify path buffer, so we must pass
* a copy.
*/
- lname = strdup(dso->long_name);
+ lname = strdup(dso__long_name(dso));
if (!lname)
return;
@@ -271,25 +323,23 @@ static struct dso *__dsos__addnew_id(struct dsos *dsos, const char *name, struct
struct dso *dso = dso__new_id(name, id);
if (dso != NULL) {
- __dsos__add(dsos, dso);
+ /*
+	 * The dsos lock is held on entry, so rename the dso before
+	 * adding it; this avoids having to take the dsos lock again
+	 * just to mark the array as unsorted.
+ */
dso__set_basename(dso);
- /* Put dso here because __dsos_add already got it */
- dso__put(dso);
+ __dsos__add(dsos, dso);
}
return dso;
}
-struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
-{
- return __dsos__addnew_id(dsos, name, NULL);
-}
-
static struct dso *__dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id)
{
- struct dso *dso = __dsos__find_id(dsos, name, id, false);
+ struct dso *dso = __dsos__find_id(dsos, name, id, false, /*write_locked=*/true);
- if (dso && dso_id__empty(&dso->id) && !dso_id__empty(id))
- dso__inject_id(dso, id);
+ if (dso && dso_id__empty(dso__id(dso)) && !dso_id__empty(id))
+ __dso__inject_id(dso, id);
return dso ? dso : __dsos__addnew_id(dsos, name, id);
}
@@ -298,36 +348,151 @@ struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id
{
struct dso *dso;
down_write(&dsos->lock);
- dso = dso__get(__dsos__findnew_id(dsos, name, id));
+ dso = __dsos__findnew_id(dsos, name, id);
up_write(&dsos->lock);
return dso;
}
-size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
- bool (skip)(struct dso *dso, int parm), int parm)
+struct dsos__fprintf_buildid_cb_args {
+ FILE *fp;
+ bool (*skip)(struct dso *dso, int parm);
+ int parm;
+ size_t ret;
+};
+
+static int dsos__fprintf_buildid_cb(struct dso *dso, void *data)
{
- struct dso *pos;
- size_t ret = 0;
+ struct dsos__fprintf_buildid_cb_args *args = data;
+ char sbuild_id[SBUILD_ID_SIZE];
- list_for_each_entry(pos, head, node) {
- char sbuild_id[SBUILD_ID_SIZE];
+ if (args->skip && args->skip(dso, args->parm))
+ return 0;
+ build_id__sprintf(dso__bid(dso), sbuild_id);
+ args->ret += fprintf(args->fp, "%-40s %s\n", sbuild_id, dso__long_name(dso));
+ return 0;
+}
- if (skip && skip(pos, parm))
- continue;
- build_id__sprintf(&pos->bid, sbuild_id);
- ret += fprintf(fp, "%-40s %s\n", sbuild_id, pos->long_name);
- }
- return ret;
+size_t dsos__fprintf_buildid(struct dsos *dsos, FILE *fp,
+ bool (*skip)(struct dso *dso, int parm), int parm)
+{
+ struct dsos__fprintf_buildid_cb_args args = {
+ .fp = fp,
+ .skip = skip,
+ .parm = parm,
+ .ret = 0,
+ };
+
+ dsos__for_each_dso(dsos, dsos__fprintf_buildid_cb, &args);
+ return args.ret;
+}
+
+struct dsos__fprintf_cb_args {
+ FILE *fp;
+ size_t ret;
+};
+
+static int dsos__fprintf_cb(struct dso *dso, void *data)
+{
+ struct dsos__fprintf_cb_args *args = data;
+
+ args->ret += dso__fprintf(dso, args->fp);
+ return 0;
+}
+
+size_t dsos__fprintf(struct dsos *dsos, FILE *fp)
+{
+ struct dsos__fprintf_cb_args args = {
+ .fp = fp,
+ .ret = 0,
+ };
+
+ dsos__for_each_dso(dsos, dsos__fprintf_cb, &args);
+ return args.ret;
+}
+
+static int dsos__hit_all_cb(struct dso *dso, void *data __maybe_unused)
+{
+ dso__set_hit(dso);
+ return 0;
}
-size_t __dsos__fprintf(struct list_head *head, FILE *fp)
+int dsos__hit_all(struct dsos *dsos)
{
- struct dso *pos;
- size_t ret = 0;
+ return dsos__for_each_dso(dsos, dsos__hit_all_cb, NULL);
+}
- list_for_each_entry(pos, head, node) {
- ret += dso__fprintf(pos, fp);
+struct dso *dsos__findnew_module_dso(struct dsos *dsos,
+ struct machine *machine,
+ struct kmod_path *m,
+ const char *filename)
+{
+ struct dso *dso;
+
+ down_write(&dsos->lock);
+
+ dso = __dsos__find_id(dsos, m->name, NULL, /*cmp_short=*/true, /*write_locked=*/true);
+ if (dso) {
+ up_write(&dsos->lock);
+ return dso;
+ }
+ /*
+ * Failed to find the dso so create it. Change the name before adding it
+ * to the array, to avoid unnecessary sorts and potential locking
+ * issues.
+ */
+ dso = dso__new_id(m->name, /*id=*/NULL);
+ if (!dso) {
+ up_write(&dsos->lock);
+ return NULL;
}
+ dso__set_basename(dso);
+ dso__set_module_info(dso, m, machine);
+ dso__set_long_name(dso, strdup(filename), true);
+ dso__set_kernel(dso, DSO_SPACE__KERNEL);
+ __dsos__add(dsos, dso);
- return ret;
+ up_write(&dsos->lock);
+ return dso;
+}
+
+static int dsos__find_kernel_dso_cb(struct dso *dso, void *data)
+{
+ struct dso **res = data;
+ /*
+ * The cpumode passed to is_kernel_module is not the cpumode of *this*
+	 * event. If we insisted on passing the correct cpumode to is_kernel_module,
+	 * we would have to record the cpumode when adding this dso to the
+	 * list.
+	 *
+	 * However, we don't really need to pass the correct cpumode. We know the
+	 * correct cpumode must be kernel mode (if not, we should not link it
+	 * onto the kernel_dsos list).
+ *
+ * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
+ * is_kernel_module() treats it as a kernel cpumode.
+ */
+ if (!dso__kernel(dso) ||
+ is_kernel_module(dso__long_name(dso), PERF_RECORD_MISC_CPUMODE_UNKNOWN))
+ return 0;
+
+ *res = dso__get(dso);
+ return 1;
+}
+
+struct dso *dsos__find_kernel_dso(struct dsos *dsos)
+{
+ struct dso *res = NULL;
+
+ dsos__for_each_dso(dsos, dsos__find_kernel_dso_cb, &res);
+ return res;
+}
+
+int dsos__for_each_dso(struct dsos *dsos, int (*cb)(struct dso *dso, void *data), void *data)
+{
+ int err;
+
+ down_read(&dsos->lock);
+ err = __dsos__for_each_dso(dsos, cb, data);
+ up_read(&dsos->lock);
+ return err;
}
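
dsos__for_each_dso() above walks the array under the read lock and stops at the first non-zero return from the callback, as dsos__find_kernel_dso_cb() does. A minimal counting callback as a sketch; count_args, count_build_id_cb and count_dsos_with_build_id are illustrative names, not part of the patch:

    struct count_args {
            unsigned int with_build_id;
    };

    static int count_build_id_cb(struct dso *dso, void *data)
    {
            struct count_args *args = data;

            if (dso__has_build_id(dso))
                    args->with_build_id++;
            return 0;       /* zero keeps the iteration going */
    }

    static unsigned int count_dsos_with_build_id(struct dsos *dsos)
    {
            struct count_args args = { .with_build_id = 0 };

            dsos__for_each_dso(dsos, count_build_id_cb, &args);
            return args.with_build_id;
    }
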
diff --git a/tools/perf/util/dsos.h b/tools/perf/util/dsos.h
index 5dbec2bc69..6c13b65648 100644
--- a/tools/perf/util/dsos.h
+++ b/tools/perf/util/dsos.h
@@ -10,31 +10,43 @@
struct dso;
struct dso_id;
+struct kmod_path;
+struct machine;
/*
- * DSOs are put into both a list for fast iteration and rbtree for fast
- * long name lookup.
+ * Collection of DSOs as an array for iteration speed, kept sorted so that
+ * long name lookups can use a binary search.
*/
struct dsos {
- struct list_head head;
- struct rb_root root; /* rbtree root sorted by long name */
struct rw_semaphore lock;
+ struct dso **dsos;
+ unsigned int cnt;
+ unsigned int allocated;
+ bool sorted;
};
-void __dsos__add(struct dsos *dsos, struct dso *dso);
-void dsos__add(struct dsos *dsos, struct dso *dso);
-struct dso *__dsos__addnew(struct dsos *dsos, const char *name);
-struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short);
+void dsos__init(struct dsos *dsos);
+void dsos__exit(struct dsos *dsos);
+
+int __dsos__add(struct dsos *dsos, struct dso *dso);
+int dsos__add(struct dsos *dsos, struct dso *dso);
+struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short);
struct dso *dsos__findnew_id(struct dsos *dsos, const char *name, struct dso_id *id);
-struct dso *__dsos__findnew_link_by_longname_id(struct rb_root *root, struct dso *dso,
- const char *name, struct dso_id *id);
-
-bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
+bool dsos__read_build_ids(struct dsos *dsos, bool with_hits);
-size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
+size_t dsos__fprintf_buildid(struct dsos *dsos, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm);
-size_t __dsos__fprintf(struct list_head *head, FILE *fp);
+size_t dsos__fprintf(struct dsos *dsos, FILE *fp);
+
+int dsos__hit_all(struct dsos *dsos);
+
+struct dso *dsos__findnew_module_dso(struct dsos *dsos, struct machine *machine,
+ struct kmod_path *m, const char *filename);
+
+struct dso *dsos__find_kernel_dso(struct dsos *dsos);
+
+int dsos__for_each_dso(struct dsos *dsos, int (*cb)(struct dso *dso, void *data), void *data);
#endif /* __PERF_DSOS */
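
The functions declared above give struct dsos a simple init / find-or-create / tear-down lifecycle. A hedged sketch of that flow; the library path is only an example and error handling is trimmed:

    struct dsos dsos;
    struct dso *dso;

    dsos__init(&dsos);

    /* Finds an existing entry or creates one; the caller receives a reference. */
    dso = dsos__findnew_id(&dsos, "/usr/lib/libc.so.6", /*id=*/NULL);
    if (dso)
            dso__put(dso);

    dsos__exit(&dsos);      /* drops the references held by the array */
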
diff --git a/tools/perf/util/dump-insn.h b/tools/perf/util/dump-insn.h
index 6501250615..4a7797dd6d 100644
--- a/tools/perf/util/dump-insn.h
+++ b/tools/perf/util/dump-insn.h
@@ -11,6 +11,7 @@ struct thread;
struct perf_insn {
/* Initialized by callers: */
struct thread *thread;
+ struct machine *machine;
u8 cpumode;
bool is64bit;
int cpu;
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index f93e57e2fc..44ef968a7a 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -9,6 +9,7 @@
#include <stdlib.h>
#include "debug.h"
#include "dwarf-aux.h"
+#include "dwarf-regs.h"
#include "strbuf.h"
#include "string2.h"
@@ -696,6 +697,49 @@ Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
return die_mem;
}
+static int __die_find_func_rettype_cb(Dwarf_Die *die_mem, void *data)
+{
+ const char *func_name;
+
+ if (dwarf_tag(die_mem) != DW_TAG_subprogram)
+ return DIE_FIND_CB_SIBLING;
+
+ func_name = dwarf_diename(die_mem);
+ if (func_name && !strcmp(func_name, data))
+ return DIE_FIND_CB_END;
+
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_find_func_rettype - Search for the return type of a function
+ * @cu_die: a CU DIE
+ * @name: target function name
+ * @die_mem: a buffer for result DIE
+ *
+ * Search for a non-inlined function matching @name, store its return type
+ * in @die_mem and return it if found.  Returns NULL on failure.  Note that
+ * it doesn't need to find a definition of the function, so it doesn't match
+ * by address.  Most likely it will find a declaration at the top level, so
+ * the callback continues to sibling entries only.
+ */
+Dwarf_Die *die_find_func_rettype(Dwarf_Die *cu_die, const char *name,
+ Dwarf_Die *die_mem)
+{
+ Dwarf_Die tmp_die;
+
+ cu_die = die_find_child(cu_die, __die_find_func_rettype_cb,
+ (void *)name, &tmp_die);
+ if (!cu_die)
+ return NULL;
+
+ if (die_get_real_type(&tmp_die, die_mem) == NULL)
+ return NULL;
+
+ return die_mem;
+}
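
A brief usage sketch (not part of the patch), assuming the caller already holds a CU DIE; the function name and wrapper are hypothetical, and dwarf_diename() is the usual libdw accessor for the resulting type's name:

        static void print_rettype(Dwarf_Die *cu_die, const char *func_name)
        {
                Dwarf_Die rettype;

                if (die_find_func_rettype(cu_die, func_name, &rettype) != NULL)
                        pr_debug("%s returns %s\n", func_name,
                                 dwarf_diename(&rettype) ?: "(anonymous)");
        }
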
+
struct __instance_walk_param {
void *addr;
int (*callback)(Dwarf_Die *, void *);
@@ -1066,8 +1110,10 @@ int die_get_typename_from_type(Dwarf_Die *type_die, struct strbuf *buf)
const char *tmp = "";
tag = dwarf_tag(type_die);
- if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type)
+ if (tag == DW_TAG_pointer_type)
tmp = "*";
+ else if (tag == DW_TAG_array_type)
+ tmp = "[]";
else if (tag == DW_TAG_subroutine_type) {
/* Function pointer */
return strbuf_add(buf, "(function_type)", 15);
@@ -1147,6 +1193,8 @@ static int reg_from_dwarf_op(Dwarf_Op *op)
case DW_OP_regx:
case DW_OP_bregx:
return op->number;
+ case DW_OP_fbreg:
+ return DWARF_REG_FB;
default:
break;
}
@@ -1160,6 +1208,7 @@ static int offset_from_dwarf_op(Dwarf_Op *op)
case DW_OP_regx:
return 0;
case DW_OP_breg0 ... DW_OP_breg31:
+ case DW_OP_fbreg:
return op->number;
case DW_OP_bregx:
return op->number2;
@@ -1353,6 +1402,9 @@ static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data,
return true;
}
+ if (addr_offset < addr_type)
+ return false;
+
if (die_get_real_type(die_mem, &type_die) == NULL)
return false;
@@ -1399,7 +1451,6 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
/* Local variables accessed using frame base register */
if (data->is_fbreg && ops->atom == DW_OP_fbreg &&
- data->offset >= (int)ops->number &&
check_allowed_ops(ops, nops) &&
match_var_offset(die_mem, data, data->offset, ops->number,
/*is_pointer=*/false))
@@ -1490,9 +1541,6 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg)
if (ops->atom != DW_OP_addr)
continue;
- if (data->addr < ops->number)
- continue;
-
if (check_allowed_ops(ops, nops) &&
match_var_offset(die_mem, data, data->addr, ops->number,
/*is_pointer=*/false))
@@ -1504,7 +1552,6 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg)
/**
* die_find_variable_by_addr - Find variable located at given address
* @sc_die: a scope DIE
- * @pc: the program address to find
* @addr: the data address to find
* @die_mem: a buffer to save the resulting DIE
* @offset: the offset in the resulting type
@@ -1512,12 +1559,10 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg)
* Find the variable DIE located at the given address (in PC-relative mode).
* This is usually for global variables.
*/
-Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
- Dwarf_Addr addr, Dwarf_Die *die_mem,
- int *offset)
+Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem, int *offset)
{
struct find_var_data data = {
- .pc = pc,
.addr = addr,
};
Dwarf_Die *result;
@@ -1587,6 +1632,68 @@ void die_collect_vars(Dwarf_Die *sc_die, struct die_var_type **var_types)
die_find_child(sc_die, __die_collect_vars_cb, (void *)var_types, &die_mem);
}
+
+static int __die_collect_global_vars_cb(Dwarf_Die *die_mem, void *arg)
+{
+ struct die_var_type **var_types = arg;
+ Dwarf_Die type_die;
+ int tag = dwarf_tag(die_mem);
+ Dwarf_Attribute attr;
+ Dwarf_Addr base, start, end;
+ Dwarf_Op *ops;
+ size_t nops;
+ struct die_var_type *vt;
+
+ if (tag != DW_TAG_variable)
+ return DIE_FIND_CB_SIBLING;
+
+ if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL)
+ return DIE_FIND_CB_SIBLING;
+
+ /* Only collect the location with an absolute address. */
+ if (dwarf_getlocations(&attr, 0, &base, &start, &end, &ops, &nops) <= 0)
+ return DIE_FIND_CB_SIBLING;
+
+ if (ops->atom != DW_OP_addr)
+ return DIE_FIND_CB_SIBLING;
+
+ if (!check_allowed_ops(ops, nops))
+ return DIE_FIND_CB_SIBLING;
+
+ if (die_get_real_type(die_mem, &type_die) == NULL)
+ return DIE_FIND_CB_SIBLING;
+
+ vt = malloc(sizeof(*vt));
+ if (vt == NULL)
+ return DIE_FIND_CB_END;
+
+ vt->die_off = dwarf_dieoffset(&type_die);
+ vt->addr = ops->number;
+ vt->reg = -1;
+ vt->offset = 0;
+ vt->next = *var_types;
+ *var_types = vt;
+
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_collect_global_vars - Save all global variables
+ * @cu_die: a CU DIE
+ * @var_types: a pointer to save the resulting list
+ *
+ * Collect all global variables in @cu_die and save them to @var_types.
+ * The @var_types is a singly-linked list containing type and location info.
+ * The actual type can be retrieved later using dwarf_offdie() with 'die_off'.
+ *
+ * Callers should free @var_types.
+ */
+void die_collect_global_vars(Dwarf_Die *cu_die, struct die_var_type **var_types)
+{
+ Dwarf_Die die_mem;
+
+ die_find_child(cu_die, __die_collect_global_vars_cb, (void *)var_types, &die_mem);
+}
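
A minimal caller sketch (not part of the patch) that walks and frees the resulting list, per the "Callers should free @var_types" note above; the helper name is hypothetical:

        static void print_and_free_global_vars(Dwarf_Die *cu_die)
        {
                struct die_var_type *vt = NULL;

                die_collect_global_vars(cu_die, &vt);
                while (vt != NULL) {
                        struct die_var_type *next = vt->next;

                        pr_debug("global var: type DIE at %#llx, addr %#llx\n",
                                 (unsigned long long)vt->die_off,
                                 (unsigned long long)vt->addr);
                        free(vt);
                        vt = next;
                }
        }
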
#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
#ifdef HAVE_DWARF_CFI_SUPPORT
@@ -1855,3 +1962,116 @@ int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes)
*scopes = data.scopes;
return data.nr;
}
+
+static int __die_find_member_offset_cb(Dwarf_Die *die_mem, void *arg)
+{
+ Dwarf_Die type_die;
+ Dwarf_Word size, loc;
+ Dwarf_Word offset = (long)arg;
+ int tag = dwarf_tag(die_mem);
+
+ if (tag != DW_TAG_member)
+ return DIE_FIND_CB_SIBLING;
+
+ /* Unions might not have location */
+ if (die_get_data_member_location(die_mem, &loc) < 0)
+ loc = 0;
+
+ if (offset == loc)
+ return DIE_FIND_CB_END;
+
+ if (die_get_real_type(die_mem, &type_die) == NULL) {
+ // TODO: add a pr_debug_dtp() later for this unlikely failure
+ return DIE_FIND_CB_SIBLING;
+ }
+
+ if (dwarf_aggregate_size(&type_die, &size) < 0)
+ size = 0;
+
+ if (loc < offset && offset < (loc + size))
+ return DIE_FIND_CB_END;
+
+ return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_get_member_type - Return type info of struct member
+ * @type_die: a type DIE
+ * @offset: offset in the type
+ * @die_mem: a buffer to save the resulting DIE
+ *
+ * If @type_die is a struct, this function returns the type of the member
+ * located at @offset.  For now, it just returns the first matching member
+ * in a union.  For other types, it returns the given type directly if
+ * @offset is within the size of the type, or NULL otherwise.
+ */
+Dwarf_Die *die_get_member_type(Dwarf_Die *type_die, int offset,
+ Dwarf_Die *die_mem)
+{
+ Dwarf_Die *member;
+ Dwarf_Die mb_type;
+ int tag;
+
+ tag = dwarf_tag(type_die);
+ /* If it's not a compound type, return the type directly */
+ if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) {
+ Dwarf_Word size;
+
+ if (dwarf_aggregate_size(type_die, &size) < 0)
+ size = 0;
+
+ if ((unsigned)offset >= size)
+ return NULL;
+
+ *die_mem = *type_die;
+ return die_mem;
+ }
+
+ mb_type = *type_die;
+ /* TODO: Handle union types better? */
+ while (tag == DW_TAG_structure_type || tag == DW_TAG_union_type) {
+ member = die_find_child(&mb_type, __die_find_member_offset_cb,
+ (void *)(long)offset, die_mem);
+ if (member == NULL)
+ return NULL;
+
+ if (die_get_real_type(member, &mb_type) == NULL)
+ return NULL;
+
+ tag = dwarf_tag(&mb_type);
+
+ if (tag == DW_TAG_structure_type || tag == DW_TAG_union_type) {
+ Dwarf_Word loc;
+
+ /* Update offset for the start of the member struct */
+ if (die_get_data_member_location(member, &loc) == 0)
+ offset -= loc;
+ }
+ }
+ *die_mem = mb_type;
+ return die_mem;
+}
+
+/**
+ * die_deref_ptr_type - Return type info for pointer access
+ * @ptr_die: a pointer type DIE
+ * @offset: access offset for the pointer
+ * @die_mem: a buffer to save the resulting DIE
+ *
+ * This function follows the pointer in @ptr_die with the given @offset
+ * and saves the resulting type in @die_mem.  If the pointer points to
+ * a struct type, the member at that offset is returned.
+ */
+Dwarf_Die *die_deref_ptr_type(Dwarf_Die *ptr_die, int offset,
+ Dwarf_Die *die_mem)
+{
+ Dwarf_Die type_die;
+
+ if (dwarf_tag(ptr_die) != DW_TAG_pointer_type)
+ return NULL;
+
+ if (die_get_real_type(ptr_die, &type_die) == NULL)
+ return NULL;
+
+ return die_get_member_type(&type_die, offset, die_mem);
+}
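
A short sketch (not part of the patch) of how the two helpers above compose when resolving an access like ptr->field at a known byte offset; the wrapper name and offset are hypothetical:

        static void print_deref_type(Dwarf_Die *ptr_die, int access_offset)
        {
                Dwarf_Die member;

                if (die_deref_ptr_type(ptr_die, access_offset, &member) != NULL)
                        pr_debug("member type at +%d: %s\n", access_offset,
                                 dwarf_diename(&member) ?: "(anonymous)");
        }
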
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index efafd3a1f5..24446412b8 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -94,6 +94,10 @@ Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
Dwarf_Die *die_find_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
Dwarf_Die *die_mem);
+/* Search a non-inlined function by name and returns its return type */
+Dwarf_Die *die_find_func_rettype(Dwarf_Die *sp_die, const char *name,
+ Dwarf_Die *die_mem);
+
/* Walk on the instances of given DIE */
int die_walk_instances(Dwarf_Die *in_die,
int (*callback)(Dwarf_Die *, void *), void *data);
@@ -144,6 +148,12 @@ struct die_var_type {
int offset;
};
+/* Return type info of a member at offset */
+Dwarf_Die *die_get_member_type(Dwarf_Die *type_die, int offset, Dwarf_Die *die_mem);
+
+/* Return type info of the data that the pointer and offset point to */
+Dwarf_Die *die_deref_ptr_type(Dwarf_Die *ptr_die, int offset, Dwarf_Die *die_mem);
+
#ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT
/* Get byte offset range of given variable DIE */
@@ -155,13 +165,15 @@ Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg,
Dwarf_Die *die_mem);
/* Find a (global) variable located in the 'addr' */
-Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
- Dwarf_Addr addr, Dwarf_Die *die_mem,
- int *offset);
+Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr addr,
+ Dwarf_Die *die_mem, int *offset);
/* Save all variables and parameters in this scope */
void die_collect_vars(Dwarf_Die *sc_die, struct die_var_type **var_types);
+/* Save all global variables in this CU */
+void die_collect_global_vars(Dwarf_Die *cu_die, struct die_var_type **var_types);
+
#else /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
@@ -182,7 +194,6 @@ static inline Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die __maybe_unus
}
static inline Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die __maybe_unused,
- Dwarf_Addr pc __maybe_unused,
Dwarf_Addr addr __maybe_unused,
Dwarf_Die *die_mem __maybe_unused,
int *offset __maybe_unused)
@@ -195,6 +206,11 @@ static inline void die_collect_vars(Dwarf_Die *sc_die __maybe_unused,
{
}
+static inline void die_collect_global_vars(Dwarf_Die *cu_die __maybe_unused,
+ struct die_var_type **var_types __maybe_unused)
+{
+}
+
#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
#ifdef HAVE_DWARF_CFI_SUPPORT
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 198903157f..f32f9abf63 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -726,7 +726,7 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
dso = al->map ? map__dso(al->map) : NULL;
dump_printf(" ...... dso: %s\n",
dso
- ? dso->long_name
+ ? dso__long_name(dso)
: (al->level == 'H' ? "[hypervisor]" : "<not found>"));
if (thread__is_filtered(thread))
@@ -750,10 +750,10 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
if (al->map) {
if (symbol_conf.dso_list &&
(!dso || !(strlist__has_entry(symbol_conf.dso_list,
- dso->short_name) ||
- (dso->short_name != dso->long_name &&
+ dso__short_name(dso)) ||
+ (dso__short_name(dso) != dso__long_name(dso) &&
strlist__has_entry(symbol_conf.dso_list,
- dso->long_name))))) {
+ dso__long_name(dso)))))) {
al->filtered |= (1 << HIST_FILTER__DSO);
}
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 55a300a097..3a719edafc 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -298,7 +298,8 @@ struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
#ifdef HAVE_LIBTRACEEVENT
struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide)
{
- struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0);
+ struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0,
+ /*format=*/true);
if (IS_ERR(evsel))
return evsel;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 3536404e94..4f818ab6b6 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -452,7 +452,7 @@ out_err:
* Returns pointer with encoded error via <linux/err.h> interface.
*/
#ifdef HAVE_LIBTRACEEVENT
-struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx)
+struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool format)
{
struct evsel *evsel = zalloc(perf_evsel__object.size);
int err = -ENOMEM;
@@ -469,14 +469,20 @@ struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx)
if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
goto out_free;
- evsel->tp_format = trace_event__tp_format(sys, name);
- if (IS_ERR(evsel->tp_format)) {
- err = PTR_ERR(evsel->tp_format);
- goto out_free;
+ event_attr_init(&attr);
+
+ if (format) {
+ evsel->tp_format = trace_event__tp_format(sys, name);
+ if (IS_ERR(evsel->tp_format)) {
+ err = PTR_ERR(evsel->tp_format);
+ goto out_free;
+ }
+ attr.config = evsel->tp_format->id;
+ } else {
+ attr.config = (__u64) -1;
}
- event_attr_init(&attr);
- attr.config = evsel->tp_format->id;
+
attr.sample_period = 1;
evsel__init(evsel, &attr, idx);
}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 517cff431d..375a38e15c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -234,14 +234,14 @@ void free_config_terms(struct list_head *config_terms);
#ifdef HAVE_LIBTRACEEVENT
-struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx);
+struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool format);
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
static inline struct evsel *evsel__newtp(const char *sys, const char *name)
{
- return evsel__newtp_idx(sys, name, 0);
+ return evsel__newtp_idx(sys, name, 0, true);
}
#endif
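
A usage sketch (not part of the patch) of the new 'format' argument: passing false skips reading the tracefs format file and leaves attr.config as (__u64)-1 for the caller to fill in, while evsel__newtp() keeps the old behaviour:

        struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0,
                                               /*format=*/false);

        if (IS_ERR(evsel))
                return PTR_ERR(evsel);
        /* evsel->core.attr.config is (__u64)-1 here and must be set before use. */
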
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index 5f18d20ea9..4e2e4f40e1 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -43,6 +43,9 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
#elif defined(__riscv) && __riscv_xlen == 64
#define GEN_ELF_ARCH EM_RISCV
#define GEN_ELF_CLASS ELFCLASS64
+#elif defined(__riscv) && __riscv_xlen == 32
+#define GEN_ELF_ARCH EM_RISCV
+#define GEN_ELF_CLASS ELFCLASS32
#elif defined(__loongarch__)
#define GEN_ELF_ARCH EM_LOONGARCH
#define GEN_ELF_CLASS ELFCLASS64
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 3fe28edc3d..55e9553861 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2308,7 +2308,7 @@ static int __event_process_build_id(struct perf_record_header_build_id *bev,
build_id__init(&bid, bev->data, size);
dso__set_build_id(dso, &bid);
- dso->header_build_id = 1;
+ dso__set_header_build_id(dso, true);
if (dso_space != DSO_SPACE__USER) {
struct kmod_path m = { .name = NULL, };
@@ -2316,13 +2316,13 @@ static int __event_process_build_id(struct perf_record_header_build_id *bev,
if (!kmod_path__parse_name(&m, filename) && m.kmod)
dso__set_module_info(dso, &m, machine);
- dso->kernel = dso_space;
+ dso__set_kernel(dso, dso_space);
free(m.name);
}
- build_id__sprintf(&dso->bid, sbuild_id);
+ build_id__sprintf(dso__bid(dso), sbuild_id);
pr_debug("build id event received for %s: %s [%zu]\n",
- dso->long_name, sbuild_id, size);
+ dso__long_name(dso), sbuild_id, size);
dso__put(dso);
}
diff --git a/tools/perf/util/help-unknown-cmd.c b/tools/perf/util/help-unknown-cmd.c
index eab99ea6ac..a0a46e34f8 100644
--- a/tools/perf/util/help-unknown-cmd.c
+++ b/tools/perf/util/help-unknown-cmd.c
@@ -52,46 +52,48 @@ static int add_cmd_list(struct cmdnames *cmds, struct cmdnames *old)
return 0;
}
-const char *help_unknown_cmd(const char *cmd)
+const char *help_unknown_cmd(const char *cmd, struct cmdnames *main_cmds)
{
unsigned int i, n = 0, best_similarity = 0;
- struct cmdnames main_cmds, other_cmds;
+ struct cmdnames other_cmds;
- memset(&main_cmds, 0, sizeof(main_cmds));
- memset(&other_cmds, 0, sizeof(main_cmds));
+ memset(&other_cmds, 0, sizeof(other_cmds));
perf_config(perf_unknown_cmd_config, NULL);
- load_command_list("perf-", &main_cmds, &other_cmds);
+ load_command_list("perf-", main_cmds, &other_cmds);
- if (add_cmd_list(&main_cmds, &other_cmds) < 0) {
+ if (add_cmd_list(main_cmds, &other_cmds) < 0) {
fprintf(stderr, "ERROR: Failed to allocate command list for unknown command.\n");
goto end;
}
- qsort(main_cmds.names, main_cmds.cnt,
- sizeof(main_cmds.names), cmdname_compare);
- uniq(&main_cmds);
+ qsort(main_cmds->names, main_cmds->cnt,
+ sizeof(main_cmds->names), cmdname_compare);
+ uniq(main_cmds);
- if (main_cmds.cnt) {
+ if (main_cmds->cnt) {
/* This reuses cmdname->len for similarity index */
- for (i = 0; i < main_cmds.cnt; ++i)
- main_cmds.names[i]->len =
- levenshtein(cmd, main_cmds.names[i]->name, 0, 2, 1, 4);
-
- qsort(main_cmds.names, main_cmds.cnt,
- sizeof(*main_cmds.names), levenshtein_compare);
+ for (i = 0; i < main_cmds->cnt; ++i) {
+ main_cmds->names[i]->len =
+ levenshtein(cmd, main_cmds->names[i]->name,
+ /*swap_penalty=*/0,
+ /*substitution_penalty=*/2,
+ /*insertion_penalty=*/1,
+ /*deletion_penalty=*/1);
+ }
+ qsort(main_cmds->names, main_cmds->cnt,
+ sizeof(*main_cmds->names), levenshtein_compare);
- best_similarity = main_cmds.names[0]->len;
+ best_similarity = main_cmds->names[0]->len;
n = 1;
- while (n < main_cmds.cnt && best_similarity == main_cmds.names[n]->len)
+ while (n < main_cmds->cnt && best_similarity == main_cmds->names[n]->len)
++n;
}
if (autocorrect && n == 1) {
- const char *assumed = main_cmds.names[0]->name;
+ const char *assumed = main_cmds->names[0]->name;
- main_cmds.names[0] = NULL;
- clean_cmdnames(&main_cmds);
+ main_cmds->names[0] = NULL;
clean_cmdnames(&other_cmds);
fprintf(stderr, "WARNING: You called a perf program named '%s', "
"which does not exist.\n"
@@ -107,15 +109,14 @@ const char *help_unknown_cmd(const char *cmd)
fprintf(stderr, "perf: '%s' is not a perf-command. See 'perf --help'.\n", cmd);
- if (main_cmds.cnt && best_similarity < 6) {
+ if (main_cmds->cnt && best_similarity < 6) {
fprintf(stderr, "\nDid you mean %s?\n",
n < 2 ? "this": "one of these");
for (i = 0; i < n; i++)
- fprintf(stderr, "\t%s\n", main_cmds.names[i]->name);
+ fprintf(stderr, "\t%s\n", main_cmds->names[i]->name);
}
end:
- clean_cmdnames(&main_cmds);
clean_cmdnames(&other_cmds);
- exit(1);
+ return NULL;
}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index fa359180eb..2e9e193179 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -9,6 +9,7 @@
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
+#include "mem-info.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
@@ -153,8 +154,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
}
if (h->mem_info) {
- if (h->mem_info->daddr.ms.sym) {
- symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
+ if (mem_info__daddr(h->mem_info)->ms.sym) {
+ symlen = (int)mem_info__daddr(h->mem_info)->ms.sym->namelen + 4
+ unresolved_col_width + 2;
hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
symlen);
@@ -168,8 +169,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
symlen);
}
- if (h->mem_info->iaddr.ms.sym) {
- symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
+ if (mem_info__iaddr(h->mem_info)->ms.sym) {
+ symlen = (int)mem_info__iaddr(h->mem_info)->ms.sym->namelen + 4
+ unresolved_col_width + 2;
hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
symlen);
@@ -179,8 +180,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
symlen);
}
- if (h->mem_info->daddr.ms.map) {
- symlen = dso__name_len(map__dso(h->mem_info->daddr.ms.map));
+ if (mem_info__daddr(h->mem_info)->ms.map) {
+ symlen = dso__name_len(map__dso(mem_info__daddr(h->mem_info)->ms.map));
hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
symlen);
} else {
@@ -308,6 +309,9 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
dest->period_us += src->period_us;
dest->period_guest_sys += src->period_guest_sys;
dest->period_guest_us += src->period_guest_us;
+ dest->weight1 += src->weight1;
+ dest->weight2 += src->weight2;
+ dest->weight3 += src->weight3;
dest->nr_events += src->nr_events;
}
@@ -315,7 +319,9 @@ static void he_stat__decay(struct he_stat *he_stat)
{
he_stat->period = (he_stat->period * 7) / 8;
he_stat->nr_events = (he_stat->nr_events * 7) / 8;
- /* XXX need decay for weight too? */
+ he_stat->weight1 = (he_stat->weight1 * 7) / 8;
+ he_stat->weight2 = (he_stat->weight2 * 7) / 8;
+ he_stat->weight3 = (he_stat->weight3 * 7) / 8;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
@@ -470,11 +476,6 @@ static int hist_entry__init(struct hist_entry *he,
he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
}
- if (he->mem_info) {
- he->mem_info->iaddr.ms.map = map__get(he->mem_info->iaddr.ms.map);
- he->mem_info->daddr.ms.map = map__get(he->mem_info->daddr.ms.map);
- }
-
if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
callchain_init(he->callchain);
@@ -520,8 +521,8 @@ err_infos:
zfree(&he->branch_info);
}
if (he->mem_info) {
- map_symbol__exit(&he->mem_info->iaddr.ms);
- map_symbol__exit(&he->mem_info->daddr.ms);
+ map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
+ map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
}
err:
map_symbol__exit(&he->ms);
@@ -566,7 +567,6 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
he = NULL;
}
}
-
return he;
}
@@ -614,7 +614,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
cmp = hist_entry__cmp(he, entry);
if (!cmp) {
if (sample_self) {
- he_stat__add_period(&he->stat, period);
+ he_stat__add_stat(&he->stat, &entry->stat);
hist_entry__add_callchain_period(he, period);
}
if (symbol_conf.cumulate_callchain)
@@ -626,7 +626,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
*/
mem_info__zput(entry->mem_info);
- block_info__zput(entry->block_info);
+ block_info__delete(entry->block_info);
kvm_info__zput(entry->kvm_info);
@@ -731,12 +731,15 @@ __hists__add_entry(struct hists *hists,
.stat = {
.nr_events = 1,
.period = sample->period,
+ .weight1 = sample->weight,
+ .weight2 = sample->ins_lat,
+ .weight3 = sample->p_stage_cyc,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent) | al->filtered,
.hists = hists,
.branch_info = bi,
- .mem_info = mi,
+ .mem_info = mem_info__get(mi),
.kvm_info = ki,
.block_info = block_info,
.transaction = sample->transaction,
@@ -825,7 +828,7 @@ iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
if (mi == NULL)
return -ENOMEM;
- iter->priv = mi;
+ iter->mi = mi;
return 0;
}
@@ -833,7 +836,7 @@ static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
u64 cost;
- struct mem_info *mi = iter->priv;
+ struct mem_info *mi = iter->mi;
struct hists *hists = evsel__hists(iter->evsel);
struct perf_sample *sample = iter->sample;
struct hist_entry *he;
@@ -880,12 +883,7 @@ iter_finish_mem_entry(struct hist_entry_iter *iter,
err = hist_entry__append_callchain(he, iter->sample);
out:
- /*
- * We don't need to free iter->priv (mem_info) here since the mem info
- * was either already freed in hists__findnew_entry() or passed to a
- * new hist entry by hist_entry__new().
- */
- iter->priv = NULL;
+ mem_info__zput(iter->mi);
iter->he = NULL;
return err;
@@ -904,7 +902,7 @@ iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al
iter->curr = 0;
iter->total = sample->branch_stack->nr;
- iter->priv = bi;
+ iter->bi = bi;
return 0;
}
@@ -918,7 +916,7 @@ iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
- struct branch_info *bi = iter->priv;
+ struct branch_info *bi = iter->bi;
int i = iter->curr;
if (bi == NULL)
@@ -947,7 +945,7 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a
int i = iter->curr;
int err = 0;
- bi = iter->priv;
+ bi = iter->bi;
if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
goto out;
@@ -976,7 +974,7 @@ static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
struct addr_location *al __maybe_unused)
{
- zfree(&iter->priv);
+ zfree(&iter->bi);
iter->he = NULL;
return iter->curr >= iter->total ? 0 : -1;
@@ -1044,7 +1042,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
if (he_cache == NULL)
return -ENOMEM;
- iter->priv = he_cache;
+ iter->he_cache = he_cache;
iter->curr = 0;
return 0;
@@ -1057,7 +1055,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
struct evsel *evsel = iter->evsel;
struct hists *hists = evsel__hists(evsel);
struct perf_sample *sample = iter->sample;
- struct hist_entry **he_cache = iter->priv;
+ struct hist_entry **he_cache = iter->he_cache;
struct hist_entry *he;
int err = 0;
@@ -1115,7 +1113,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
{
struct evsel *evsel = iter->evsel;
struct perf_sample *sample = iter->sample;
- struct hist_entry **he_cache = iter->priv;
+ struct hist_entry **he_cache = iter->he_cache;
struct hist_entry *he;
struct hist_entry he_tmp = {
.hists = evsel__hists(evsel),
@@ -1181,7 +1179,9 @@ static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
struct addr_location *al __maybe_unused)
{
- zfree(&iter->priv);
+ mem_info__zput(iter->mi);
+ zfree(&iter->bi);
+ zfree(&iter->he_cache);
iter->he = NULL;
return 0;
@@ -1327,13 +1327,13 @@ void hist_entry__delete(struct hist_entry *he)
}
if (he->mem_info) {
- map_symbol__exit(&he->mem_info->iaddr.ms);
- map_symbol__exit(&he->mem_info->daddr.ms);
+ map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
+ map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
mem_info__zput(he->mem_info);
}
if (he->block_info)
- block_info__zput(he->block_info);
+ block_info__delete(he->block_info);
if (he->kvm_info)
kvm_info__zput(he->kvm_info);
@@ -2128,7 +2128,7 @@ static bool hists__filter_entry_by_dso(struct hists *hists,
struct hist_entry *he)
{
if (hists->dso_filter != NULL &&
- (he->ms.map == NULL || map__dso(he->ms.map) != hists->dso_filter)) {
+ (he->ms.map == NULL || !RC_CHK_EQUAL(map__dso(he->ms.map), hists->dso_filter))) {
he->filtered |= (1 << HIST_FILTER__DSO);
return true;
}
@@ -2808,7 +2808,7 @@ int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool sh
}
if (dso)
printed += scnprintf(bf + printed, size - printed,
- ", DSO: %s", dso->short_name);
+ ", DSO: %s", dso__short_name(dso));
if (socket_id > -1)
printed += scnprintf(bf + printed, size - printed,
", Processor Socket: %d", socket_id);
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 4a0aea0c9e..8fb3bdd291 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -4,21 +4,22 @@
#include <linux/rbtree.h>
#include <linux/types.h>
-#include "evsel.h"
+#include "callchain.h"
#include "color.h"
#include "events_stats.h"
+#include "evsel.h"
+#include "map_symbol.h"
#include "mutex.h"
+#include "sample.h"
+#include "spark.h"
+#include "stat.h"
-struct hist_entry;
-struct hist_entry_ops;
struct addr_location;
-struct map_symbol;
struct mem_info;
struct kvm_info;
struct branch_info;
struct branch_stack;
struct block_info;
-struct symbol;
struct ui_progress;
enum hist_filter {
@@ -131,18 +132,20 @@ struct hist_entry_iter {
int total;
int curr;
- bool hide_unresolved;
-
struct evsel *evsel;
struct perf_sample *sample;
struct hist_entry *he;
struct symbol *parent;
- void *priv;
+
+ struct mem_info *mi;
+ struct branch_info *bi;
+ struct hist_entry **he_cache;
const struct hist_iter_ops *ops;
/* user-defined callback function (optional) */
int (*add_entry_cb)(struct hist_entry_iter *iter,
struct addr_location *al, bool single, void *arg);
+ bool hide_unresolved;
};
extern const struct hist_iter_ops hist_iter_normal;
@@ -150,6 +153,162 @@ extern const struct hist_iter_ops hist_iter_branch;
extern const struct hist_iter_ops hist_iter_mem;
extern const struct hist_iter_ops hist_iter_cumulative;
+struct res_sample {
+ u64 time;
+ int cpu;
+ int tid;
+};
+
+struct he_stat {
+ u64 period;
+ u64 period_sys;
+ u64 period_us;
+ u64 period_guest_sys;
+ u64 period_guest_us;
+ u64 weight1;
+ u64 weight2;
+ u64 weight3;
+ u32 nr_events;
+};
+
+struct namespace_id {
+ u64 dev;
+ u64 ino;
+};
+
+struct hist_entry_diff {
+ bool computed;
+ union {
+ /* PERF_HPP__DELTA */
+ double period_ratio_delta;
+
+ /* PERF_HPP__RATIO */
+ double period_ratio;
+
+ /* HISTC_WEIGHTED_DIFF */
+ s64 wdiff;
+
+ /* PERF_HPP_DIFF__CYCLES */
+ s64 cycles;
+ };
+ struct stats stats;
+ unsigned long svals[NUM_SPARKS];
+};
+
+struct hist_entry_ops {
+ void *(*new)(size_t size);
+ void (*free)(void *ptr);
+};
+
+/**
+ * struct hist_entry - histogram entry
+ *
+ * @row_offset - offset from the first callchain expanded to appear on screen
+ * @nr_rows - rows expanded in callchain, recalculated on folding/unfolding
+ */
+struct hist_entry {
+ struct rb_node rb_node_in;
+ struct rb_node rb_node;
+ union {
+ struct list_head node;
+ struct list_head head;
+ } pairs;
+ struct he_stat stat;
+ struct he_stat *stat_acc;
+ struct map_symbol ms;
+ struct thread *thread;
+ struct comm *comm;
+ struct namespace_id cgroup_id;
+ u64 cgroup;
+ u64 ip;
+ u64 transaction;
+ s32 socket;
+ s32 cpu;
+ u64 code_page_size;
+ u64 weight;
+ u64 ins_lat;
+ u64 p_stage_cyc;
+ u8 cpumode;
+ u8 depth;
+ int mem_type_off;
+ struct simd_flags simd_flags;
+
+ /* We are added by hists__add_dummy_entry. */
+ bool dummy;
+ bool leaf;
+
+ char level;
+ u8 filtered;
+
+ u16 callchain_size;
+ union {
+ /*
+ * Since perf diff only supports the stdio output, TUI
+ * fields are only accessed from perf report (or perf
+ * top). So make it a union to reduce memory usage.
+ */
+ struct hist_entry_diff diff;
+ struct /* for TUI */ {
+ u16 row_offset;
+ u16 nr_rows;
+ bool init_have_children;
+ bool unfolded;
+ bool has_children;
+ bool has_no_entry;
+ };
+ };
+ char *srcline;
+ char *srcfile;
+ struct symbol *parent;
+ struct branch_info *branch_info;
+ long time;
+ struct hists *hists;
+ struct mem_info *mem_info;
+ struct block_info *block_info;
+ struct kvm_info *kvm_info;
+ void *raw_data;
+ u32 raw_size;
+ int num_res;
+ struct res_sample *res_samples;
+ void *trace_output;
+ struct perf_hpp_list *hpp_list;
+ struct hist_entry *parent_he;
+ struct hist_entry_ops *ops;
+ struct annotated_data_type *mem_type;
+ union {
+ /* this is for hierarchical entry structure */
+ struct {
+ struct rb_root_cached hroot_in;
+ struct rb_root_cached hroot_out;
+ }; /* non-leaf entries */
+ struct rb_root sorted_chain; /* leaf entry has callchains */
+ };
+ struct callchain_root callchain[0]; /* must be last member */
+};
+
+static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
+{
+ return he->callchain_size != 0;
+}
+
+static inline bool hist_entry__has_pairs(struct hist_entry *he)
+{
+ return !list_empty(&he->pairs.node);
+}
+
+static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he)
+{
+ if (hist_entry__has_pairs(he))
+ return list_entry(he->pairs.node.next, struct hist_entry, pairs.node);
+ return NULL;
+}
+
+static inline void hist_entry__add_pair(struct hist_entry *pair,
+ struct hist_entry *he)
+{
+ list_add_tail(&pair->pairs.node, &he->pairs.head);
+}
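
A minimal sketch (not part of the patch) of how the pair helpers above are typically used; 'leader' and 'other' are hypothetical, already-initialized entries:

        /* link 'other' to 'leader' and read the first pair back */
        hist_entry__add_pair(other, leader);

        if (hist_entry__has_pairs(leader)) {
                struct hist_entry *pair = hist_entry__next_pair(leader);

                pr_debug("paired period: %" PRIu64 "\n", pair->stat.period);
        }
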
+
struct hist_entry *hists__add_entry(struct hists *hists,
struct addr_location *al,
struct symbol *parent,
@@ -186,6 +345,8 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
struct hists *hists);
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
struct perf_hpp_fmt *fmt, int printed);
+int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size,
+ unsigned int width);
void hist_entry__delete(struct hist_entry *he);
typedef int (*hists__resort_cb_t)(struct hist_entry *he, void *arg);
@@ -238,6 +399,20 @@ void hists__match(struct hists *leader, struct hists *other);
int hists__link(struct hists *leader, struct hists *other);
int hists__unlink(struct hists *hists);
+static inline float hist_entry__get_percent_limit(struct hist_entry *he)
+{
+ u64 period = he->stat.period;
+ u64 total_period = hists__total_period(he->hists);
+
+ if (unlikely(total_period == 0))
+ return 0;
+
+ if (symbol_conf.cumulate_callchain)
+ period = he->stat_acc->period;
+
+ return period * 100.0 / total_period;
+}
+
struct hists_evsel {
struct evsel evsel;
struct hists hists;
@@ -377,6 +552,9 @@ enum {
PERF_HPP__OVERHEAD_ACC,
PERF_HPP__SAMPLES,
PERF_HPP__PERIOD,
+ PERF_HPP__WEIGHT1,
+ PERF_HPP__WEIGHT2,
+ PERF_HPP__WEIGHT3,
PERF_HPP__MAX_INDEX
};
@@ -423,16 +601,24 @@ void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists);
void perf_hpp__set_user_width(const char *width_list_str);
void hists__reset_column_width(struct hists *hists);
+enum perf_hpp_fmt_type {
+ PERF_HPP_FMT_TYPE__RAW,
+ PERF_HPP_FMT_TYPE__PERCENT,
+ PERF_HPP_FMT_TYPE__AVERAGE,
+};
+
typedef u64 (*hpp_field_fn)(struct hist_entry *he);
typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front);
typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...);
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he, hpp_field_fn get_field,
- const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent);
+ const char *fmtstr, hpp_snprint_fn print_fn,
+ enum perf_hpp_fmt_type fmtype);
int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he, hpp_field_fn get_field,
- const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent);
+ const char *fmtstr, hpp_snprint_fn print_fn,
+ enum perf_hpp_fmt_type fmtype);
static inline void advance_hpp(struct perf_hpp *hpp, int inc)
{
@@ -460,15 +646,20 @@ struct hist_browser_timer {
int refresh;
};
-struct res_sample;
-
enum rstype {
A_NORMAL,
A_ASM,
A_SOURCE
};
-struct block_hist;
+struct block_hist {
+ struct hists block_hists;
+ struct perf_hpp_list block_list;
+ struct perf_hpp_fmt block_fmt;
+ int block_idx;
+ bool valid;
+ struct hist_entry he;
+};
#ifdef HAVE_SLANG_SUPPORT
#include "../ui/keysyms.h"
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 4db9a098f5..d6d7b75125 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -598,15 +598,15 @@ static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
struct auxtrace_cache *c;
unsigned int bits;
- if (dso->auxtrace_cache)
- return dso->auxtrace_cache;
+ if (dso__auxtrace_cache(dso))
+ return dso__auxtrace_cache(dso);
bits = intel_pt_cache_size(dso, machine);
/* Ignoring cache creation failure */
c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);
- dso->auxtrace_cache = c;
+ dso__set_auxtrace_cache(dso, c);
return c;
}
@@ -650,7 +650,7 @@ intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
if (!c)
return NULL;
- return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
+ return auxtrace_cache__lookup(dso__auxtrace_cache(dso), offset);
}
static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
@@ -661,7 +661,7 @@ static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
if (!c)
return;
- auxtrace_cache__remove(dso->auxtrace_cache, offset);
+ auxtrace_cache__remove(dso__auxtrace_cache(dso), offset);
}
static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
@@ -821,8 +821,8 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
}
dso = map__dso(al.map);
- if (dso->data.status == DSO_DATA_STATUS_ERROR &&
- dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE)) {
+ if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR &&
+ dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE)) {
ret = -ENOENT;
goto out_ret;
}
@@ -855,7 +855,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
/* Load maps to ensure dso->is_64_bit has been updated */
map__load(al.map);
- x86_64 = dso->is_64_bit;
+ x86_64 = dso__is_64_bit(dso);
while (1) {
len = dso__data_read_offset(dso, machine,
@@ -1010,7 +1010,7 @@ static int __intel_pt_pgd_ip(uint64_t ip, void *data)
offset = map__map_ip(al.map, ip);
- res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, map__dso(al.map)->long_name);
+ res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, dso__long_name(map__dso(al.map)));
addr_location__exit(&al);
return res;
}
@@ -3418,7 +3418,7 @@ static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
}
dso = map__dso(al.map);
- if (!dso || !dso->auxtrace_cache)
+ if (!dso || !dso__auxtrace_cache(dso))
continue;
offset = map__map_ip(al.map, addr);
@@ -3438,7 +3438,7 @@ static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
} else {
intel_pt_cache_invalidate(dso, machine, offset);
intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
- dso->long_name, addr);
+ dso__long_name(dso), addr);
}
}
out:
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 07c22f765f..8477edefc2 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -16,6 +16,7 @@
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
+#include "mem-info.h"
#include "path.h"
#include "srcline.h"
#include "symbol.h"
@@ -48,13 +49,6 @@ static struct dso *machine__kernel_dso(struct machine *machine)
return map__dso(machine->vmlinux_map);
}
-static void dsos__init(struct dsos *dsos)
-{
- INIT_LIST_HEAD(&dsos->head);
- dsos->root = RB_ROOT;
- init_rwsem(&dsos->lock);
-}
-
static int machine__set_mmap_name(struct machine *machine)
{
if (machine__is_host(machine))
@@ -165,28 +159,6 @@ struct machine *machine__new_kallsyms(void)
return machine;
}
-static void dsos__purge(struct dsos *dsos)
-{
- struct dso *pos, *n;
-
- down_write(&dsos->lock);
-
- list_for_each_entry_safe(pos, n, &dsos->head, node) {
- RB_CLEAR_NODE(&pos->rb_node);
- pos->root = NULL;
- list_del_init(&pos->node);
- dso__put(pos);
- }
-
- up_write(&dsos->lock);
-}
-
-static void dsos__exit(struct dsos *dsos)
-{
- dsos__purge(dsos);
- exit_rwsem(&dsos->lock);
-}
-
void machine__delete_threads(struct machine *machine)
{
threads__remove_all_threads(&machine->threads);
@@ -675,31 +647,6 @@ int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
return 0;
}
-static struct dso *machine__findnew_module_dso(struct machine *machine,
- struct kmod_path *m,
- const char *filename)
-{
- struct dso *dso;
-
- down_write(&machine->dsos.lock);
-
- dso = __dsos__find(&machine->dsos, m->name, true);
- if (!dso) {
- dso = __dsos__addnew(&machine->dsos, m->name);
- if (dso == NULL)
- goto out_unlock;
-
- dso__set_module_info(dso, m, machine);
- dso__set_long_name(dso, strdup(filename), true);
- dso->kernel = DSO_SPACE__KERNEL;
- }
-
- dso__get(dso);
-out_unlock:
- up_write(&machine->dsos.lock);
- return dso;
-}
-
int machine__process_aux_event(struct machine *machine __maybe_unused,
union perf_event *event)
{
@@ -737,7 +684,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
struct perf_sample *sample __maybe_unused)
{
struct symbol *sym;
- struct dso *dso;
+ struct dso *dso = NULL;
struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
int err = 0;
@@ -748,16 +695,15 @@ static int machine__process_ksymbol_register(struct machine *machine,
err = -ENOMEM;
goto out;
}
- dso->kernel = DSO_SPACE__KERNEL;
+ dso__set_kernel(dso, DSO_SPACE__KERNEL);
map = map__new2(0, dso);
- dso__put(dso);
if (!map) {
err = -ENOMEM;
goto out;
}
if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
- dso->binary_type = DSO_BINARY_TYPE__OOL;
- dso->data.file_size = event->ksymbol.len;
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__OOL);
+ dso__data(dso)->file_size = event->ksymbol.len;
dso__set_loaded(dso);
}
@@ -772,11 +718,11 @@ static int machine__process_ksymbol_register(struct machine *machine,
dso__set_loaded(dso);
if (is_bpf_image(event->ksymbol.name)) {
- dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_IMAGE);
dso__set_long_name(dso, "", false);
}
} else {
- dso = map__dso(map);
+ dso = dso__get(map__dso(map));
}
sym = symbol__new(map__map_ip(map, map__start(map)),
@@ -789,6 +735,7 @@ static int machine__process_ksymbol_register(struct machine *machine,
dso__insert_symbol(dso, sym);
out:
map__put(map);
+ dso__put(dso);
return err;
}
@@ -883,7 +830,7 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start
if (kmod_path__parse_name(&m, filename))
return NULL;
- dso = machine__findnew_module_dso(machine, &m, filename);
+ dso = dsos__findnew_module_dso(&machine->dsos, machine, &m, filename);
if (dso == NULL)
goto out;
@@ -907,11 +854,11 @@ out:
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
struct rb_node *nd;
- size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
+ size_t ret = dsos__fprintf(&machines->host.dsos, fp);
for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
struct machine *pos = rb_entry(nd, struct machine, rb_node);
- ret += __dsos__fprintf(&pos->dsos.head, fp);
+ ret += dsos__fprintf(&pos->dsos, fp);
}
return ret;
@@ -920,7 +867,7 @@ size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
bool (skip)(struct dso *dso, int parm), int parm)
{
- return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
+ return dsos__fprintf_buildid(&m->dsos, fp, skip, parm);
}
size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
@@ -942,17 +889,17 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
size_t printed = 0;
struct dso *kdso = machine__kernel_dso(machine);
- if (kdso->has_build_id) {
+ if (dso__has_build_id(kdso)) {
char filename[PATH_MAX];
- if (dso__build_id_filename(kdso, filename, sizeof(filename),
- false))
+
+ if (dso__build_id_filename(kdso, filename, sizeof(filename), false))
printed += fprintf(fp, "[0] %s\n", filename);
}
- for (i = 0; i < vmlinux_path__nr_entries; ++i)
- printed += fprintf(fp, "[%d] %s\n",
- i + kdso->has_build_id, vmlinux_path[i]);
-
+ for (i = 0; i < vmlinux_path__nr_entries; ++i) {
+ printed += fprintf(fp, "[%d] %s\n", i + dso__has_build_id(kdso),
+ vmlinux_path[i]);
+ }
return printed;
}
@@ -1002,7 +949,7 @@ static struct dso *machine__get_kernel(struct machine *machine)
DSO_SPACE__KERNEL_GUEST);
}
- if (kernel != NULL && (!kernel->has_build_id))
+ if (kernel != NULL && (!dso__has_build_id(kernel)))
dso__read_running_kernel_build_id(kernel, machine);
return kernel;
@@ -1367,8 +1314,8 @@ static char *get_kernel_version(const char *root_dir)
static bool is_kmod_dso(struct dso *dso)
{
- return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
- dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
+ return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+ dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE;
}
static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
@@ -1395,8 +1342,8 @@ static int maps__set_module_path(struct maps *maps, const char *path, struct kmo
* we need to update the symtab_type if needed.
*/
if (m->comp && is_kmod_dso(dso)) {
- dso->symtab_type++;
- dso->comp = m->comp;
- dso__set_symtab_type(dso, dso__symtab_type(dso) + 1);
+ dso__set_comp(dso, m->comp);
}
map__put(map);
return 0;
@@ -1616,16 +1563,14 @@ out_put:
return ret;
}
-static bool machine__uses_kcore(struct machine *machine)
+static int machine__uses_kcore_cb(struct dso *dso, void *data __maybe_unused)
{
- struct dso *dso;
-
- list_for_each_entry(dso, &machine->dsos.head, node) {
- if (dso__is_kcore(dso))
- return true;
- }
+ return dso__is_kcore(dso) ? 1 : 0;
+}
- return false;
+static bool machine__uses_kcore(struct machine *machine)
+{
+ return dsos__for_each_dso(&machine->dsos, machine__uses_kcore_cb, NULL) != 0 ? true : false;
}
static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
@@ -1692,53 +1637,20 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
* Should be there already, from the build-id table in
* the header.
*/
- struct dso *kernel = NULL;
- struct dso *dso;
-
- down_read(&machine->dsos.lock);
-
- list_for_each_entry(dso, &machine->dsos.head, node) {
-
- /*
- * The cpumode passed to is_kernel_module is not the
- * cpumode of *this* event. If we insist on passing
- * correct cpumode to is_kernel_module, we should
- * record the cpumode when we adding this dso to the
- * linked list.
- *
- * However we don't really need passing correct
- * cpumode. We know the correct cpumode must be kernel
- * mode (if not, we should not link it onto kernel_dsos
- * list).
- *
- * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
- * is_kernel_module() treats it as a kernel cpumode.
- */
-
- if (!dso->kernel ||
- is_kernel_module(dso->long_name,
- PERF_RECORD_MISC_CPUMODE_UNKNOWN))
- continue;
-
-
- kernel = dso__get(dso);
- break;
- }
-
- up_read(&machine->dsos.lock);
+ struct dso *kernel = dsos__find_kernel_dso(&machine->dsos);
if (kernel == NULL)
kernel = machine__findnew_dso(machine, machine->mmap_name);
if (kernel == NULL)
goto out_problem;
- kernel->kernel = dso_space;
+ dso__set_kernel(kernel, dso_space);
if (__machine__create_kernel_maps(machine, kernel) < 0) {
dso__put(kernel);
goto out_problem;
}
- if (strstr(kernel->long_name, "vmlinux"))
+ if (strstr(dso__long_name(kernel), "vmlinux"))
dso__set_short_name(kernel, "[kernel.vmlinux]", false);
if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
@@ -2101,11 +2013,11 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample,
if (!mi)
return NULL;
- ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
- ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
+ ip__resolve_ams(al->thread, mem_info__iaddr(mi), sample->ip);
+ ip__resolve_data(al->thread, al->cpumode, mem_info__daddr(mi),
sample->addr, sample->phys_addr,
sample->data_page_size);
- mi->data_src.val = sample->data_src;
+ mem_info__data_src(mi)->val = sample->data_src;
return mi;
}
@@ -2120,14 +2032,14 @@ static char *callchain_srcline(struct map_symbol *ms, u64 ip)
return srcline;
dso = map__dso(map);
- srcline = srcline__tree_find(&dso->srclines, ip);
+ srcline = srcline__tree_find(dso__srclines(dso), ip);
if (!srcline) {
bool show_sym = false;
bool show_addr = callchain_param.key == CCKEY_ADDRESS;
srcline = get_srcline(dso, map__rip_2objdump(map, ip),
ms->sym, show_sym, show_addr, ip);
- srcline__tree_insert(&dso->srclines, ip, srcline);
+ srcline__tree_insert(dso__srclines(dso), ip, srcline);
}
return srcline;
@@ -2925,12 +2837,12 @@ static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms
addr = map__rip_2objdump(map, addr);
dso = map__dso(map);
- inline_node = inlines__tree_find(&dso->inlined_nodes, addr);
+ inline_node = inlines__tree_find(dso__inlined_nodes(dso), addr);
if (!inline_node) {
inline_node = dso__parse_addr_inlines(dso, addr, sym);
if (!inline_node)
return ret;
- inlines__tree_insert(&dso->inlined_nodes, inline_node);
+ inlines__tree_insert(dso__inlined_nodes(dso), inline_node);
}
ilist_ms = (struct map_symbol) {
@@ -3219,21 +3131,33 @@ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, ch
if (sym == NULL)
return NULL;
- *modp = __map__is_kmodule(map) ? (char *)map__dso(map)->short_name : NULL;
+ *modp = __map__is_kmodule(map) ? (char *)dso__short_name(map__dso(map)) : NULL;
*addrp = map__unmap_ip(map, sym->start);
return sym->name;
}
+struct machine__for_each_dso_cb_args {
+ struct machine *machine;
+ machine__dso_t fn;
+ void *priv;
+};
+
+static int machine__for_each_dso_cb(struct dso *dso, void *data)
+{
+ struct machine__for_each_dso_cb_args *args = data;
+
+ return args->fn(dso, args->machine, args->priv);
+}
+
int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
{
- struct dso *pos;
- int err = 0;
+ struct machine__for_each_dso_cb_args args = {
+ .machine = machine,
+ .fn = fn,
+ .priv = priv,
+ };
- list_for_each_entry(pos, &machine->dsos.head, node) {
- if (fn(pos, machine, priv))
- err = -1;
- }
- return err;
+ return dsos__for_each_dso(&machine->dsos, machine__for_each_dso_cb, &args);
}
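
machine__for_each_dso() keeps its signature; a caller sketch (not part of the patch) with a hypothetical callback counting DSOs that carry a build ID:

        static int has_build_id_cb(struct dso *dso, struct machine *m __maybe_unused,
                                   void *priv)
        {
                int *cnt = priv;

                if (dso__has_build_id(dso))
                        (*cnt)++;
                return 0;
        }

        static int machine__nr_dsos_with_build_id(struct machine *machine)
        {
                int cnt = 0;

                machine__for_each_dso(machine, has_build_id_cb, &cnt);
                return cnt;
        }
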
int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
@@ -3266,6 +3190,17 @@ bool machine__is_lock_function(struct machine *machine, u64 addr)
sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
machine->lock.text_end = map__unmap_ip(kmap, sym->start);
+
+ sym = machine__find_kernel_symbol_by_name(machine, "__traceiter_contention_begin", &kmap);
+ if (sym) {
+ machine->traceiter.text_start = map__unmap_ip(kmap, sym->start);
+ machine->traceiter.text_end = map__unmap_ip(kmap, sym->end);
+ }
+ sym = machine__find_kernel_symbol_by_name(machine, "trace_contention_begin", &kmap);
+ if (sym) {
+ machine->trace.text_start = map__unmap_ip(kmap, sym->start);
+ machine->trace.text_end = map__unmap_ip(kmap, sym->end);
+ }
}
/* failed to get kernel symbols */
@@ -3280,5 +3215,23 @@ bool machine__is_lock_function(struct machine *machine, u64 addr)
if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
return true;
+ /* traceiter functions currently don't have their own section
+ * but we consider them lock functions
+ */
+ if (machine->traceiter.text_start != 0) {
+ if (machine->traceiter.text_start <= addr && addr < machine->traceiter.text_end)
+ return true;
+ }
+
+ if (machine->trace.text_start != 0) {
+ if (machine->trace.text_start <= addr && addr < machine->trace.text_end)
+ return true;
+ }
+
return false;
}
+
+int machine__hit_all_dsos(struct machine *machine)
+{
+ return dsos__hit_all(&machine->dsos);
+}
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index e28c787616..82a47bac80 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -49,7 +49,7 @@ struct machine {
struct {
u64 text_start;
u64 text_end;
- } sched, lock;
+ } sched, lock, traceiter, trace;
pid_t *current_tid;
size_t current_tid_sz;
union { /* Tool specific area */
@@ -306,4 +306,6 @@ int machine__map_x86_64_entry_trampolines(struct machine *machine,
int machine__resolve(struct machine *machine, struct addr_location *al,
struct perf_sample *sample);
+int machine__hit_all_dsos(struct machine *machine);
+
#endif /* __PERF_MACHINE_H */
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 14a5ea70d8..e1d14936a6 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -168,7 +168,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
if (dso == NULL)
goto out_delete;
- assert(!dso->kernel);
+ assert(!dso__kernel(dso));
map__init(result, start, start + len, pgoff, dso);
if (anon || no_dso) {
@@ -182,10 +182,9 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
if (!(prot & PROT_EXEC))
dso__set_loaded(dso);
}
- mutex_lock(&dso->lock);
- nsinfo__put(dso->nsinfo);
- dso->nsinfo = nsi;
- mutex_unlock(&dso->lock);
+ mutex_lock(dso__lock(dso));
+ dso__set_nsinfo(dso, nsi);
+ mutex_unlock(dso__lock(dso));
if (build_id__is_defined(bid)) {
dso__set_build_id(dso, bid);
@@ -196,13 +195,12 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
* reading the header will have the build ID set and all future mmaps will
* have it missing.
*/
- down_read(&machine->dsos.lock);
- header_bid_dso = __dsos__find(&machine->dsos, filename, false);
- up_read(&machine->dsos.lock);
- if (header_bid_dso && header_bid_dso->header_build_id) {
- dso__set_build_id(dso, &header_bid_dso->bid);
- dso->header_build_id = 1;
+ header_bid_dso = dsos__find(&machine->dsos, filename, false);
+ if (header_bid_dso && dso__header_build_id(header_bid_dso)) {
+ dso__set_build_id(dso, dso__bid(header_bid_dso));
+ dso__set_header_build_id(dso, 1);
}
+ dso__put(header_bid_dso);
}
dso__put(dso);
}
@@ -223,7 +221,7 @@ struct map *map__new2(u64 start, struct dso *dso)
struct map *result;
RC_STRUCT(map) *map;
- map = calloc(1, sizeof(*map) + (dso->kernel ? sizeof(struct kmap) : 0));
+ map = calloc(1, sizeof(*map) + (dso__kernel(dso) ? sizeof(struct kmap) : 0));
if (ADD_RC_CHK(result, map)) {
/*
* ->end will be filled after we load all the symbols
@@ -236,7 +234,7 @@ struct map *map__new2(u64 start, struct dso *dso)
bool __map__is_kernel(const struct map *map)
{
- if (!map__dso(map)->kernel)
+ if (!dso__kernel(map__dso(map)))
return false;
return machine__kernel_map(maps__machine(map__kmaps((struct map *)map))) == map;
}
@@ -253,7 +251,7 @@ bool __map__is_bpf_prog(const struct map *map)
const char *name;
struct dso *dso = map__dso(map);
- if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
return true;
/*
@@ -261,7 +259,7 @@ bool __map__is_bpf_prog(const struct map *map)
* type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
* guess the type based on name.
*/
- name = dso->short_name;
+ name = dso__short_name(dso);
return name && (strstr(name, "bpf_prog_") == name);
}
@@ -270,7 +268,7 @@ bool __map__is_bpf_image(const struct map *map)
const char *name;
struct dso *dso = map__dso(map);
- if (dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE)
+ if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE)
return true;
/*
@@ -278,7 +276,7 @@ bool __map__is_bpf_image(const struct map *map)
* type of DSO_BINARY_TYPE__BPF_IMAGE. In such cases, we can
* guess the type based on name.
*/
- name = dso->short_name;
+ name = dso__short_name(dso);
return name && is_bpf_image(name);
}
@@ -286,7 +284,7 @@ bool __map__is_ool(const struct map *map)
{
const struct dso *dso = map__dso(map);
- return dso && dso->binary_type == DSO_BINARY_TYPE__OOL;
+ return dso && dso__binary_type(dso) == DSO_BINARY_TYPE__OOL;
}
bool map__has_symbols(const struct map *map)
@@ -317,7 +315,7 @@ void map__put(struct map *map)
void map__fixup_start(struct map *map)
{
struct dso *dso = map__dso(map);
- struct rb_root_cached *symbols = &dso->symbols;
+ struct rb_root_cached *symbols = dso__symbols(dso);
struct rb_node *nd = rb_first_cached(symbols);
if (nd != NULL) {
@@ -330,7 +328,7 @@ void map__fixup_start(struct map *map)
void map__fixup_end(struct map *map)
{
struct dso *dso = map__dso(map);
- struct rb_root_cached *symbols = &dso->symbols;
+ struct rb_root_cached *symbols = dso__symbols(dso);
struct rb_node *nd = rb_last(&symbols->rb_root);
if (nd != NULL) {
@@ -344,7 +342,7 @@ void map__fixup_end(struct map *map)
int map__load(struct map *map)
{
struct dso *dso = map__dso(map);
- const char *name = dso->long_name;
+ const char *name = dso__long_name(dso);
int nr;
if (dso__loaded(dso))
@@ -352,10 +350,10 @@ int map__load(struct map *map)
nr = dso__load(dso, map);
if (nr < 0) {
- if (dso->has_build_id) {
+ if (dso__has_build_id(dso)) {
char sbuild_id[SBUILD_ID_SIZE];
- build_id__sprintf(&dso->bid, sbuild_id);
+ build_id__sprintf(dso__bid(dso), sbuild_id);
pr_debug("%s with build id %s not found", name, sbuild_id);
} else
pr_debug("Failed to open %s", name);
@@ -417,7 +415,7 @@ struct map *map__clone(struct map *from)
size_t size = sizeof(RC_STRUCT(map));
struct dso *dso = map__dso(from);
- if (dso && dso->kernel)
+ if (dso && dso__kernel(dso))
size += sizeof(struct kmap);
map = memdup(RC_CHK_ACCESS(from), size);
@@ -434,14 +432,14 @@ size_t map__fprintf(struct map *map, FILE *fp)
const struct dso *dso = map__dso(map);
return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
- map__start(map), map__end(map), map__pgoff(map), dso->name);
+ map__start(map), map__end(map), map__pgoff(map), dso__name(dso));
}
static bool prefer_dso_long_name(const struct dso *dso, bool print_off)
{
- return dso->long_name &&
+ return dso__long_name(dso) &&
(symbol_conf.show_kernel_path ||
- (print_off && (dso->name[0] == '[' || dso__is_kcore(dso))));
+ (print_off && (dso__name(dso)[0] == '[' || dso__is_kcore(dso))));
}
static size_t __map__fprintf_dsoname(struct map *map, bool print_off, FILE *fp)
@@ -452,9 +450,9 @@ static size_t __map__fprintf_dsoname(struct map *map, bool print_off, FILE *fp)
if (dso) {
if (prefer_dso_long_name(dso, print_off))
- dsoname = dso->long_name;
+ dsoname = dso__long_name(dso);
else
- dsoname = dso->name;
+ dsoname = dso__name(dso);
}
if (symbol_conf.pad_output_len_dso) {
@@ -547,14 +545,14 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
}
}
- if (!dso->adjust_symbols)
+ if (!dso__adjust_symbols(dso))
return rip;
- if (dso->rel)
+ if (dso__rel(dso))
return rip - map__pgoff(map);
- if (dso->kernel == DSO_SPACE__USER)
- return rip + dso->text_offset;
+ if (dso__kernel(dso) == DSO_SPACE__USER)
+ return rip + dso__text_offset(dso);
return map__unmap_ip(map, rip) - map__reloc(map);
}
@@ -575,18 +573,35 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
{
const struct dso *dso = map__dso(map);
- if (!dso->adjust_symbols)
+ if (!dso__adjust_symbols(dso))
return map__unmap_ip(map, ip);
- if (dso->rel)
+ if (dso__rel(dso))
return map__unmap_ip(map, ip + map__pgoff(map));
- if (dso->kernel == DSO_SPACE__USER)
- return map__unmap_ip(map, ip - dso->text_offset);
+ if (dso__kernel(dso) == DSO_SPACE__USER)
+ return map__unmap_ip(map, ip - dso__text_offset(dso));
return ip + map__reloc(map);
}
+/* convert objdump address to relative address. (To be removed) */
+u64 map__objdump_2rip(struct map *map, u64 ip)
+{
+ const struct dso *dso = map__dso(map);
+
+ if (!dso__adjust_symbols(dso))
+ return ip;
+
+ if (dso__rel(dso))
+ return ip + map__pgoff(map);
+
+ if (dso__kernel(dso) == DSO_SPACE__USER)
+ return ip - dso__text_offset(dso);
+
+ return map__map_ip(map, ip + map__reloc(map));
+}
+
bool map__contains_symbol(const struct map *map, const struct symbol *sym)
{
u64 ip = map__unmap_ip(map, sym->start);
@@ -598,7 +613,7 @@ struct kmap *__map__kmap(struct map *map)
{
const struct dso *dso = map__dso(map);
- if (!dso || !dso->kernel)
+ if (!dso || !dso__kernel(dso))
return NULL;
return (struct kmap *)(&RC_CHK_ACCESS(map)[1]);
}
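
The map.c hunks above are part of the dso reference-count checking conversion: every direct dso->field dereference is replaced with a dso__field() accessor. A minimal sketch of what such an accessor looks like, assuming struct dso is wrapped with DECLARE_RC_STRUCT() in dso.h the same way mem_info is further down in this patch (the exact field layout here is an assumption, not taken from the patch):

/* Sketch only: the real accessors live in tools/perf/util/dso.h. */
static inline const char *dso__short_name(const struct dso *dso)
{
	return RC_CHK_ACCESS(dso)->short_name;
}

static inline enum dso_binary_type dso__binary_type(const struct dso *dso)
{
	return RC_CHK_ACCESS(dso)->binary_type;
}
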
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 49756716cb..65e2609fa1 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -132,6 +132,9 @@ u64 map__rip_2objdump(struct map *map, u64 rip);
/* objdump address -> memory address */
u64 map__objdump_2mem(struct map *map, u64 ip);
+/* objdump address -> rip */
+u64 map__objdump_2rip(struct map *map, u64 ip);
+
struct symbol;
struct thread;
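
map__objdump_2rip() is added as the inverse of map__rip_2objdump(): each branch (no symbol adjustment, relative DSO via pgoff, user-space DSO with a text offset, and the default reloc case) undoes the corresponding adjustment. A round-trip check, given as a sketch only since this helper is not part of the patch:

#include <assert.h>

/* Hypothetical check: converting a rip to an objdump address and back
 * should return the original value for the same map. */
static void check_objdump_roundtrip(struct map *map, u64 rip)
{
	u64 objdump_addr = map__rip_2objdump(map, rip);

	assert(map__objdump_2rip(map, objdump_addr) == rip);
}
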
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index ce13145a9f..eaada3e0f5 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -76,7 +76,7 @@ static void check_invariants(const struct maps *maps __maybe_unused)
/* Expect at least 1 reference count. */
assert(refcount_read(map__refcnt(map)) > 0);
- if (map__dso(map) && map__dso(map)->kernel)
+ if (map__dso(map) && dso__kernel(map__dso(map)))
assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));
if (i > 0) {
@@ -124,11 +124,6 @@ static void maps__set_maps_by_address(struct maps *maps, struct map **new)
}
-static struct map ***maps__maps_by_name_addr(struct maps *maps)
-{
- return &RC_CHK_ACCESS(maps)->maps_by_name;
-}
-
static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
{
RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
@@ -211,11 +206,6 @@ void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libun
static struct rw_semaphore *maps__lock(struct maps *maps)
{
- /*
- * When the lock is acquired or released the maps invariants should
- * hold.
- */
- check_invariants(maps);
return &RC_CHK_ACCESS(maps)->lock;
}
@@ -289,6 +279,9 @@ void maps__put(struct maps *maps)
static void __maps__free_maps_by_name(struct maps *maps)
{
+ if (!maps__maps_by_name(maps))
+ return;
+
/*
* Free everything to try to do it from the rbtree in the next search
*/
@@ -296,6 +289,9 @@ static void __maps__free_maps_by_name(struct maps *maps)
map__put(maps__maps_by_name(maps)[i]);
zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
+
+ /* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false */
+ maps__set_maps_by_name_sorted(maps, false);
}
static int map__start_cmp(const void *a, const void *b)
@@ -346,7 +342,7 @@ static int map__strcmp(const void *a, const void *b)
const struct map *map_b = *(const struct map * const *)b;
const struct dso *dso_a = map__dso(map_a);
const struct dso *dso_b = map__dso(map_b);
- int ret = strcmp(dso_a->short_name, dso_b->short_name);
+ int ret = strcmp(dso__short_name(dso_a), dso__short_name(dso_b));
if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) {
/* Ensure distinct but name equal maps have an order. */
@@ -358,6 +354,7 @@ static int map__strcmp(const void *a, const void *b)
static int maps__sort_by_name(struct maps *maps)
{
int err = 0;
+
down_write(maps__lock(maps));
if (!maps__maps_by_name_sorted(maps)) {
struct map **maps_by_name = maps__maps_by_name(maps);
@@ -384,6 +381,7 @@ static int maps__sort_by_name(struct maps *maps)
maps__set_maps_by_name_sorted(maps, true);
}
}
+ check_invariants(maps);
up_write(maps__lock(maps));
return err;
}
@@ -485,7 +483,7 @@ static int __maps__insert(struct maps *maps, struct map *new)
}
if (map__end(new) < map__start(new))
RC_CHK_ACCESS(maps)->ends_broken = true;
- if (dso && dso->kernel) {
+ if (dso && dso__kernel(dso)) {
struct kmap *kmap = map__kmap(new);
if (kmap)
@@ -502,6 +500,7 @@ int maps__insert(struct maps *maps, struct map *map)
down_write(maps__lock(maps));
ret = __maps__insert(maps, map);
+ check_invariants(maps);
up_write(maps__lock(maps));
return ret;
}
@@ -536,6 +535,7 @@ void maps__remove(struct maps *maps, struct map *map)
{
down_write(maps__lock(maps));
__maps__remove(maps, map);
+ check_invariants(maps);
up_write(maps__lock(maps));
}
@@ -602,6 +602,7 @@ void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data
else
i++;
}
+ check_invariants(maps);
up_write(maps__lock(maps));
}
@@ -740,7 +741,6 @@ static unsigned int first_ending_after(struct maps *maps, const struct map *map)
*/
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
- struct map **maps_by_address;
int err = 0;
FILE *fp = debug_file();
@@ -748,12 +748,12 @@ sort_again:
if (!maps__maps_by_address_sorted(maps))
__maps__sort_by_address(maps);
- maps_by_address = maps__maps_by_address(maps);
/*
* Iterate through entries where the end of the existing entry is
* greater-than the new map's start.
*/
for (unsigned int i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
+ struct map **maps_by_address = maps__maps_by_address(maps);
struct map *pos = maps_by_address[i];
struct map *before = NULL, *after = NULL;
@@ -766,7 +766,7 @@ sort_again:
if (use_browser) {
pr_debug("overlapping maps in %s (disable tui for more info)\n",
- map__dso(new)->name);
+ dso__name(map__dso(new)));
} else if (verbose >= 2) {
pr_debug("overlapping maps:\n");
map__fprintf(new, fp);
@@ -820,8 +820,10 @@ sort_again:
/* Maps are still ordered, go to next one. */
i++;
if (after) {
- __maps__insert(maps, after);
+ err = __maps__insert(maps, after);
map__put(after);
+ if (err)
+ goto out_err;
if (!maps__maps_by_address_sorted(maps)) {
/*
* Sorting broken so invariants don't
@@ -850,7 +852,7 @@ sort_again:
check_invariants(maps);
}
/* Add the map. */
- __maps__insert(maps, new);
+ err = __maps__insert(maps, new);
out_err:
return err;
}
@@ -942,6 +944,8 @@ int maps__copy_from(struct maps *dest, struct maps *parent)
map__put(new);
}
}
+ check_invariants(dest);
+
up_read(maps__lock(parent));
up_write(maps__lock(dest));
return err;
@@ -987,7 +991,7 @@ static int map__strcmp_name(const void *name, const void *b)
{
const struct dso *dso = map__dso(*(const struct map **)b);
- return strcmp(name, dso->short_name);
+ return strcmp(name, dso__short_name(dso));
}
struct map *maps__find_by_name(struct maps *maps, const char *name)
@@ -1006,7 +1010,7 @@ struct map *maps__find_by_name(struct maps *maps, const char *name)
if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);
- if (dso && strcmp(dso->short_name, name) == 0) {
+ if (dso && strcmp(dso__short_name(dso), name) == 0) {
result = map__get(maps__maps_by_name(maps)[i]);
done = true;
}
@@ -1043,7 +1047,7 @@ struct map *maps__find_by_name(struct maps *maps, const char *name)
struct map *pos = maps_by_address[i];
struct dso *dso = map__dso(pos);
- if (dso && strcmp(dso->short_name, name) == 0) {
+ if (dso && strcmp(dso__short_name(dso), name) == 0) {
result = map__get(pos);
break;
}
@@ -1097,6 +1101,7 @@ void maps__fixup_end(struct maps *maps)
map__set_end(maps_by_address[n - 1], ~0ULL);
RC_CHK_ACCESS(maps)->ends_broken = false;
+ check_invariants(maps);
up_write(maps__lock(maps));
}
@@ -1147,6 +1152,8 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
/* No overlap so regular insert suffices. */
int ret = __maps__insert(kmaps, new_map);
+
+ check_invariants(kmaps);
up_write(maps__lock(kmaps));
return ret;
}
@@ -1162,8 +1169,7 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
}
maps__set_maps_by_address(kmaps, merged_maps_by_address);
maps__set_maps_by_address_sorted(kmaps, true);
- zfree(maps__maps_by_name_addr(kmaps));
- maps__set_maps_by_name_sorted(kmaps, true);
+ __maps__free_maps_by_name(kmaps);
maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated);
/* Copy entries before the new_map that can't overlap. */
@@ -1184,6 +1190,7 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
map__zput(kmaps_maps_by_address[i]);
free(kmaps_maps_by_address);
+ check_invariants(kmaps);
up_write(maps__lock(kmaps));
return 0;
}
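
With check_invariants() removed from maps__lock(), every writer in maps.c now validates the container explicitly before dropping the write lock, as the insert/remove/fixup hunks above show. The pattern, in outline (not a complete function):

/* Outline of the pattern used by maps__insert(), maps__remove(), etc. */
down_write(maps__lock(maps));
/* ... mutate maps_by_address and/or maps_by_name ... */
check_invariants(maps);	/* sorted order, refcounts, kmaps back-pointers */
up_write(maps__lock(maps));
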
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 637cbd4a7b..6dda47bb77 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -10,7 +10,9 @@
#include <linux/kernel.h>
#include "map_symbol.h"
#include "mem-events.h"
+#include "mem-info.h"
#include "debug.h"
+#include "evsel.h"
#include "symbol.h"
#include "pmu.h"
#include "pmus.h"
@@ -281,7 +283,7 @@ static const char * const tlb_access[] = {
"Fault",
};
-int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+int perf_mem__tlb_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
size_t l = 0, i;
u64 m = PERF_MEM_TLB_NA;
@@ -291,7 +293,7 @@ int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
out[0] = '\0';
if (mem_info)
- m = mem_info->data_src.mem_dtlb;
+ m = mem_info__const_data_src(mem_info)->mem_dtlb;
hit = m & PERF_MEM_TLB_HIT;
miss = m & PERF_MEM_TLB_MISS;
@@ -359,13 +361,13 @@ static const char * const mem_hops[] = {
"board",
};
-static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+static int perf_mem__op_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
u64 op = PERF_MEM_LOCK_NA;
int l;
if (mem_info)
- op = mem_info->data_src.mem_op;
+ op = mem_info__const_data_src(mem_info)->mem_op;
if (op & PERF_MEM_OP_NA)
l = scnprintf(out, sz, "N/A");
@@ -383,7 +385,7 @@ static int perf_mem__op_scnprintf(char *out, size_t sz, struct mem_info *mem_inf
return l;
}
-int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+int perf_mem__lvl_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
union perf_mem_data_src data_src;
int printed = 0;
@@ -398,7 +400,7 @@ int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
if (!mem_info)
goto na;
- data_src = mem_info->data_src;
+ data_src = *mem_info__const_data_src(mem_info);
if (data_src.mem_lvl & PERF_MEM_LVL_HIT)
memcpy(hit_miss, "hit", 3);
@@ -465,7 +467,7 @@ static const char * const snoopx_access[] = {
"Peer",
};
-int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+int perf_mem__snp_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
size_t i, l = 0;
u64 m = PERF_MEM_SNOOP_NA;
@@ -474,7 +476,7 @@ int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
out[0] = '\0';
if (mem_info)
- m = mem_info->data_src.mem_snoop;
+ m = mem_info__const_data_src(mem_info)->mem_snoop;
for (i = 0; m && i < ARRAY_SIZE(snoop_access); i++, m >>= 1) {
if (!(m & 0x1))
@@ -488,7 +490,7 @@ int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
m = 0;
if (mem_info)
- m = mem_info->data_src.mem_snoopx;
+ m = mem_info__const_data_src(mem_info)->mem_snoopx;
for (i = 0; m && i < ARRAY_SIZE(snoopx_access); i++, m >>= 1) {
if (!(m & 0x1))
@@ -507,13 +509,13 @@ int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
return l;
}
-int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+int perf_mem__lck_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
u64 mask = PERF_MEM_LOCK_NA;
int l;
if (mem_info)
- mask = mem_info->data_src.mem_lock;
+ mask = mem_info__const_data_src(mem_info)->mem_lock;
if (mask & PERF_MEM_LOCK_NA)
l = scnprintf(out, sz, "N/A");
@@ -525,7 +527,7 @@ int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
return l;
}
-int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+int perf_mem__blk_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
size_t l = 0;
u64 mask = PERF_MEM_BLK_NA;
@@ -534,7 +536,7 @@ int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
out[0] = '\0';
if (mem_info)
- mask = mem_info->data_src.mem_blk;
+ mask = mem_info__const_data_src(mem_info)->mem_blk;
if (!mask || (mask & PERF_MEM_BLK_NA)) {
l += scnprintf(out + l, sz - l, " N/A");
@@ -548,7 +550,7 @@ int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
return l;
}
-int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_info)
+int perf_script__meminfo_scnprintf(char *out, size_t sz, const struct mem_info *mem_info)
{
int i = 0;
@@ -570,8 +572,8 @@ int perf_script__meminfo_scnprintf(char *out, size_t sz, struct mem_info *mem_in
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi)
{
- union perf_mem_data_src *data_src = &mi->data_src;
- u64 daddr = mi->daddr.addr;
+ union perf_mem_data_src *data_src = mem_info__data_src(mi);
+ u64 daddr = mem_info__daddr(mi)->addr;
u64 op = data_src->mem_op;
u64 lvl = data_src->mem_lvl;
u64 snoop = data_src->mem_snoop;
@@ -698,7 +700,7 @@ do { \
return -1;
}
- if (!mi->daddr.ms.map || !mi->iaddr.ms.map) {
+ if (!mem_info__daddr(mi)->ms.map || !mem_info__iaddr(mi)->ms.map) {
stats->nomap++;
return -1;
}
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index 15d5f0320d..ca31014d79 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -3,13 +3,7 @@
#define __PERF_MEM_EVENTS_H
#include <stdbool.h>
-#include <stdint.h>
-#include <stdio.h>
#include <linux/types.h>
-#include <linux/refcount.h>
-#include <linux/perf_event.h>
-#include "stat.h"
-#include "evsel.h"
struct perf_mem_event {
bool record;
@@ -21,13 +15,6 @@ struct perf_mem_event {
const char *event_name;
};
-struct mem_info {
- struct addr_map_symbol iaddr;
- struct addr_map_symbol daddr;
- union perf_mem_data_src data_src;
- refcount_t refcnt;
-};
-
enum {
PERF_MEM_EVENTS__LOAD,
PERF_MEM_EVENTS__STORE,
@@ -35,6 +22,10 @@ enum {
PERF_MEM_EVENTS__MAX,
};
+struct evsel;
+struct mem_info;
+struct perf_pmu;
+
extern unsigned int perf_mem_events__loads_ldlat;
extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX];
@@ -49,13 +40,13 @@ bool is_mem_loads_aux_event(struct evsel *leader);
void perf_pmu__mem_events_list(struct perf_pmu *pmu);
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr);
-int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
-int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
-int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
-int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
-int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
+int perf_mem__tlb_scnprintf(char *out, size_t sz, const struct mem_info *mem_info);
+int perf_mem__lvl_scnprintf(char *out, size_t sz, const struct mem_info *mem_info);
+int perf_mem__snp_scnprintf(char *out, size_t sz, const struct mem_info *mem_info);
+int perf_mem__lck_scnprintf(char *out, size_t sz, const struct mem_info *mem_info);
+int perf_mem__blk_scnprintf(char *out, size_t sz, const struct mem_info *mem_info);
-int perf_script__meminfo_scnprintf(char *bf, size_t size, struct mem_info *mem_info);
+int perf_script__meminfo_scnprintf(char *bf, size_t size, const struct mem_info *mem_info);
struct c2c_stats {
u32 nr_entries;
diff --git a/tools/perf/util/mem-info.c b/tools/perf/util/mem-info.c
new file mode 100644
index 0000000000..27d67721a6
--- /dev/null
+++ b/tools/perf/util/mem-info.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/zalloc.h>
+#include "mem-info.h"
+
+struct mem_info *mem_info__get(struct mem_info *mi)
+{
+ struct mem_info *result;
+
+ if (RC_CHK_GET(result, mi))
+ refcount_inc(mem_info__refcnt(mi));
+
+ return result;
+}
+
+void mem_info__put(struct mem_info *mi)
+{
+ if (mi && refcount_dec_and_test(mem_info__refcnt(mi))) {
+ addr_map_symbol__exit(mem_info__iaddr(mi));
+ addr_map_symbol__exit(mem_info__daddr(mi));
+ RC_CHK_FREE(mi);
+ } else {
+ RC_CHK_PUT(mi);
+ }
+}
+
+struct mem_info *mem_info__new(void)
+{
+ struct mem_info *result = NULL;
+ RC_STRUCT(mem_info) *mi = zalloc(sizeof(*mi));
+
+ if (ADD_RC_CHK(result, mi))
+ refcount_set(mem_info__refcnt(result), 1);
+
+ return result;
+}
diff --git a/tools/perf/util/mem-info.h b/tools/perf/util/mem-info.h
new file mode 100644
index 0000000000..0f68e29f31
--- /dev/null
+++ b/tools/perf/util/mem-info.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_MEM_INFO_H
+#define __PERF_MEM_INFO_H
+
+#include <linux/refcount.h>
+#include <linux/perf_event.h>
+#include <internal/rc_check.h>
+#include "map_symbol.h"
+
+DECLARE_RC_STRUCT(mem_info) {
+ struct addr_map_symbol iaddr;
+ struct addr_map_symbol daddr;
+ union perf_mem_data_src data_src;
+ refcount_t refcnt;
+};
+
+struct mem_info *mem_info__new(void);
+struct mem_info *mem_info__get(struct mem_info *mi);
+void mem_info__put(struct mem_info *mi);
+
+static inline void __mem_info__zput(struct mem_info **mi)
+{
+ mem_info__put(*mi);
+ *mi = NULL;
+}
+
+#define mem_info__zput(mi) __mem_info__zput(&mi)
+
+static inline struct addr_map_symbol *mem_info__iaddr(struct mem_info *mi)
+{
+ return &RC_CHK_ACCESS(mi)->iaddr;
+}
+
+static inline struct addr_map_symbol *mem_info__daddr(struct mem_info *mi)
+{
+ return &RC_CHK_ACCESS(mi)->daddr;
+}
+
+static inline union perf_mem_data_src *mem_info__data_src(struct mem_info *mi)
+{
+ return &RC_CHK_ACCESS(mi)->data_src;
+}
+
+static inline const union perf_mem_data_src *mem_info__const_data_src(const struct mem_info *mi)
+{
+ return &RC_CHK_ACCESS(mi)->data_src;
+}
+
+static inline refcount_t *mem_info__refcnt(struct mem_info *mi)
+{
+ return &RC_CHK_ACCESS(mi)->refcnt;
+}
+
+#endif /* __PERF_MEM_INFO_H */
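
mem-info.h declares the new reference-counted mem_info together with its accessors, replacing the open-coded struct that mem-events.h used to carry. A minimal usage sketch; the surrounding caller and sample_addr are hypothetical:

/* Hypothetical caller: allocate a mem_info, fill it, drop the reference. */
struct mem_info *mi = mem_info__new();

if (mi) {
	mem_info__data_src(mi)->mem_op = PERF_MEM_OP_LOAD;
	mem_info__daddr(mi)->addr = sample_addr;	/* sample_addr: assumed */
	/* ... anyone storing mi long term takes its own mem_info__get(mi) ... */
	mem_info__put(mi);	/* drops the local reference, may free mi */
}
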
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 79ef6095ab..69f6a46402 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -455,7 +455,7 @@ static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
const char *g;
char *omg, *mg;
- mg = strdup(pm->metric_group ?: "No_group");
+ mg = strdup(pm->metric_group ?: pm->metric_name);
if (!mg)
return -ENOMEM;
omg = mg;
@@ -466,7 +466,7 @@ static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
if (strlen(g))
me = mep_lookup(groups, g, pm->metric_name);
else
- me = mep_lookup(groups, "No_group", pm->metric_name);
+ me = mep_lookup(groups, pm->metric_name, pm->metric_name);
if (me) {
me->metric_desc = pm->desc;
@@ -1502,7 +1502,8 @@ static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
pr_debug("Parsing metric events '%s'\n", events.buf);
parse_events_error__init(&parse_error);
ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
- &parse_error, fake_pmu, /*warn_if_reordered=*/false);
+ &parse_error, fake_pmu, /*warn_if_reordered=*/false,
+ /*fake_tp=*/false);
if (ret) {
parse_events_error__print(&parse_error, events.buf);
goto err_out;
@@ -1690,12 +1691,15 @@ int metricgroup__parse_groups(struct evlist *perf_evlist,
bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide,
+ bool hardware_aware_grouping,
struct rblist *metric_events)
{
const struct pmu_metrics_table *table = pmu_metrics_table__find();
if (!table)
return -EINVAL;
+ if (hardware_aware_grouping)
+ pr_debug("Use hardware aware grouping instead of traditional metric grouping method\n");
return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
metric_no_threshold, user_requested_cpu_list, system_wide,
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index d5325c6ec8..779f6ede1b 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -77,6 +77,7 @@ int metricgroup__parse_groups(struct evlist *perf_evlist,
bool metric_no_threshold,
const char *user_requested_cpu_list,
bool system_wide,
+ bool hardware_aware_grouping,
struct rblist *metric_events);
int metricgroup__parse_groups_test(struct evlist *evlist,
const struct pmu_metrics_table *table,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 6f8b0fa176..6ed0f9c558 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -34,11 +34,12 @@
#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
-static int get_config_terms(struct parse_events_terms *head_config, struct list_head *head_terms);
+static int get_config_terms(const struct parse_events_terms *head_config,
+ struct list_head *head_terms);
static int parse_events_terms__copy(const struct parse_events_terms *src,
struct parse_events_terms *dest);
-struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
+const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = {
.symbol = "cpu-cycles",
.alias = "cycles",
@@ -81,7 +82,7 @@ struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
},
};
-struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
+const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
[PERF_COUNT_SW_CPU_CLOCK] = {
.symbol = "cpu-clock",
.alias = "",
@@ -154,7 +155,7 @@ const char *event_type(int type)
return "unknown";
}
-static char *get_config_str(struct parse_events_terms *head_terms,
+static char *get_config_str(const struct parse_events_terms *head_terms,
enum parse_events__term_type type_term)
{
struct parse_events_term *term;
@@ -169,12 +170,12 @@ static char *get_config_str(struct parse_events_terms *head_terms,
return NULL;
}
-static char *get_config_metric_id(struct parse_events_terms *head_terms)
+static char *get_config_metric_id(const struct parse_events_terms *head_terms)
{
return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_METRIC_ID);
}
-static char *get_config_name(struct parse_events_terms *head_terms)
+static char *get_config_name(const struct parse_events_terms *head_terms)
{
return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}
@@ -358,7 +359,7 @@ static int config_term_common(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err);
static int config_attr(struct perf_event_attr *attr,
- struct parse_events_terms *head,
+ const struct parse_events_terms *head,
struct parse_events_error *err,
config_term_func_t config_term);
@@ -442,17 +443,21 @@ bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
return strcmp(parse_state->pmu_filter, pmu->name) != 0;
}
+static int parse_events_add_pmu(struct parse_events_state *parse_state,
+ struct list_head *list, struct perf_pmu *pmu,
+ const struct parse_events_terms *const_parsed_terms,
+ bool auto_merge_stats);
+
int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
struct parse_events_state *parse_state,
- struct parse_events_terms *head_config)
+ struct parse_events_terms *parsed_terms)
{
struct perf_pmu *pmu = NULL;
bool found_supported = false;
- const char *config_name = get_config_name(head_config);
- const char *metric_id = get_config_metric_id(head_config);
+ const char *config_name = get_config_name(parsed_terms);
+ const char *metric_id = get_config_metric_id(parsed_terms);
- /* Legacy cache events are only supported by core PMUs. */
- while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+ while ((pmu = perf_pmus__scan(pmu)) != NULL) {
LIST_HEAD(config_terms);
struct perf_event_attr attr;
int ret;
@@ -460,6 +465,24 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
if (parse_events__filter_pmu(parse_state, pmu))
continue;
+ if (perf_pmu__have_event(pmu, name)) {
+ /*
+ * The PMU has the event, so add it as a PMU event rather
+ * than as a legacy cache event.
+ */
+ ret = parse_events_add_pmu(parse_state, list, pmu,
+ parsed_terms,
+ perf_pmu__auto_merge_stats(pmu));
+ if (ret)
+ return ret;
+ continue;
+ }
+
+ if (!pmu->is_core) {
+ /* Legacy cache events are only supported by core PMUs. */
+ continue;
+ }
+
memset(&attr, 0, sizeof(attr));
attr.type = PERF_TYPE_HW_CACHE;
@@ -469,11 +492,12 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
found_supported = true;
- if (head_config) {
- if (config_attr(&attr, head_config, parse_state->error, config_term_common))
+ if (parsed_terms) {
+ if (config_attr(&attr, parsed_terms, parse_state->error,
+ config_term_common))
return -EINVAL;
- if (get_config_terms(head_config, &config_terms))
+ if (get_config_terms(parsed_terms, &config_terms))
return -ENOMEM;
}
@@ -519,13 +543,15 @@ static void tracepoint_error(struct parse_events_error *e, int err,
parse_events_error__handle(e, column, strdup(str), strdup(help));
}
-static int add_tracepoint(struct list_head *list, int *idx,
+static int add_tracepoint(struct parse_events_state *parse_state,
+ struct list_head *list,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
struct parse_events_terms *head_config, void *loc_)
{
YYLTYPE *loc = loc_;
- struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, (*idx)++);
+ struct evsel *evsel = evsel__newtp_idx(sys_name, evt_name, parse_state->idx++,
+ !parse_state->fake_tp);
if (IS_ERR(evsel)) {
tracepoint_error(err, PTR_ERR(evsel), sys_name, evt_name, loc->first_column);
@@ -544,7 +570,8 @@ static int add_tracepoint(struct list_head *list, int *idx,
return 0;
}
-static int add_tracepoint_multi_event(struct list_head *list, int *idx,
+static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
+ struct list_head *list,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
struct parse_events_terms *head_config, YYLTYPE *loc)
@@ -578,7 +605,7 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
found++;
- ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name,
+ ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name,
err, head_config, loc);
}
@@ -592,19 +619,21 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
return ret;
}
-static int add_tracepoint_event(struct list_head *list, int *idx,
+static int add_tracepoint_event(struct parse_events_state *parse_state,
+ struct list_head *list,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
struct parse_events_terms *head_config, YYLTYPE *loc)
{
return strpbrk(evt_name, "*?") ?
- add_tracepoint_multi_event(list, idx, sys_name, evt_name,
+ add_tracepoint_multi_event(parse_state, list, sys_name, evt_name,
err, head_config, loc) :
- add_tracepoint(list, idx, sys_name, evt_name,
+ add_tracepoint(parse_state, list, sys_name, evt_name,
err, head_config, loc);
}
-static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
+static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
+ struct list_head *list,
const char *sys_name, const char *evt_name,
struct parse_events_error *err,
struct parse_events_terms *head_config, YYLTYPE *loc)
@@ -630,7 +659,7 @@ static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
if (!strglobmatch(events_ent->d_name, sys_name))
continue;
- ret = add_tracepoint_event(list, idx, events_ent->d_name,
+ ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
evt_name, err, head_config, loc);
}
@@ -1085,7 +1114,7 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
#endif
static int config_attr(struct perf_event_attr *attr,
- struct parse_events_terms *head,
+ const struct parse_events_terms *head,
struct parse_events_error *err,
config_term_func_t config_term)
{
@@ -1098,7 +1127,8 @@ static int config_attr(struct perf_event_attr *attr,
return 0;
}
-static int get_config_terms(struct parse_events_terms *head_config, struct list_head *head_terms)
+static int get_config_terms(const struct parse_events_terms *head_config,
+ struct list_head *head_terms)
{
#define ADD_CONFIG_TERM(__type, __weak) \
struct evsel_config_term *__t; \
@@ -1266,7 +1296,8 @@ static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head
return 0;
}
-int parse_events_add_tracepoint(struct list_head *list, int *idx,
+int parse_events_add_tracepoint(struct parse_events_state *parse_state,
+ struct list_head *list,
const char *sys, const char *event,
struct parse_events_error *err,
struct parse_events_terms *head_config, void *loc_)
@@ -1282,14 +1313,14 @@ int parse_events_add_tracepoint(struct list_head *list, int *idx,
}
if (strpbrk(sys, "*?"))
- return add_tracepoint_multi_sys(list, idx, sys, event,
+ return add_tracepoint_multi_sys(parse_state, list, sys, event,
err, head_config, loc);
else
- return add_tracepoint_event(list, idx, sys, event,
+ return add_tracepoint_event(parse_state, list, sys, event,
err, head_config, loc);
#else
+ (void)parse_state;
(void)list;
- (void)idx;
(void)sys;
(void)event;
(void)head_config;
@@ -1302,7 +1333,7 @@ int parse_events_add_tracepoint(struct list_head *list, int *idx,
static int __parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
struct perf_pmu *pmu, u32 type, u32 extended_type,
- u64 config, struct parse_events_terms *head_config)
+ u64 config, const struct parse_events_terms *head_config)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
@@ -1338,7 +1369,7 @@ static int __parse_events_add_numeric(struct parse_events_state *parse_state,
int parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
u32 type, u64 config,
- struct parse_events_terms *head_config,
+ const struct parse_events_terms *head_config,
bool wildcard)
{
struct perf_pmu *pmu = NULL;
@@ -1385,56 +1416,34 @@ static bool config_term_percore(struct list_head *config_terms)
return false;
}
-int parse_events_add_pmu(struct parse_events_state *parse_state,
- struct list_head *list, const char *name,
- const struct parse_events_terms *const_parsed_terms,
- bool auto_merge_stats, void *loc_)
+static int parse_events_add_pmu(struct parse_events_state *parse_state,
+ struct list_head *list, struct perf_pmu *pmu,
+ const struct parse_events_terms *const_parsed_terms,
+ bool auto_merge_stats)
{
struct perf_event_attr attr;
struct perf_pmu_info info;
- struct perf_pmu *pmu;
struct evsel *evsel;
struct parse_events_error *err = parse_state->error;
- YYLTYPE *loc = loc_;
LIST_HEAD(config_terms);
struct parse_events_terms parsed_terms;
bool alias_rewrote_terms = false;
- pmu = parse_state->fake_pmu ?: perf_pmus__find(name);
-
- if (!pmu) {
- char *err_str;
-
- if (asprintf(&err_str,
- "Cannot find PMU `%s'. Missing kernel support?",
- name) >= 0)
- parse_events_error__handle(err, loc->first_column, err_str, NULL);
- return -EINVAL;
- }
-
- parse_events_terms__init(&parsed_terms);
- if (const_parsed_terms) {
- int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
-
- if (ret)
- return ret;
- }
-
if (verbose > 1) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
- if (pmu->selectable && list_empty(&parsed_terms.terms)) {
- strbuf_addf(&sb, "%s//", name);
+ if (pmu->selectable && const_parsed_terms &&
+ list_empty(&const_parsed_terms->terms)) {
+ strbuf_addf(&sb, "%s//", pmu->name);
} else {
- strbuf_addf(&sb, "%s/", name);
- parse_events_terms__to_strbuf(&parsed_terms, &sb);
+ strbuf_addf(&sb, "%s/", pmu->name);
+ parse_events_terms__to_strbuf(const_parsed_terms, &sb);
strbuf_addch(&sb, '/');
}
fprintf(stderr, "Attempt to add: %s\n", sb.buf);
strbuf_release(&sb);
}
- fix_raw(&parsed_terms, pmu);
memset(&attr, 0, sizeof(attr));
if (pmu->perf_event_attr_init_default)
@@ -1442,7 +1451,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
attr.type = pmu->type;
- if (list_empty(&parsed_terms.terms)) {
+ if (!const_parsed_terms || list_empty(&const_parsed_terms->terms)) {
evsel = __add_event(list, &parse_state->idx, &attr,
/*init_attr=*/true, /*name=*/NULL,
/*metric_id=*/NULL, pmu,
@@ -1451,6 +1460,15 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
return evsel ? 0 : -ENOMEM;
}
+ parse_events_terms__init(&parsed_terms);
+ if (const_parsed_terms) {
+ int ret = parse_events_terms__copy(const_parsed_terms, &parsed_terms);
+
+ if (ret)
+ return ret;
+ }
+ fix_raw(&parsed_terms, pmu);
+
/* Configure attr/terms with a known PMU, this will set hardcoded terms. */
if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) {
parse_events_terms__exit(&parsed_terms);
@@ -1469,7 +1487,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
strbuf_init(&sb, /*hint=*/ 0);
parse_events_terms__to_strbuf(&parsed_terms, &sb);
- fprintf(stderr, "..after resolving event: %s/%s/\n", name, sb.buf);
+ fprintf(stderr, "..after resolving event: %s/%s/\n", pmu->name, sb.buf);
strbuf_release(&sb);
}
@@ -1583,8 +1601,8 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
continue;
auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
- if (!parse_events_add_pmu(parse_state, list, pmu->name,
- &parsed_terms, auto_merge_stats, loc)) {
+ if (!parse_events_add_pmu(parse_state, list, pmu,
+ &parsed_terms, auto_merge_stats)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
@@ -1596,8 +1614,8 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
}
if (parse_state->fake_pmu) {
- if (!parse_events_add_pmu(parse_state, list, event_name, &parsed_terms,
- /*auto_merge_stats=*/true, loc)) {
+ if (!parse_events_add_pmu(parse_state, list, parse_state->fake_pmu, &parsed_terms,
+ /*auto_merge_stats=*/true)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
@@ -1618,10 +1636,59 @@ out_err:
return ok ? 0 : -1;
}
-int parse_events__modifier_group(struct list_head *list,
- char *event_mod)
+int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
+ const char *event_or_pmu,
+ const struct parse_events_terms *const_parsed_terms,
+ struct list_head **listp,
+ void *loc_)
{
- return parse_events__modifier_event(list, event_mod, true);
+ YYLTYPE *loc = loc_;
+ struct perf_pmu *pmu;
+ int ok = 0;
+ char *help;
+
+ *listp = malloc(sizeof(**listp));
+ if (!*listp)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(*listp);
+
+ /* Attempt to add to list assuming event_or_pmu is a PMU name. */
+ pmu = parse_state->fake_pmu ?: perf_pmus__find(event_or_pmu);
+ if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
+ /*auto_merge_stats=*/false))
+ return 0;
+
+ pmu = NULL;
+ /* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
+ while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+ if (!parse_events__filter_pmu(parse_state, pmu) &&
+ perf_pmu__match(pmu, event_or_pmu)) {
+ bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
+
+ if (!parse_events_add_pmu(parse_state, *listp, pmu,
+ const_parsed_terms,
+ auto_merge_stats)) {
+ ok++;
+ parse_state->wild_card_pmus = true;
+ }
+ }
+ }
+ if (ok)
+ return 0;
+
+ /* Failure to add, assume event_or_pmu is an event name. */
+ zfree(listp);
+ if (!parse_events_multi_pmu_add(parse_state, event_or_pmu, const_parsed_terms, listp, loc))
+ return 0;
+
+ if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", event_or_pmu) < 0)
+ help = NULL;
+ parse_events_error__handle(parse_state->error, loc->first_column,
+ strdup("Bad event or PMU"),
+ help);
+ zfree(listp);
+ return -EINVAL;
}
void parse_events__set_leader(char *name, struct list_head *list)
@@ -1635,213 +1702,146 @@ void parse_events__set_leader(char *name, struct list_head *list)
leader = list_first_entry(list, struct evsel, core.node);
__perf_evlist__set_leader(list, &leader->core);
+ zfree(&leader->group_name);
leader->group_name = name;
}
-/* list_event is assumed to point to malloc'ed memory */
-void parse_events_update_lists(struct list_head *list_event,
- struct list_head *list_all)
+static int parse_events__modifier_list(struct parse_events_state *parse_state,
+ YYLTYPE *loc,
+ struct list_head *list,
+ struct parse_events_modifier mod,
+ bool group)
{
- /*
- * Called for single event definition. Update the
- * 'all event' list, and reinit the 'single event'
- * list, for next event definition.
- */
- list_splice_tail(list_event, list_all);
- free(list_event);
-}
-
-struct event_modifier {
- int eu;
- int ek;
- int eh;
- int eH;
- int eG;
- int eI;
- int precise;
- int precise_max;
- int exclude_GH;
- int sample_read;
- int pinned;
- int weak;
- int exclusive;
- int bpf_counter;
-};
+ struct evsel *evsel;
+
+ if (!group && mod.weak) {
+ parse_events_error__handle(parse_state->error, loc->first_column,
+ strdup("Weak modifier is for use with groups"), NULL);
+ return -EINVAL;
+ }
-static int get_event_modifier(struct event_modifier *mod, char *str,
- struct evsel *evsel)
-{
- int eu = evsel ? evsel->core.attr.exclude_user : 0;
- int ek = evsel ? evsel->core.attr.exclude_kernel : 0;
- int eh = evsel ? evsel->core.attr.exclude_hv : 0;
- int eH = evsel ? evsel->core.attr.exclude_host : 0;
- int eG = evsel ? evsel->core.attr.exclude_guest : 0;
- int eI = evsel ? evsel->core.attr.exclude_idle : 0;
- int precise = evsel ? evsel->core.attr.precise_ip : 0;
- int precise_max = 0;
- int sample_read = 0;
- int pinned = evsel ? evsel->core.attr.pinned : 0;
- int exclusive = evsel ? evsel->core.attr.exclusive : 0;
-
- int exclude = eu | ek | eh;
- int exclude_GH = evsel ? evsel->exclude_GH : 0;
- int weak = 0;
- int bpf_counter = 0;
-
- memset(mod, 0, sizeof(*mod));
-
- while (*str) {
- if (*str == 'u') {
+ __evlist__for_each_entry(list, evsel) {
+ /* Translate modifiers into the equivalent evsel excludes. */
+ int eu = group ? evsel->core.attr.exclude_user : 0;
+ int ek = group ? evsel->core.attr.exclude_kernel : 0;
+ int eh = group ? evsel->core.attr.exclude_hv : 0;
+ int eH = group ? evsel->core.attr.exclude_host : 0;
+ int eG = group ? evsel->core.attr.exclude_guest : 0;
+ int exclude = eu | ek | eh;
+ int exclude_GH = group ? evsel->exclude_GH : 0;
+
+ if (mod.precise) {
+ /* use of precise requires exclude_guest */
+ eG = 1;
+ }
+ if (mod.user) {
if (!exclude)
exclude = eu = ek = eh = 1;
if (!exclude_GH && !perf_guest)
eG = 1;
eu = 0;
- } else if (*str == 'k') {
+ }
+ if (mod.kernel) {
if (!exclude)
exclude = eu = ek = eh = 1;
ek = 0;
- } else if (*str == 'h') {
+ }
+ if (mod.hypervisor) {
if (!exclude)
exclude = eu = ek = eh = 1;
eh = 0;
- } else if (*str == 'G') {
+ }
+ if (mod.guest) {
if (!exclude_GH)
exclude_GH = eG = eH = 1;
eG = 0;
- } else if (*str == 'H') {
+ }
+ if (mod.host) {
if (!exclude_GH)
exclude_GH = eG = eH = 1;
eH = 0;
- } else if (*str == 'I') {
- eI = 1;
- } else if (*str == 'p') {
- precise++;
- /* use of precise requires exclude_guest */
- if (!exclude_GH)
- eG = 1;
- } else if (*str == 'P') {
- precise_max = 1;
- } else if (*str == 'S') {
- sample_read = 1;
- } else if (*str == 'D') {
- pinned = 1;
- } else if (*str == 'e') {
- exclusive = 1;
- } else if (*str == 'W') {
- weak = 1;
- } else if (*str == 'b') {
- bpf_counter = 1;
- } else
- break;
-
- ++str;
+ }
+ evsel->core.attr.exclude_user = eu;
+ evsel->core.attr.exclude_kernel = ek;
+ evsel->core.attr.exclude_hv = eh;
+ evsel->core.attr.exclude_host = eH;
+ evsel->core.attr.exclude_guest = eG;
+ evsel->exclude_GH = exclude_GH;
+
+ /* Simple modifiers copied to the evsel. */
+ if (mod.precise) {
+ u8 precise = evsel->core.attr.precise_ip + mod.precise;
+ /*
+ * precise ip:
+ *
+ * 0 - SAMPLE_IP can have arbitrary skid
+ * 1 - SAMPLE_IP must have constant skid
+ * 2 - SAMPLE_IP requested to have 0 skid
+ * 3 - SAMPLE_IP must have 0 skid
+ *
+ * See also PERF_RECORD_MISC_EXACT_IP
+ */
+ if (precise > 3) {
+ char *help;
+
+ if (asprintf(&help,
+ "Maximum combined precise value is 3, adding precision to \"%s\"",
+ evsel__name(evsel)) > 0) {
+ parse_events_error__handle(parse_state->error,
+ loc->first_column,
+ help, NULL);
+ }
+ return -EINVAL;
+ }
+ evsel->core.attr.precise_ip = precise;
+ }
+ if (mod.precise_max)
+ evsel->precise_max = 1;
+ if (mod.non_idle)
+ evsel->core.attr.exclude_idle = 1;
+ if (mod.sample_read)
+ evsel->sample_read = 1;
+ if (mod.pinned && evsel__is_group_leader(evsel))
+ evsel->core.attr.pinned = 1;
+ if (mod.exclusive && evsel__is_group_leader(evsel))
+ evsel->core.attr.exclusive = 1;
+ if (mod.weak)
+ evsel->weak_group = true;
+ if (mod.bpf)
+ evsel->bpf_counter = true;
}
-
- /*
- * precise ip:
- *
- * 0 - SAMPLE_IP can have arbitrary skid
- * 1 - SAMPLE_IP must have constant skid
- * 2 - SAMPLE_IP requested to have 0 skid
- * 3 - SAMPLE_IP must have 0 skid
- *
- * See also PERF_RECORD_MISC_EXACT_IP
- */
- if (precise > 3)
- return -EINVAL;
-
- mod->eu = eu;
- mod->ek = ek;
- mod->eh = eh;
- mod->eH = eH;
- mod->eG = eG;
- mod->eI = eI;
- mod->precise = precise;
- mod->precise_max = precise_max;
- mod->exclude_GH = exclude_GH;
- mod->sample_read = sample_read;
- mod->pinned = pinned;
- mod->weak = weak;
- mod->bpf_counter = bpf_counter;
- mod->exclusive = exclusive;
-
return 0;
}
-/*
- * Basic modifier sanity check to validate it contains only one
- * instance of any modifier (apart from 'p') present.
- */
-static int check_modifier(char *str)
+int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
+ struct list_head *list,
+ struct parse_events_modifier mod)
{
- char *p = str;
-
- /* The sizeof includes 0 byte as well. */
- if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
- return -1;
-
- while (*p) {
- if (*p != 'p' && strchr(p + 1, *p))
- return -1;
- p++;
- }
-
- return 0;
+ return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/true);
}
-int parse_events__modifier_event(struct list_head *list, char *str, bool add)
+int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
+ struct list_head *list,
+ struct parse_events_modifier mod)
{
- struct evsel *evsel;
- struct event_modifier mod;
-
- if (str == NULL)
- return 0;
-
- if (check_modifier(str))
- return -EINVAL;
-
- if (!add && get_event_modifier(&mod, str, NULL))
- return -EINVAL;
-
- __evlist__for_each_entry(list, evsel) {
- if (add && get_event_modifier(&mod, str, evsel))
- return -EINVAL;
-
- evsel->core.attr.exclude_user = mod.eu;
- evsel->core.attr.exclude_kernel = mod.ek;
- evsel->core.attr.exclude_hv = mod.eh;
- evsel->core.attr.precise_ip = mod.precise;
- evsel->core.attr.exclude_host = mod.eH;
- evsel->core.attr.exclude_guest = mod.eG;
- evsel->core.attr.exclude_idle = mod.eI;
- evsel->exclude_GH = mod.exclude_GH;
- evsel->sample_read = mod.sample_read;
- evsel->precise_max = mod.precise_max;
- evsel->weak_group = mod.weak;
- evsel->bpf_counter = mod.bpf_counter;
-
- if (evsel__is_group_leader(evsel)) {
- evsel->core.attr.pinned = mod.pinned;
- evsel->core.attr.exclusive = mod.exclusive;
- }
- }
-
- return 0;
+ return parse_events__modifier_list(parse_state, loc, list, mod, /*group=*/false);
}
-int parse_events_name(struct list_head *list, const char *name)
+int parse_events__set_default_name(struct list_head *list, char *name)
{
struct evsel *evsel;
+ bool used_name = false;
__evlist__for_each_entry(list, evsel) {
if (!evsel->name) {
- evsel->name = strdup(name);
+ evsel->name = used_name ? strdup(name) : name;
+ used_name = true;
if (!evsel->name)
return -ENOMEM;
}
}
-
+ if (!used_name)
+ free(name);
return 0;
}
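
parse_events__set_default_name() replaces parse_events_name() and now takes ownership of the string it is given: the first unnamed evsel keeps the original allocation, later unnamed evsels get strdup() copies, and the string is freed if nothing used it. A hedged caller sketch; the literal name is illustrative:

/* Hypothetical caller, e.g. from the grammar's event-name handling. */
char *name = strdup("my_event");

if (!name || parse_events__set_default_name(list, name) < 0)
	return -ENOMEM;	/* on the call path, name was consumed or freed */
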
@@ -2121,7 +2121,7 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
struct parse_events_error *err, struct perf_pmu *fake_pmu,
- bool warn_if_reordered)
+ bool warn_if_reordered, bool fake_tp)
{
struct parse_events_state parse_state = {
.list = LIST_HEAD_INIT(parse_state.list),
@@ -2129,6 +2129,7 @@ int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filte
.error = err,
.stoken = PE_START_EVENTS,
.fake_pmu = fake_pmu,
+ .fake_tp = fake_tp,
.pmu_filter = pmu_filter,
.match_legacy_cache_terms = true,
};
@@ -2338,7 +2339,8 @@ int parse_events_option(const struct option *opt, const char *str,
parse_events_error__init(&err);
ret = __parse_events(*args->evlistp, str, args->pmu_filter, &err,
- /*fake_pmu=*/NULL, /*warn_if_reordered=*/true);
+ /*fake_pmu=*/NULL, /*warn_if_reordered=*/true,
+ /*fake_tp=*/false);
if (ret) {
parse_events_error__print(&err, str);
@@ -2576,7 +2578,7 @@ int parse_events_term__term(struct parse_events_term **term,
}
int parse_events_term__clone(struct parse_events_term **new,
- struct parse_events_term *term)
+ const struct parse_events_term *term)
{
char *str;
struct parse_events_term temp = *term;
@@ -2691,15 +2693,6 @@ int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct
return 0;
}
-void parse_events_evlist_error(struct parse_events_state *parse_state,
- int idx, const char *str)
-{
- if (!parse_state->error)
- return;
-
- parse_events_error__handle(parse_state->error, idx, strdup(str), NULL);
-}
-
static void config_terms_list(char *buf, size_t buf_sz)
{
int i;
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 809359e854..e13de2c8b7 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -32,14 +32,14 @@ int parse_events_option_new_evlist(const struct option *opt, const char *str, in
__attribute__((nonnull(1, 2, 4)))
int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filter,
struct parse_events_error *error, struct perf_pmu *fake_pmu,
- bool warn_if_reordered);
+ bool warn_if_reordered, bool fake_tp);
__attribute__((nonnull(1, 2, 3)))
static inline int parse_events(struct evlist *evlist, const char *str,
struct parse_events_error *err)
{
return __parse_events(evlist, str, /*pmu_filter=*/NULL, err, /*fake_pmu=*/NULL,
- /*warn_if_reordered=*/true);
+ /*warn_if_reordered=*/true, /*fake_tp=*/false);
}
int parse_event(struct evlist *evlist, const char *str);
@@ -152,6 +152,8 @@ struct parse_events_state {
int stoken;
/* Special fake PMU marker for testing. */
struct perf_pmu *fake_pmu;
+ /* Skip actual tracepoint processing for testing. */
+ bool fake_tp;
/* If non-null, when wildcard matching only match the given PMU. */
const char *pmu_filter;
/* Should PE_LEGACY_NAME tokens be generated for config terms? */
@@ -178,7 +180,7 @@ int parse_events_term__term(struct parse_events_term **term,
enum parse_events__term_type term_rhs,
void *loc_term, void *loc_val);
int parse_events_term__clone(struct parse_events_term **new,
- struct parse_events_term *term);
+ const struct parse_events_term *term);
void parse_events_term__delete(struct parse_events_term *term);
void parse_events_terms__delete(struct parse_events_terms *terms);
@@ -186,33 +188,49 @@ void parse_events_terms__init(struct parse_events_terms *terms);
void parse_events_terms__exit(struct parse_events_terms *terms);
int parse_events_terms(struct parse_events_terms *terms, const char *str, FILE *input);
int parse_events_terms__to_strbuf(const struct parse_events_terms *terms, struct strbuf *sb);
-int parse_events__modifier_event(struct list_head *list, char *str, bool add);
-int parse_events__modifier_group(struct list_head *list, char *event_mod);
-int parse_events_name(struct list_head *list, const char *name);
-int parse_events_add_tracepoint(struct list_head *list, int *idx,
+
+struct parse_events_modifier {
+ u8 precise; /* Number of repeated 'p' for precision. */
+ bool precise_max : 1; /* 'P' */
+ bool non_idle : 1; /* 'I' */
+ bool sample_read : 1; /* 'S' */
+ bool pinned : 1; /* 'D' */
+ bool exclusive : 1; /* 'e' */
+ bool weak : 1; /* 'W' */
+ bool bpf : 1; /* 'b' */
+ bool user : 1; /* 'u' */
+ bool kernel : 1; /* 'k' */
+ bool hypervisor : 1; /* 'h' */
+ bool guest : 1; /* 'G' */
+ bool host : 1; /* 'H' */
+};
+
+int parse_events__modifier_event(struct parse_events_state *parse_state, void *loc,
+ struct list_head *list, struct parse_events_modifier mod);
+int parse_events__modifier_group(struct parse_events_state *parse_state, void *loc,
+ struct list_head *list, struct parse_events_modifier mod);
+int parse_events__set_default_name(struct list_head *list, char *name);
+int parse_events_add_tracepoint(struct parse_events_state *parse_state,
+ struct list_head *list,
const char *sys, const char *event,
struct parse_events_error *error,
struct parse_events_terms *head_config, void *loc);
int parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
u32 type, u64 config,
- struct parse_events_terms *head_config,
+ const struct parse_events_terms *head_config,
bool wildcard);
int parse_events_add_tool(struct parse_events_state *parse_state,
struct list_head *list,
int tool_event);
int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
struct parse_events_state *parse_state,
- struct parse_events_terms *head_config);
+ struct parse_events_terms *parsed_terms);
int parse_events__decode_legacy_cache(const char *name, int pmu_type, __u64 *config);
int parse_events_add_breakpoint(struct parse_events_state *parse_state,
struct list_head *list,
u64 addr, char *type, u64 len,
struct parse_events_terms *head_config);
-int parse_events_add_pmu(struct parse_events_state *parse_state,
- struct list_head *list, const char *name,
- const struct parse_events_terms *const_parsed_terms,
- bool auto_merge_stats, void *loc);
struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
const char *name, const char *metric_id,
@@ -223,18 +241,20 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
const struct parse_events_terms *const_parsed_terms,
struct list_head **listp, void *loc);
+int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state,
+ const char *event_or_pmu,
+ const struct parse_events_terms *const_parsed_terms,
+ struct list_head **listp,
+ void *loc_);
+
void parse_events__set_leader(char *name, struct list_head *list);
-void parse_events_update_lists(struct list_head *list_event,
- struct list_head *list_all);
-void parse_events_evlist_error(struct parse_events_state *parse_state,
- int idx, const char *str);
struct event_symbol {
const char *symbol;
const char *alias;
};
-extern struct event_symbol event_symbols_hw[];
-extern struct event_symbol event_symbols_sw[];
+extern const struct event_symbol event_symbols_hw[];
+extern const struct event_symbol event_symbols_sw[];
char *parse_events_formats_error_string(char *additional_terms);
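
The string-based modifier handling is replaced by struct parse_events_modifier above, which the lexer's modifiers() routine fills while rejecting duplicate letters. For example, an event spec ending in ':ukpp' would produce roughly this value (illustrative only):

/* Illustrative result of lexing the modifier suffix "ukpp". */
struct parse_events_modifier mod = {
	.user    = true,	/* 'u' */
	.kernel  = true,	/* 'k' */
	.precise = 2,		/* two 'p's: SAMPLE_IP requested to have 0 skid */
};
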
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index e86c45675e..16045c383a 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -18,26 +18,34 @@
char *parse_events_get_text(yyscan_t yyscanner);
YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
+int parse_events_get_column(yyscan_t yyscanner);
+int parse_events_get_leng(yyscan_t yyscanner);
-static int __value(YYSTYPE *yylval, char *str, int base, int token)
+static int get_column(yyscan_t scanner)
{
- u64 num;
-
- errno = 0;
- num = strtoull(str, NULL, base);
- if (errno)
- return PE_ERROR;
-
- yylval->num = num;
- return token;
+ return parse_events_get_column(scanner) - parse_events_get_leng(scanner);
}
-static int value(yyscan_t scanner, int base)
+static int value(struct parse_events_state *parse_state, yyscan_t scanner, int base)
{
YYSTYPE *yylval = parse_events_get_lval(scanner);
char *text = parse_events_get_text(scanner);
+ u64 num;
- return __value(yylval, text, base, PE_VALUE);
+ errno = 0;
+ num = strtoull(text, NULL, base);
+ if (errno) {
+ struct parse_events_error *error = parse_state->error;
+ char *help = NULL;
+
+ if (asprintf(&help, "Bad base %d number \"%s\"", base, text) > 0)
+ parse_events_error__handle(error, get_column(scanner), help, NULL);
+
+ return PE_ERROR;
+ }
+
+ yylval->num = num;
+ return PE_VALUE;
}
static int str(yyscan_t scanner, int token)
@@ -88,6 +96,11 @@ static int drv_str(yyscan_t scanner, int token)
return token;
}
+/*
+ * Use yyless to return all the characaters to the input. Update the column for
+ * location debugging. If __alloc is non-zero set yylval to the text for the
+ * returned token's value.
+ */
#define REWIND(__alloc) \
do { \
YYSTYPE *__yylval = parse_events_get_lval(yyscanner); \
@@ -134,6 +147,77 @@ static int hw_term(yyscan_t scanner, int config)
return PE_TERM_HW;
}
+static void modifiers_error(struct parse_events_state *parse_state, yyscan_t scanner,
+ int pos, char mod_char, const char *mod_name)
+{
+ struct parse_events_error *error = parse_state->error;
+ char *help = NULL;
+
+ if (asprintf(&help, "Duplicate modifier '%c' (%s)", mod_char, mod_name) > 0)
+ parse_events_error__handle(error, get_column(scanner) + pos, help, NULL);
+}
+
+static int modifiers(struct parse_events_state *parse_state, yyscan_t scanner)
+{
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+ struct parse_events_modifier mod = { .precise = 0, };
+
+ for (size_t i = 0, n = strlen(text); i < n; i++) {
+#define CASE(c, field) \
+ case c: \
+ if (mod.field) { \
+ modifiers_error(parse_state, scanner, i, c, #field); \
+ return PE_ERROR; \
+ } \
+ mod.field = true; \
+ break
+
+ switch (text[i]) {
+ CASE('u', user);
+ CASE('k', kernel);
+ CASE('h', hypervisor);
+ CASE('I', non_idle);
+ CASE('G', guest);
+ CASE('H', host);
+ case 'p':
+ mod.precise++;
+ /*
+ * precise ip:
+ *
+ * 0 - SAMPLE_IP can have arbitrary skid
+ * 1 - SAMPLE_IP must have constant skid
+ * 2 - SAMPLE_IP requested to have 0 skid
+ * 3 - SAMPLE_IP must have 0 skid
+ *
+ * See also PERF_RECORD_MISC_EXACT_IP
+ */
+ if (mod.precise > 3) {
+ struct parse_events_error *error = parse_state->error;
+ char *help = strdup("Maximum precise value is 3");
+
+ if (help) {
+ parse_events_error__handle(error, get_column(scanner) + i,
+ help, NULL);
+ }
+ return PE_ERROR;
+ }
+ break;
+ CASE('P', precise_max);
+ CASE('S', sample_read);
+ CASE('D', pinned);
+ CASE('W', weak);
+ CASE('e', exclusive);
+ CASE('b', bpf);
+ default:
+ return PE_ERROR;
+ }
+#undef CASE
+ }
+ yylval->mod = mod;
+ return PE_MODIFIER_EVENT;
+}
+
#define YY_USER_ACTION \
do { \
yylloc->last_column = yylloc->first_column; \
@@ -158,15 +242,15 @@ event [^,{}/]+
num_dec [0-9]+
num_hex 0x[a-fA-F0-9]{1,16}
num_raw_hex [a-fA-F0-9]{1,16}
-name [a-zA-Z_*?\[\]][a-zA-Z0-9_*?.\[\]!\-]*
-name_tag [\'][a-zA-Z_*?\[\]][a-zA-Z0-9_*?\-,\.\[\]:=]*[\']
+name [a-zA-Z0-9_*?\[\]][a-zA-Z0-9_*?.\[\]!\-]*
+name_tag [\'][a-zA-Z0-9_*?\[\]][a-zA-Z0-9_*?\-,\.\[\]:=]*[\']
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
/*
* If you add a modifier you need to update check_modifier().
* Also, the letters in modifier_event must not be in modifier_bp.
*/
-modifier_event [ukhpPGHSDIWeb]+
+modifier_event [ukhpPGHSDIWeb]{1,15}
modifier_bp [rwx]{1,3}
lc_type (L1-dcache|l1-d|l1d|L1-data|L1-icache|l1-i|l1i|L1-instruction|LLC|L2|dTLB|d-tlb|Data-TLB|iTLB|i-tlb|Instruction-TLB|branch|branches|bpu|btb|bpc|node)
lc_op_result (load|loads|read|store|stores|write|prefetch|prefetches|speculative-read|speculative-load|refs|Reference|ops|access|misses|miss)
@@ -283,8 +367,8 @@ r0x{num_raw_hex} { return str(yyscanner, PE_RAW); }
*/
"/"/{digit} { return PE_BP_SLASH; }
"/"/{non_digit} { BEGIN(config); return '/'; }
-{num_dec} { return value(yyscanner, 10); }
-{num_hex} { return value(yyscanner, 16); }
+{num_dec} { return value(_parse_state, yyscanner, 10); }
+{num_hex} { return value(_parse_state, yyscanner, 16); }
/*
* We need to separate 'mem:' scanner part, in order to get specific
* modifier bits parsed out. Otherwise we would need to handle PE_NAME
@@ -330,10 +414,10 @@ cgroup-switches { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CG
{lc_type}-{lc_op_result}-{lc_op_result} { return str(yyscanner, PE_LEGACY_CACHE); }
mem: { BEGIN(mem); return PE_PREFIX_MEM; }
r{num_raw_hex} { return str(yyscanner, PE_RAW); }
-{num_dec} { return value(yyscanner, 10); }
-{num_hex} { return value(yyscanner, 16); }
+{num_dec} { return value(_parse_state, yyscanner, 10); }
+{num_hex} { return value(_parse_state, yyscanner, 16); }
-{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); }
+{modifier_event} { return modifiers(_parse_state, yyscanner); }
{name} { return str(yyscanner, PE_NAME); }
{name_tag} { return str(yyscanner, PE_NAME); }
"/" { BEGIN(config); return '/'; }
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index d70f5d84af..b3c51f06cb 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -69,12 +69,12 @@ static void free_list_evsel(struct list_head* list_evsel)
%type <num> PE_VALUE_SYM_HW
%type <num> PE_VALUE_SYM_SW
%type <num> PE_VALUE_SYM_TOOL
+%type <mod> PE_MODIFIER_EVENT
%type <term_type> PE_TERM
%type <num> value_sym
%type <str> PE_RAW
%type <str> PE_NAME
%type <str> PE_LEGACY_CACHE
-%type <str> PE_MODIFIER_EVENT
%type <str> PE_MODIFIER_BP
%type <str> PE_EVENT_NAME
%type <str> PE_DRV_CFG_TERM
@@ -111,6 +111,7 @@ static void free_list_evsel(struct list_head* list_evsel)
{
char *str;
u64 num;
+ struct parse_events_modifier mod;
enum parse_events__term_type term_type;
struct list_head *list_evsel;
struct parse_events_terms *list_terms;
@@ -126,6 +127,10 @@ static void free_list_evsel(struct list_head* list_evsel)
}
%%
+ /*
+ * Entry points. We are either parsing events or terminals. Just terminal
+ * parsing is used for parsing events in sysfs.
+ */
start:
PE_START_EVENTS start_events
|
@@ -133,31 +138,36 @@ PE_START_TERMS start_terms
start_events: groups
{
+ /* Take the parsed events, groups.. and place into parse_state. */
+ struct list_head *groups = $1;
struct parse_events_state *parse_state = _parse_state;
- /* frees $1 */
- parse_events_update_lists($1, &parse_state->list);
+ list_splice_tail(groups, &parse_state->list);
+ free(groups);
}
-groups:
+groups: /* A list of groups or events. */
groups ',' group
{
- struct list_head *list = $1;
- struct list_head *group = $3;
+ /* Merge group into the list of events/groups. */
+ struct list_head *groups = $1;
+ struct list_head *group = $3;
- /* frees $3 */
- parse_events_update_lists(group, list);
- $$ = list;
+ list_splice_tail(group, groups);
+ free(group);
+ $$ = groups;
}
|
groups ',' event
{
- struct list_head *list = $1;
+ /* Merge event into the list of events/groups. */
+ struct list_head *groups = $1;
struct list_head *event = $3;
- /* frees $3 */
- parse_events_update_lists(event, list);
- $$ = list;
+
+ list_splice_tail(event, groups);
+ free(event);
+ $$ = groups;
}
|
group
@@ -167,20 +177,13 @@ event
group:
group_def ':' PE_MODIFIER_EVENT
{
+ /* Apply the modifier to the events in the group_def. */
struct list_head *list = $1;
int err;
- err = parse_events__modifier_group(list, $3);
- free($3);
- if (err) {
- struct parse_events_state *parse_state = _parse_state;
- struct parse_events_error *error = parse_state->error;
-
- parse_events_error__handle(error, @3.first_column,
- strdup("Bad modifier"), NULL);
- free_list_evsel(list);
+ err = parse_events__modifier_group(_parse_state, &@3, list, $3);
+ if (err)
YYABORT;
- }
$$ = list;
}
|
@@ -191,7 +194,10 @@ PE_NAME '{' events '}'
{
struct list_head *list = $3;
- /* Takes ownership of $1. */
+ /*
+ * Set the first entry of list to be the leader. Set the group name on
+ * the leader to $1 taking ownership.
+ */
parse_events__set_leader($1, list);
$$ = list;
}
@@ -200,6 +206,7 @@ PE_NAME '{' events '}'
{
struct list_head *list = $2;
+ /* Set the first entry of list to be the leader clearing the group name. */
parse_events__set_leader(NULL, list);
$$ = list;
}
@@ -207,12 +214,12 @@ PE_NAME '{' events '}'
events:
events ',' event
{
+ struct list_head *events = $1;
struct list_head *event = $3;
- struct list_head *list = $1;
- /* frees $3 */
- parse_events_update_lists(event, list);
- $$ = list;
+ list_splice_tail(event, events);
+ free(event);
+ $$ = events;
}
|
event
@@ -230,17 +237,9 @@ event_name PE_MODIFIER_EVENT
* (there could be more events added for multiple tracepoint
* definitions via '*?').
*/
- err = parse_events__modifier_event(list, $2, false);
- free($2);
- if (err) {
- struct parse_events_state *parse_state = _parse_state;
- struct parse_events_error *error = parse_state->error;
-
- parse_events_error__handle(error, @2.first_column,
- strdup("Bad modifier"), NULL);
- free_list_evsel(list);
+ err = parse_events__modifier_event(_parse_state, &@2, list, $2);
+ if (err)
YYABORT;
- }
$$ = list;
}
|
@@ -249,10 +248,14 @@ event_name
event_name:
PE_EVENT_NAME event_def
{
- int err;
+ /*
+ * When an event is parsed the text is rewound and the entire text of
+ * the event is set to the str of PE_EVENT_NAME token matched here. If
+ * no name was on an event via a term, set the name to the entire text
+ * taking ownership of the allocation.
+ */
+ int err = parse_events__set_default_name($2, $1);
- err = parse_events_name($2, $1);
- free($1);
if (err) {
free_list_evsel($2);
YYNOMEM;
@@ -273,78 +276,15 @@ event_def: event_pmu |
event_pmu:
PE_NAME opt_pmu_config
{
- struct parse_events_state *parse_state = _parse_state;
/* List of created evsels. */
struct list_head *list = NULL;
- char *pattern = NULL;
+ int err = parse_events_multi_pmu_add_or_add_pmu(_parse_state, $1, $2, &list, &@1);
-#define CLEANUP \
- do { \
- parse_events_terms__delete($2); \
- free(list); \
- free($1); \
- free(pattern); \
- } while(0)
-
- list = alloc_list();
- if (!list) {
- CLEANUP;
- YYNOMEM;
- }
- /* Attempt to add to list assuming $1 is a PMU name. */
- if (parse_events_add_pmu(parse_state, list, $1, $2, /*auto_merge_stats=*/false, &@1)) {
- struct perf_pmu *pmu = NULL;
- int ok = 0;
-
- /* Failure to add, try wildcard expansion of $1 as a PMU name. */
- if (asprintf(&pattern, "%s*", $1) < 0) {
- CLEANUP;
- YYNOMEM;
- }
-
- while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- const char *name = pmu->name;
-
- if (parse_events__filter_pmu(parse_state, pmu))
- continue;
-
- if (!strncmp(name, "uncore_", 7) &&
- strncmp($1, "uncore_", 7))
- name += 7;
- if (!perf_pmu__match(pattern, name, $1) ||
- !perf_pmu__match(pattern, pmu->alias_name, $1)) {
- bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
-
- if (!parse_events_add_pmu(parse_state, list, pmu->name, $2,
- auto_merge_stats, &@1)) {
- ok++;
- parse_state->wild_card_pmus = true;
- }
- }
- }
-
- if (!ok) {
- /* Failure to add, assume $1 is an event name. */
- zfree(&list);
- ok = !parse_events_multi_pmu_add(parse_state, $1, $2, &list, &@1);
- }
- if (!ok) {
- struct parse_events_error *error = parse_state->error;
- char *help;
-
- if (asprintf(&help, "Unable to find PMU or event on a PMU of '%s'", $1) < 0)
- help = NULL;
- parse_events_error__handle(error, @1.first_column,
- strdup("Bad event or PMU"),
- help);
- CLEANUP;
- YYABORT;
- }
- }
+ parse_events_terms__delete($2);
+ free($1);
+ if (err)
+ PE_ABORT(err);
$$ = list;
- list = NULL;
- CLEANUP;
-#undef CLEANUP
}
|
PE_NAME sep_dc
@@ -537,7 +477,7 @@ tracepoint_name opt_event_config
if (!list)
YYNOMEM;
- err = parse_events_add_tracepoint(list, &parse_state->idx, $1.sys, $1.event,
+ err = parse_events_add_tracepoint(parse_state, list, $1.sys, $1.event,
error, $2, &@1);
parse_events_terms__delete($2);
@@ -666,6 +606,11 @@ event_term
}
name_or_raw: PE_RAW | PE_NAME | PE_LEGACY_CACHE
+|
+PE_TERM_HW
+{
+ $$ = $1.str;
+}
event_term:
PE_RAW
@@ -707,20 +652,6 @@ name_or_raw '=' PE_VALUE
$$ = term;
}
|
-name_or_raw '=' PE_TERM_HW
-{
- struct parse_events_term *term;
- int err = parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
- $1, $3.str, &@1, &@3);
-
- if (err) {
- free($1);
- free($3.str);
- PE_ABORT(err);
- }
- $$ = term;
-}
-|
PE_LEGACY_CACHE
{
struct parse_events_term *term;
@@ -773,18 +704,6 @@ PE_TERM '=' name_or_raw
$$ = term;
}
|
-PE_TERM '=' PE_TERM_HW
-{
- struct parse_events_term *term;
- int err = parse_events_term__str(&term, $1, /*config=*/NULL, $3.str, &@1, &@3);
-
- if (err) {
- free($3.str);
- PE_ABORT(err);
- }
- $$ = term;
-}
-|
PE_TERM '=' PE_TERM
{
struct parse_events_term *term;
@@ -845,9 +764,15 @@ sep_slash_slash_dc: '/' '/' | ':' |
%%
-void parse_events_error(YYLTYPE *loc, void *parse_state,
+void parse_events_error(YYLTYPE *loc, void *_parse_state,
void *scanner __maybe_unused,
char const *msg __maybe_unused)
{
- parse_events_evlist_error(parse_state, loc->last_column, "parser error");
+ struct parse_events_state *parse_state = _parse_state;
+
+ if (!parse_state->error || !list_empty(&parse_state->error->list))
+ return;
+
+ parse_events_error__handle(parse_state->error, loc->last_column,
+ strdup("Unrecognized input"), NULL);
}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index cc349d9cb0..888ce99122 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -194,7 +194,7 @@ static void perf_pmu_format__load(const struct perf_pmu *pmu, struct perf_pmu_fo
* Parse & process all the sysfs attributes located under
* the directory specified in 'dir' parameter.
*/
-int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load)
+static int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load)
{
struct dirent *evt_ent;
DIR *format_dir;
@@ -244,7 +244,7 @@ int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load)
* located at:
* /sys/bus/event_source/devices/<dev>/format as sysfs group attributes.
*/
-static int pmu_format(struct perf_pmu *pmu, int dirfd, const char *name)
+static int pmu_format(struct perf_pmu *pmu, int dirfd, const char *name, bool eager_load)
{
int fd;
@@ -253,7 +253,7 @@ static int pmu_format(struct perf_pmu *pmu, int dirfd, const char *name)
return 0;
/* it'll close the fd */
- if (perf_pmu__format_parse(pmu, fd, /*eager_load=*/false))
+ if (perf_pmu__format_parse(pmu, fd, eager_load))
return -1;
return 0;
@@ -551,7 +551,8 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
unit = pe->unit;
perpkg = pe->perpkg;
deprecated = pe->deprecated;
- pmu_name = pe->pmu;
+ if (pe->pmu && strcmp(pe->pmu, "default_core"))
+ pmu_name = pe->pmu;
}
alias = zalloc(sizeof(*alias));
@@ -634,33 +635,18 @@ static inline bool pmu_alias_info_file(const char *name)
* Reading the pmu event aliases definition, which should be located at:
* /sys/bus/event_source/devices/<dev>/events as sysfs group attributes.
*/
-static int pmu_aliases_parse(struct perf_pmu *pmu)
+static int __pmu_aliases_parse(struct perf_pmu *pmu, int events_dir_fd)
{
- char path[PATH_MAX];
struct dirent *evt_ent;
DIR *event_dir;
- size_t len;
- int fd, dir_fd;
-
- len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
- if (!len)
- return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/events", pmu->name);
-
- dir_fd = open(path, O_DIRECTORY);
- if (dir_fd == -1) {
- pmu->sysfs_aliases_loaded = true;
- return 0;
- }
- event_dir = fdopendir(dir_fd);
- if (!event_dir){
- close (dir_fd);
+ event_dir = fdopendir(events_dir_fd);
+ if (!event_dir)
return -EINVAL;
- }
while ((evt_ent = readdir(event_dir))) {
char *name = evt_ent->d_name;
+ int fd;
FILE *file;
if (!strcmp(name, ".") || !strcmp(name, ".."))
@@ -672,7 +658,7 @@ static int pmu_aliases_parse(struct perf_pmu *pmu)
if (pmu_alias_info_file(name))
continue;
- fd = openat(dir_fd, name, O_RDONLY);
+ fd = openat(events_dir_fd, name, O_RDONLY);
if (fd == -1) {
pr_debug("Cannot open %s\n", name);
continue;
@@ -691,11 +677,50 @@ static int pmu_aliases_parse(struct perf_pmu *pmu)
}
closedir(event_dir);
- close (dir_fd);
pmu->sysfs_aliases_loaded = true;
return 0;
}
+static int pmu_aliases_parse(struct perf_pmu *pmu)
+{
+ char path[PATH_MAX];
+ size_t len;
+ int events_dir_fd, ret;
+
+ if (pmu->sysfs_aliases_loaded)
+ return 0;
+
+ len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
+ if (!len)
+ return 0;
+ scnprintf(path + len, sizeof(path) - len, "%s/events", pmu->name);
+
+ events_dir_fd = open(path, O_DIRECTORY);
+ if (events_dir_fd == -1) {
+ pmu->sysfs_aliases_loaded = true;
+ return 0;
+ }
+ ret = __pmu_aliases_parse(pmu, events_dir_fd);
+ close(events_dir_fd);
+ return ret;
+}
+
+static int pmu_aliases_parse_eager(struct perf_pmu *pmu, int sysfs_fd)
+{
+ char path[FILENAME_MAX + 7];
+ int ret, events_dir_fd;
+
+ scnprintf(path, sizeof(path), "%s/events", pmu->name);
+ events_dir_fd = openat(sysfs_fd, path, O_DIRECTORY, 0);
+ if (events_dir_fd == -1) {
+ pmu->sysfs_aliases_loaded = true;
+ return 0;
+ }
+ ret = __pmu_aliases_parse(pmu, events_dir_fd);
+ close(events_dir_fd);
+ return ret;
+}
+
static int pmu_alias_terms(struct perf_pmu_alias *alias, int err_loc, struct list_head *terms)
{
struct parse_events_term *term, *cloned;
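
The alias loading is now split: pmu_aliases_parse() lazily opens /sys/bus/event_source/devices/<pmu>/events and hands the fd to __pmu_aliases_parse(), while pmu_aliases_parse_eager() does the same through an already-open sysfs fd so test PMUs can be fully populated up front. What both walk is just the per-PMU events directory; as a rough standalone illustration (the "cpu" PMU name and the sysfs mount point are assumptions, and unlike the real parser this does not filter the <event>.scale/<event>.unit style info files):

/* Sketch only: list the sysfs files that back a PMU's event aliases. */
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/event_source/devices/cpu/events";
	DIR *dir = opendir(path);
	struct dirent *ent;

	if (!dir) {
		perror(path);
		return 1;
	}
	while ((ent = readdir(dir)) != NULL) {
		if (ent->d_name[0] == '.')
			continue;
		printf("%s\n", ent->d_name);
	}
	closedir(dir);
	return 0;
}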
@@ -1034,7 +1059,8 @@ perf_pmu__arch_init(struct perf_pmu *pmu)
pmu->mem_events = perf_mem_events;
}
-struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *name)
+struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *name,
+ bool eager_load)
{
struct perf_pmu *pmu;
__u32 type;
@@ -1063,7 +1089,7 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
* type value and format definitions. Load both right
* now.
*/
- if (pmu_format(pmu, dirfd, name))
+ if (pmu_format(pmu, dirfd, name, eager_load))
goto err;
pmu->is_core = is_pmu_core(name);
@@ -1087,6 +1113,9 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char
perf_pmu__arch_init(pmu);
+ if (eager_load)
+ pmu_aliases_parse_eager(pmu, dirfd);
+
return pmu;
err:
zfree(&pmu->name);
@@ -1649,6 +1678,62 @@ bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name)
return false;
}
+int perf_pmu__for_each_format(struct perf_pmu *pmu, void *state, pmu_format_callback cb)
+{
+ static const char *const terms[] = {
+ "config=0..0xffffffffffffffff",
+ "config1=0..0xffffffffffffffff",
+ "config2=0..0xffffffffffffffff",
+ "config3=0..0xffffffffffffffff",
+ "name=string",
+ "period=number",
+ "freq=number",
+ "branch_type=(u|k|hv|any|...)",
+ "time",
+ "call-graph=(fp|dwarf|lbr)",
+ "stack-size=number",
+ "max-stack=number",
+ "nr=number",
+ "inherit",
+ "no-inherit",
+ "overwrite",
+ "no-overwrite",
+ "percore",
+ "aux-output",
+ "aux-sample-size=number",
+ };
+ struct perf_pmu_format *format;
+ int ret;
+
+ /*
+ * max-events and driver-config are missing above as are the internal
+ * types user, metric-id, raw, legacy cache and hardware. Assert against
+ * the enum parse_events__term_type so they are kept in sync.
+ */
+ _Static_assert(ARRAY_SIZE(terms) == __PARSE_EVENTS__TERM_TYPE_NR - 6,
+ "perf_pmu__for_each_format()'s terms must be kept in sync with enum parse_events__term_type");
+ list_for_each_entry(format, &pmu->format, list) {
+ perf_pmu_format__load(pmu, format);
+ ret = cb(state, format->name, (int)format->value, format->bits);
+ if (ret)
+ return ret;
+ }
+ if (!pmu->is_core)
+ return 0;
+
+ for (size_t i = 0; i < ARRAY_SIZE(terms); i++) {
+ int config = PERF_PMU_FORMAT_VALUE_CONFIG;
+
+ if (i < PERF_PMU_FORMAT_VALUE_CONFIG_END)
+ config = i;
+
+ ret = cb(state, terms[i], config, /*bits=*/NULL);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
bool is_pmu_core(const char *name)
{
return !strcmp(name, "cpu") || !strcmp(name, "cpum_cf") || is_sysfs_pmu_core(name);
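
perf_pmu__for_each_format() walks a PMU's sysfs format entries and, for core PMUs, the synthesized list of common perf_event_attr terms above, passing each to a pmu_format_callback. A minimal caller might look like the sketch below; it is an illustration that assumes it is built inside the perf tools tree so util/pmu.h supplies the typedef and the iterator.

/* Sketch only: count and print the formats a PMU exposes. */
#include <stdio.h>
#include "pmu.h"

static int show_format_cb(void *state, const char *name, int config,
			  const unsigned long *bits)
{
	int *nr = state;

	(*nr)++;
	/* bits is NULL for the synthesized common terms of core PMUs. */
	printf("format %d: %s (config word %d, %s)\n", *nr, name, config,
	       bits ? "from sysfs" : "synthesized common term");
	return 0;	/* a non-zero return stops the iteration */
}

/*
 * Usage:
 *	int nr = 0;
 *
 *	perf_pmu__for_each_format(pmu, &nr, show_format_cb);
 */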
@@ -1744,8 +1829,12 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
pmu_add_cpu_aliases(pmu);
list_for_each_entry(event, &pmu->aliases, list) {
size_t buf_used;
+ int pmu_name_len;
info.pmu_name = event->pmu_name ?: pmu->name;
+ pmu_name_len = skip_duplicate_pmus
+ ? pmu_name_len_no_suffix(info.pmu_name, /*num=*/NULL)
+ : (int)strlen(info.pmu_name);
info.alias = NULL;
if (event->desc) {
info.name = event->name;
@@ -1770,7 +1859,7 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus,
info.encoding_desc = buf + buf_used;
parse_events_terms__to_strbuf(&event->terms, &sb);
buf_used += snprintf(buf + buf_used, sizeof(buf) - buf_used,
- "%s/%s/", info.pmu_name, sb.buf) + 1;
+ "%.*s/%s/", pmu_name_len, info.pmu_name, sb.buf) + 1;
info.topic = event->topic;
info.str = sb.buf;
info.deprecated = event->deprecated;
@@ -2051,18 +2140,29 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
name ?: "N/A", buf, config_name, config);
}
-int perf_pmu__match(const char *pattern, const char *name, const char *tok)
+bool perf_pmu__match(const struct perf_pmu *pmu, const char *tok)
{
- if (!name)
- return -1;
+ const char *name = pmu->name;
+ bool need_fnmatch = strchr(tok, '*') != NULL;
- if (fnmatch(pattern, name, 0))
- return -1;
+ if (!strncmp(tok, "uncore_", 7))
+ tok += 7;
+ if (!strncmp(name, "uncore_", 7))
+ name += 7;
- if (tok && !perf_pmu__match_ignoring_suffix(name, tok))
- return -1;
+ if (perf_pmu__match_ignoring_suffix(name, tok) ||
+ (need_fnmatch && !fnmatch(tok, name, 0)))
+ return true;
- return 0;
+ name = pmu->alias_name;
+ if (!name)
+ return false;
+
+ if (!strncmp(name, "uncore_", 7))
+ name += 7;
+
+ return perf_pmu__match_ignoring_suffix(name, tok) ||
+ (need_fnmatch && !fnmatch(tok, name, 0));
}
double __weak perf_pmu__cpu_slots_per_cycle(void)
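
perf_pmu__match() now takes the PMU itself, strips an "uncore_" prefix from both the token and the name, tries the suffix-ignoring comparison first (so a bare "uncore_cha" token can match a suffixed instance such as "uncore_cha_0"), and only falls back to fnmatch() when the token really contains a '*'. That fallback is plain POSIX globbing; a standalone sketch of just this part:

/* Sketch only: the fnmatch() wildcard fallback in isolation. */
#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	const char *names[] = { "cha_0", "imc_1", "cpu" };

	for (int i = 0; i < 3; i++) {
		/* fnmatch() returns 0 on a match, as the code above relies on. */
		int hit = fnmatch("cha*", names[i], 0) == 0;

		printf("\"cha*\" vs %-6s : %s\n", names[i], hit ? "match" : "no match");
	}
	return 0;
}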
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 77c59ebc05..b2d3fd291f 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -198,6 +198,8 @@ struct pmu_event_info {
};
typedef int (*pmu_event_callback)(void *state, struct pmu_event_info *info);
+typedef int (*pmu_format_callback)(void *state, const char *name, int config,
+ const unsigned long *bits);
void pmu_add_sys_aliases(struct perf_pmu *pmu);
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
@@ -214,9 +216,9 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_
struct parse_events_error *err);
int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb);
-int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load);
void perf_pmu_format__set_value(void *format, int config, unsigned long *bits);
bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name);
+int perf_pmu__for_each_format(struct perf_pmu *pmu, void *state, pmu_format_callback cb);
bool is_pmu_core(const char *name);
bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu);
@@ -262,7 +264,7 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
const char *config_name);
void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
-int perf_pmu__match(const char *pattern, const char *name, const char *tok);
+bool perf_pmu__match(const struct perf_pmu *pmu, const char *tok);
double perf_pmu__cpu_slots_per_cycle(void);
int perf_pmu__event_source_devices_scnprintf(char *pathname, size_t size);
@@ -271,7 +273,8 @@ int perf_pmu__pathname_scnprintf(char *buf, size_t size,
int perf_pmu__event_source_devices_fd(void);
int perf_pmu__pathname_fd(int dirfd, const char *pmu_name, const char *filename, int flags);
-struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name);
+struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *lookup_name,
+ bool eager_load);
struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus);
void perf_pmu__delete(struct perf_pmu *pmu);
struct perf_pmu *perf_pmus__find_core_pmu(void);
diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c
index 16505071d3..6907e3e7fb 100644
--- a/tools/perf/util/pmus.c
+++ b/tools/perf/util/pmus.c
@@ -16,6 +16,7 @@
#include "pmus.h"
#include "pmu.h"
#include "print-events.h"
+#include "strbuf.h"
/*
* core_pmus: A PMU belongs to core_pmus if its name is "cpu" or its sysfs
@@ -123,7 +124,8 @@ struct perf_pmu *perf_pmus__find(const char *name)
return NULL;
dirfd = perf_pmu__event_source_devices_fd();
- pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
+ pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
+ /*eager_load=*/false);
close(dirfd);
if (!pmu) {
@@ -158,7 +160,8 @@ static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
if (core_pmu && read_sysfs_core_pmus)
return NULL;
- return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name);
+ return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
+ /*eager_load=*/false);
}
static int pmus_cmp(void *priv __maybe_unused,
@@ -474,8 +477,8 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p
qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
for (int j = 0; j < len; j++) {
/* Skip duplicates */
- if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1]))
- continue;
+ if (j < len - 1 && pmu_alias_is_duplicate(&aliases[j], &aliases[j + 1]))
+ goto free;
print_cb->print_event(print_state,
aliases[j].pmu_name,
@@ -488,6 +491,7 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p
aliases[j].desc,
aliases[j].long_desc,
aliases[j].encoding_desc);
+free:
zfree(&aliases[j].name);
zfree(&aliases[j].alias);
zfree(&aliases[j].scale_unit);
@@ -503,6 +507,99 @@ void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *p
zfree(&aliases);
}
+struct build_format_string_args {
+ struct strbuf short_string;
+ struct strbuf long_string;
+ int num_formats;
+};
+
+static int build_format_string(void *state, const char *name, int config,
+ const unsigned long *bits)
+{
+ struct build_format_string_args *args = state;
+ unsigned int num_bits;
+ int ret1, ret2 = 0;
+
+ (void)config;
+ args->num_formats++;
+ if (args->num_formats > 1) {
+ strbuf_addch(&args->long_string, ',');
+ if (args->num_formats < 4)
+ strbuf_addch(&args->short_string, ',');
+ }
+ num_bits = bits ? bitmap_weight(bits, PERF_PMU_FORMAT_BITS) : 0;
+ if (num_bits <= 1) {
+ ret1 = strbuf_addf(&args->long_string, "%s", name);
+ if (args->num_formats < 4)
+ ret2 = strbuf_addf(&args->short_string, "%s", name);
+ } else if (num_bits > 8) {
+ ret1 = strbuf_addf(&args->long_string, "%s=0..0x%llx", name,
+ ULLONG_MAX >> (64 - num_bits));
+ if (args->num_formats < 4) {
+ ret2 = strbuf_addf(&args->short_string, "%s=0..0x%llx", name,
+ ULLONG_MAX >> (64 - num_bits));
+ }
+ } else {
+ ret1 = strbuf_addf(&args->long_string, "%s=0..%llu", name,
+ ULLONG_MAX >> (64 - num_bits));
+ if (args->num_formats < 4) {
+ ret2 = strbuf_addf(&args->short_string, "%s=0..%llu", name,
+ ULLONG_MAX >> (64 - num_bits));
+ }
+ }
+ return ret1 < 0 ? ret1 : (ret2 < 0 ? ret2 : 0);
+}
+
+void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, void *print_state)
+{
+ bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
+ struct perf_pmu *(*scan_fn)(struct perf_pmu *);
+ struct perf_pmu *pmu = NULL;
+
+ if (skip_duplicate_pmus)
+ scan_fn = perf_pmus__scan_skip_duplicates;
+ else
+ scan_fn = perf_pmus__scan;
+
+ while ((pmu = scan_fn(pmu)) != NULL) {
+ struct build_format_string_args format_args = {
+ .short_string = STRBUF_INIT,
+ .long_string = STRBUF_INIT,
+ .num_formats = 0,
+ };
+ int len = pmu_name_len_no_suffix(pmu->name, /*num=*/NULL);
+ const char *desc = "(see 'man perf-list' or 'man perf-record' on how to encode it)";
+
+ if (!pmu->is_core)
+ desc = NULL;
+
+ strbuf_addf(&format_args.short_string, "%.*s/", len, pmu->name);
+ strbuf_addf(&format_args.long_string, "%.*s/", len, pmu->name);
+ perf_pmu__for_each_format(pmu, &format_args, build_format_string);
+
+ if (format_args.num_formats > 3)
+ strbuf_addf(&format_args.short_string, ",.../modifier");
+ else
+ strbuf_addf(&format_args.short_string, "/modifier");
+
+ strbuf_addf(&format_args.long_string, "/modifier");
+ print_cb->print_event(print_state,
+ /*topic=*/NULL,
+ /*pmu_name=*/NULL,
+ format_args.short_string.buf,
+ /*event_alias=*/NULL,
+ /*scale_unit=*/NULL,
+ /*deprecated=*/false,
+ "Raw event descriptor",
+ desc,
+ /*long_desc=*/NULL,
+ format_args.long_string.buf);
+
+ strbuf_release(&format_args.short_string);
+ strbuf_release(&format_args.long_string);
+ }
+}
+
bool perf_pmus__have_event(const char *pname, const char *name)
{
struct perf_pmu *pmu = perf_pmus__find(pname);
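
In build_format_string() above, single-bit (or absent) formats print as bare flags, fields of two to eight bits print a decimal 0..max range, wider fields print a hex range, and the short form keeps only the first three formats before collapsing the rest into ",...". The max value itself is just ULLONG_MAX shifted by the unused bit count, as this standalone sketch shows:

/* Sketch only: ULLONG_MAX >> (64 - n) is the largest value an n-bit field can hold. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int bits[] = { 2, 8, 20, 64 };

	for (int i = 0; i < 4; i++) {
		unsigned long long max = ULLONG_MAX >> (64 - bits[i]);

		printf("%2u-bit field: 0..0x%llx\n", bits[i], max);
	}
	return 0;
}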
@@ -602,3 +699,13 @@ struct perf_pmu *perf_pmus__find_core_pmu(void)
{
return perf_pmus__scan_core(NULL);
}
+
+struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name)
+{
+ /*
+ * Some PMU functions read from the sysfs mount point, so care is
+ * needed, hence passing the eager_load flag to load things like the
+ * format files.
+ */
+ return perf_pmu__lookup(&other_pmus, test_sysfs_dirfd, name, /*eager_load=*/true);
+}
diff --git a/tools/perf/util/pmus.h b/tools/perf/util/pmus.h
index 94d2a08d89..9d4ded80b8 100644
--- a/tools/perf/util/pmus.h
+++ b/tools/perf/util/pmus.h
@@ -18,9 +18,12 @@ struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu);
const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str);
void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state);
+void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, void *print_state);
bool perf_pmus__have_event(const char *pname, const char *name);
int perf_pmus__num_core_pmus(void);
bool perf_pmus__supports_extended_type(void);
char *perf_pmus__default_pmu_name(void);
+struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name);
+
#endif /* __PMUS_H */
diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c
index 7b54e93854..3f38c27f01 100644
--- a/tools/perf/util/print-events.c
+++ b/tools/perf/util/print-events.c
@@ -9,6 +9,7 @@
#include <unistd.h>
#include <api/fs/tracing_path.h>
+#include <api/io.h>
#include <linux/stddef.h>
#include <linux/perf_event.h>
#include <linux/zalloc.h>
@@ -38,7 +39,7 @@ static const char * const event_type_descriptors[] = {
"Software event",
"Tracepoint event",
"Hardware cache event",
- "Raw hardware event descriptor",
+ "Raw event descriptor",
"Hardware breakpoint",
};
@@ -92,34 +93,48 @@ void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unus
evt_items = scandirat(events_fd, sys_dirent->d_name, &evt_namelist, NULL, alphasort);
for (int j = 0; j < evt_items; j++) {
+ /*
+ * Buffer sized at twice the max filename length + 1
+ * separator + 1 \0 terminator.
+ */
+ char buf[NAME_MAX * 2 + 2];
+ /* 16 possible hex digits and 22 other characters and \0. */
+ char encoding[16 + 22];
struct dirent *evt_dirent = evt_namelist[j];
- char evt_path[MAXPATHLEN];
- int evt_fd;
+ struct io id;
+ __u64 config;
if (evt_dirent->d_type != DT_DIR ||
!strcmp(evt_dirent->d_name, ".") ||
!strcmp(evt_dirent->d_name, ".."))
goto next_evt;
- snprintf(evt_path, sizeof(evt_path), "%s/id", evt_dirent->d_name);
- evt_fd = openat(dir_fd, evt_path, O_RDONLY);
- if (evt_fd < 0)
+ snprintf(buf, sizeof(buf), "%s/id", evt_dirent->d_name);
+ io__init(&id, openat(dir_fd, buf, O_RDONLY), buf, sizeof(buf));
+
+ if (id.fd < 0)
+ goto next_evt;
+
+ if (io__get_dec(&id, &config) < 0) {
+ close(id.fd);
goto next_evt;
- close(evt_fd);
+ }
+ close(id.fd);
- snprintf(evt_path, MAXPATHLEN, "%s:%s",
+ snprintf(buf, sizeof(buf), "%s:%s",
sys_dirent->d_name, evt_dirent->d_name);
+ snprintf(encoding, sizeof(encoding), "tracepoint/config=0x%llx/", config);
print_cb->print_event(print_state,
/*topic=*/NULL,
- /*pmu_name=*/NULL,
- evt_path,
+ /*pmu_name=*/NULL, /* really "tracepoint" */
+ /*event_name=*/buf,
/*event_alias=*/NULL,
/*scale_unit=*/NULL,
/*deprecated=*/false,
"Tracepoint event",
/*desc=*/NULL,
/*long_desc=*/NULL,
- /*encoding_desc=*/NULL);
+ encoding);
next_evt:
free(evt_namelist[j]);
}
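
print_tracepoint_events() above now reads each tracepoint's numeric id (a decimal value, parsed with io__get_dec()) and reports it as a tracepoint/config=0x.../ encoding instead of leaving the encoding column empty. The same lookup can be reproduced outside the perf tree with plain stdio; in the sketch below the sched:sched_switch path and the tracefs mount point are assumptions:

/* Sketch only: read a tracepoint id and print its raw event encoding. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/events/sched/sched_switch/id";
	unsigned long long config;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &config) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("sched:sched_switch -> tracepoint/config=0x%llx/\n", config);
	return 0;
}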
@@ -401,8 +416,6 @@ void print_symbol_events(const struct print_callbacks *print_cb, void *print_sta
*/
void print_events(const struct print_callbacks *print_cb, void *print_state)
{
- char *tmp;
-
print_symbol_events(print_cb, print_state, PERF_TYPE_HARDWARE,
event_symbols_hw, PERF_COUNT_HW_MAX);
print_symbol_events(print_cb, print_state, PERF_TYPE_SOFTWARE,
@@ -426,21 +439,7 @@ void print_events(const struct print_callbacks *print_cb, void *print_state)
/*long_desc=*/NULL,
/*encoding_desc=*/NULL);
- if (asprintf(&tmp, "%s/t1=v1[,t2=v2,t3 ...]/modifier",
- perf_pmus__scan_core(/*pmu=*/NULL)->name) > 0) {
- print_cb->print_event(print_state,
- /*topic=*/NULL,
- /*pmu_name=*/NULL,
- tmp,
- /*event_alias=*/NULL,
- /*scale_unit=*/NULL,
- /*deprecated=*/false,
- event_type_descriptors[PERF_TYPE_RAW],
- "(see 'man perf-list' on how to encode it)",
- /*long_desc=*/NULL,
- /*encoding_desc=*/NULL);
- free(tmp);
- }
+ perf_pmus__print_raw_pmu_events(print_cb, print_state);
print_cb->print_event(print_state,
/*topic=*/NULL,
diff --git a/tools/perf/util/print_insn.c b/tools/perf/util/print_insn.c
index 459e0e93d7..a950e9157d 100644
--- a/tools/perf/util/print_insn.c
+++ b/tools/perf/util/print_insn.c
@@ -4,6 +4,7 @@
*
* Author(s): Changbin Du <changbin.du@huawei.com>
*/
+#include <inttypes.h>
#include <string.h>
#include <stdbool.h>
#include "debug.h"
@@ -12,6 +13,9 @@
#include "machine.h"
#include "thread.h"
#include "print_insn.h"
+#include "dump-insn.h"
+#include "map.h"
+#include "dso.h"
size_t sample__fprintf_insn_raw(struct perf_sample *sample, FILE *fp)
{
@@ -28,12 +32,12 @@ size_t sample__fprintf_insn_raw(struct perf_sample *sample, FILE *fp)
#ifdef HAVE_LIBCAPSTONE_SUPPORT
#include <capstone/capstone.h>
-static int capstone_init(struct machine *machine, csh *cs_handle)
+static int capstone_init(struct machine *machine, csh *cs_handle, bool is64)
{
cs_arch arch;
cs_mode mode;
- if (machine__is(machine, "x86_64")) {
+ if (machine__is(machine, "x86_64") && is64) {
arch = CS_ARCH_X86;
mode = CS_MODE_64;
} else if (machine__normalized_is(machine, "x86")) {
@@ -69,8 +73,8 @@ static int capstone_init(struct machine *machine, csh *cs_handle)
return 0;
}
-static size_t print_insn_x86(struct perf_sample *sample, struct thread *thread,
- cs_insn *insn, FILE *fp)
+static size_t print_insn_x86(struct thread *thread, u8 cpumode, cs_insn *insn,
+ int print_opts, FILE *fp)
{
struct addr_location al;
size_t printed = 0;
@@ -80,9 +84,11 @@ static size_t print_insn_x86(struct perf_sample *sample, struct thread *thread,
addr_location__init(&al);
if (op->type == X86_OP_IMM &&
- thread__find_symbol(thread, sample->cpumode, op->imm, &al)) {
+ thread__find_symbol(thread, cpumode, op->imm, &al)) {
printed += fprintf(fp, "%s ", insn[0].mnemonic);
printed += symbol__fprintf_symname_offs(al.sym, &al, fp);
+ if (print_opts & PRINT_INSN_IMM_HEX)
+ printed += fprintf(fp, " [%#" PRIx64 "]", op->imm);
addr_location__exit(&al);
return printed;
}
@@ -93,42 +99,71 @@ static size_t print_insn_x86(struct perf_sample *sample, struct thread *thread,
return printed;
}
-size_t sample__fprintf_insn_asm(struct perf_sample *sample, struct thread *thread,
- struct machine *machine, FILE *fp)
+static bool is64bitip(struct machine *machine, struct addr_location *al)
{
- csh cs_handle;
+ const struct dso *dso = al->map ? map__dso(al->map) : NULL;
+
+ if (dso)
+ return dso__is_64_bit(dso);
+
+ return machine__is(machine, "x86_64") ||
+ machine__normalized_is(machine, "arm64") ||
+ machine__normalized_is(machine, "s390");
+}
+
+ssize_t fprintf_insn_asm(struct machine *machine, struct thread *thread, u8 cpumode,
+ bool is64bit, const uint8_t *code, size_t code_size,
+ uint64_t ip, int *lenp, int print_opts, FILE *fp)
+{
+ size_t printed;
cs_insn *insn;
+ csh cs_handle;
size_t count;
- size_t printed = 0;
int ret;
/* TODO: Try to initiate capstone only once but need a proper place. */
- ret = capstone_init(machine, &cs_handle);
- if (ret < 0) {
- /* fallback */
- return sample__fprintf_insn_raw(sample, fp);
- }
+ ret = capstone_init(machine, &cs_handle, is64bit);
+ if (ret < 0)
+ return ret;
- count = cs_disasm(cs_handle, (uint8_t *)sample->insn, sample->insn_len,
- sample->ip, 1, &insn);
+ count = cs_disasm(cs_handle, code, code_size, ip, 1, &insn);
if (count > 0) {
if (machine__normalized_is(machine, "x86"))
- printed += print_insn_x86(sample, thread, &insn[0], fp);
+ printed = print_insn_x86(thread, cpumode, &insn[0], print_opts, fp);
else
- printed += fprintf(fp, "%s %s", insn[0].mnemonic, insn[0].op_str);
+ printed = fprintf(fp, "%s %s", insn[0].mnemonic, insn[0].op_str);
+ if (lenp)
+ *lenp = insn->size;
cs_free(insn, count);
} else {
- printed += fprintf(fp, "illegal instruction");
+ printed = -1;
}
cs_close(&cs_handle);
return printed;
}
+
+size_t sample__fprintf_insn_asm(struct perf_sample *sample, struct thread *thread,
+ struct machine *machine, FILE *fp,
+ struct addr_location *al)
+{
+ bool is64bit = is64bitip(machine, al);
+ ssize_t printed;
+
+ printed = fprintf_insn_asm(machine, thread, sample->cpumode, is64bit,
+ (uint8_t *)sample->insn, sample->insn_len,
+ sample->ip, NULL, 0, fp);
+ if (printed < 0)
+ return sample__fprintf_insn_raw(sample, fp);
+
+ return printed;
+}
#else
size_t sample__fprintf_insn_asm(struct perf_sample *sample __maybe_unused,
struct thread *thread __maybe_unused,
struct machine *machine __maybe_unused,
- FILE *fp __maybe_unused)
+ FILE *fp __maybe_unused,
+ struct addr_location *al __maybe_unused)
{
return 0;
}
diff --git a/tools/perf/util/print_insn.h b/tools/perf/util/print_insn.h
index 465bdcfcc2..07d11af3fc 100644
--- a/tools/perf/util/print_insn.h
+++ b/tools/perf/util/print_insn.h
@@ -8,9 +8,15 @@
struct perf_sample;
struct thread;
struct machine;
+struct perf_insn;
+
+#define PRINT_INSN_IMM_HEX (1<<0)
size_t sample__fprintf_insn_asm(struct perf_sample *sample, struct thread *thread,
- struct machine *machine, FILE *fp);
+ struct machine *machine, FILE *fp, struct addr_location *al);
size_t sample__fprintf_insn_raw(struct perf_sample *sample, FILE *fp);
+ssize_t fprintf_insn_asm(struct machine *machine, struct thread *thread, u8 cpumode,
+ bool is64bit, const uint8_t *code, size_t code_size,
+ uint64_t ip, int *lenp, int print_opts, FILE *fp);
#endif /* PERF_PRINT_INSN_H */
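
fprintf_insn_asm() is the reusable core here: callers pass raw instruction bytes, the ip and whether the code is 64-bit (decided per DSO by is64bitip()), and a negative return lets sample__fprintf_insn_asm() fall back to the raw dump itself instead of printing "illegal instruction". The capstone calls it wraps follow the library's usual open/disasm/free/close sequence; a self-contained sketch against libcapstone (not perf code, link with -lcapstone):

/* Sketch only: disassemble one x86-64 instruction with libcapstone. */
#include <capstone/capstone.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	const uint8_t code[] = { 0x48, 0x89, 0xe5 };	/* mov rbp, rsp */
	csh handle;
	cs_insn *insn;
	size_t count;

	if (cs_open(CS_ARCH_X86, CS_MODE_64, &handle) != CS_ERR_OK)
		return 1;
	count = cs_disasm(handle, code, sizeof(code), /*address=*/0x1000, /*count=*/1, &insn);
	if (count > 0) {
		printf("0x%" PRIx64 ":\t%s %s\n", insn[0].address,
		       insn[0].mnemonic, insn[0].op_str);
		cs_free(insn, count);
	}
	cs_close(&handle);
	return 0;
}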
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 5c12459e97..a17c9b8a7a 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -159,8 +159,8 @@ static int kernel_get_module_map_cb(struct map *map, void *data)
{
struct kernel_get_module_map_cb_args *args = data;
struct dso *dso = map__dso(map);
- const char *short_name = dso->short_name; /* short_name is "[module]" */
- u16 short_name_len = dso->short_name_len;
+ const char *short_name = dso__short_name(dso);
+ u16 short_name_len = dso__short_name_len(dso);
if (strncmp(short_name + 1, args->module, short_name_len - 2) == 0 &&
args->module[short_name_len - 2] == '\0') {
@@ -202,10 +202,9 @@ struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user)
map = dso__new_map(target);
dso = map ? map__dso(map) : NULL;
if (dso) {
- mutex_lock(&dso->lock);
- nsinfo__put(dso->nsinfo);
- dso->nsinfo = nsinfo__get(nsi);
- mutex_unlock(&dso->lock);
+ mutex_lock(dso__lock(dso));
+ dso__set_nsinfo(dso, nsinfo__get(nsi));
+ mutex_unlock(dso__lock(dso));
}
return map;
} else {
@@ -236,7 +235,7 @@ static int convert_exec_to_group(const char *exec, char **result)
}
}
- ret = e_snprintf(buf, 64, "%s_%s", PERFPROBE_GROUP, ptr1);
+ ret = e_snprintf(buf, sizeof(buf), "%s_%s", PERFPROBE_GROUP, ptr1);
if (ret < 0)
goto out;
@@ -368,11 +367,11 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso)
map = machine__kernel_map(host_machine);
dso = map__dso(map);
- if (!dso->has_build_id)
+ if (!dso__has_build_id(dso))
dso__read_running_kernel_build_id(dso, host_machine);
vmlinux_name = symbol_conf.vmlinux_name;
- dso->load_errno = 0;
+ *dso__load_errno(dso) = 0;
if (vmlinux_name)
ret = dso__load_vmlinux(dso, map, vmlinux_name, false);
else
@@ -499,7 +498,7 @@ static struct debuginfo *open_from_debuginfod(struct dso *dso, struct nsinfo *ns
if (!c)
return NULL;
- build_id__sprintf(&dso->bid, sbuild_id);
+ build_id__sprintf(dso__bid(dso), sbuild_id);
fd = debuginfod_find_debuginfo(c, (const unsigned char *)sbuild_id,
0, &path);
if (fd >= 0)
@@ -542,7 +541,7 @@ static struct debuginfo *open_debuginfo(const char *module, struct nsinfo *nsi,
if (!module || !strchr(module, '/')) {
err = kernel_get_module_dso(module, &dso);
if (err < 0) {
- if (!dso || dso->load_errno == 0) {
+ if (!dso || *dso__load_errno(dso) == 0) {
if (!str_error_r(-err, reason, STRERR_BUFSIZE))
strcpy(reason, "(unknown)");
} else
@@ -559,7 +558,7 @@ static struct debuginfo *open_debuginfo(const char *module, struct nsinfo *nsi,
}
return NULL;
}
- path = dso->long_name;
+ path = dso__long_name(dso);
}
nsinfo__mountns_enter(nsi, &nsc);
ret = debuginfo__new(path);
@@ -2758,7 +2757,7 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
/* Try no suffix number */
ret = e_snprintf(buf, len, "%s%s", nbase, ret_event ? "__return" : "");
if (ret < 0) {
- pr_debug("snprintf() failed: %d\n", ret);
+ pr_warning("snprintf() failed: %d; the event name nbase='%s' is too long\n", ret, nbase);
goto out;
}
if (!strlist__has_entry(namelist, buf))
@@ -2867,7 +2866,7 @@ static int probe_trace_event__set_name(struct probe_trace_event *tev,
group = PERFPROBE_GROUP;
/* Get an unused new event name */
- ret = get_new_event_name(buf, 64, event, namelist,
+ ret = get_new_event_name(buf, sizeof(buf), event, namelist,
tev->point.retprobe, allow_suffix);
if (ret < 0)
return ret;
@@ -3795,8 +3794,8 @@ int show_available_funcs(const char *target, struct nsinfo *nsi,
/* Show all (filtered) symbols */
setup_pager();
- for (size_t i = 0; i < dso->symbol_names_len; i++) {
- struct symbol *pos = dso->symbol_names[i];
+ for (size_t i = 0; i < dso__symbol_names_len(dso); i++) {
+ struct symbol *pos = dso__symbol_names(dso)[i];
if (strfilter__compare(_filter, pos->name))
printf("%s\n", pos->name);
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c8923375e3..630e16c54e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -186,8 +186,6 @@ static_var:
return ret2;
}
-#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long))
-
static int convert_variable_type(Dwarf_Die *vr_die,
struct probe_trace_arg *tvar,
const char *cast, bool user_access)
@@ -217,7 +215,7 @@ static int convert_variable_type(Dwarf_Die *vr_die,
total = dwarf_bytesize(vr_die);
if (boffs < 0 || total < 0)
return -ENOENT;
- ret = snprintf(buf, 16, "b%d@%d/%zd", bsize, boffs,
+ ret = snprintf(buf, 16, "b%d@%d/%d", bsize, boffs,
BYTES_TO_BITS(total));
goto formatted;
}
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
index 87e817b3cf..e867de8dda 100644
--- a/tools/perf/util/record.c
+++ b/tools/perf/util/record.c
@@ -237,7 +237,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str)
evsel = evlist__last(temp_evlist);
- if (!evlist || perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {
+ if (!evlist || perf_cpu_map__is_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {
struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
if (cpus)
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index b072ac5d3b..e16257d5ab 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -320,10 +320,10 @@ static SV *perl_process_callchain(struct perf_sample *sample,
const char *dsoname = "[unknown]";
if (dso) {
- if (symbol_conf.show_kernel_path && dso->long_name)
- dsoname = dso->long_name;
+ if (symbol_conf.show_kernel_path && dso__long_name(dso))
+ dsoname = dso__long_name(dso);
else
- dsoname = dso->name;
+ dsoname = dso__name(dso);
}
if (!hv_stores(elem, "dso", newSVpv(dsoname,0))) {
hv_undef(elem);
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index b4f0f60e60..fb00f3ad68 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -45,6 +45,7 @@
#include "../thread.h"
#include "../comm.h"
#include "../machine.h"
+#include "../mem-info.h"
#include "../db-export.h"
#include "../thread-stack.h"
#include "../trace-event.h"
@@ -393,10 +394,10 @@ static const char *get_dsoname(struct map *map)
struct dso *dso = map ? map__dso(map) : NULL;
if (dso) {
- if (symbol_conf.show_kernel_path && dso->long_name)
- dsoname = dso->long_name;
+ if (symbol_conf.show_kernel_path && dso__long_name(dso))
+ dsoname = dso__long_name(dso);
else
- dsoname = dso->name;
+ dsoname = dso__name(dso);
}
return dsoname;
@@ -720,15 +721,20 @@ static void set_sample_read_in_dict(PyObject *dict_sample,
}
static void set_sample_datasrc_in_dict(PyObject *dict,
- struct perf_sample *sample)
+ struct perf_sample *sample)
{
- struct mem_info mi = { .data_src.val = sample->data_src };
+ struct mem_info *mi = mem_info__new();
char decode[100];
+ if (!mi)
+ Py_FatalError("couldn't create mem-info");
+
pydict_set_item_string_decref(dict, "datasrc",
PyLong_FromUnsignedLongLong(sample->data_src));
- perf_script__meminfo_scnprintf(decode, 100, &mi);
+ mem_info__data_src(mi)->val = sample->data_src;
+ perf_script__meminfo_scnprintf(decode, 100, mi);
+ mem_info__put(mi);
pydict_set_item_string_decref(dict, "datasrc_decode",
_PyUnicode_FromString(decode));
@@ -799,8 +805,9 @@ static void set_sym_in_dict(PyObject *dict, struct addr_location *al,
if (al->map) {
struct dso *dso = map__dso(al->map);
- pydict_set_item_string_decref(dict, dso_field, _PyUnicode_FromString(dso->name));
- build_id__sprintf(&dso->bid, sbuild_id);
+ pydict_set_item_string_decref(dict, dso_field,
+ _PyUnicode_FromString(dso__name(dso)));
+ build_id__sprintf(dso__bid(dso), sbuild_id);
pydict_set_item_string_decref(dict, dso_bid_field,
_PyUnicode_FromString(sbuild_id));
pydict_set_item_string_decref(dict, dso_map_start,
@@ -1246,14 +1253,14 @@ static int python_export_dso(struct db_export *dbe, struct dso *dso,
char sbuild_id[SBUILD_ID_SIZE];
PyObject *t;
- build_id__sprintf(&dso->bid, sbuild_id);
+ build_id__sprintf(dso__bid(dso), sbuild_id);
t = tuple_new(5);
- tuple_set_d64(t, 0, dso->db_id);
+ tuple_set_d64(t, 0, dso__db_id(dso));
tuple_set_d64(t, 1, machine->db_id);
- tuple_set_string(t, 2, dso->short_name);
- tuple_set_string(t, 3, dso->long_name);
+ tuple_set_string(t, 2, dso__short_name(dso));
+ tuple_set_string(t, 3, dso__long_name(dso));
tuple_set_string(t, 4, sbuild_id);
call_object(tables->dso_handler, t, "dso_table");
@@ -1273,7 +1280,7 @@ static int python_export_symbol(struct db_export *dbe, struct symbol *sym,
t = tuple_new(6);
tuple_set_d64(t, 0, *sym_db_id);
- tuple_set_d64(t, 1, dso->db_id);
+ tuple_set_d64(t, 1, dso__db_id(dso));
tuple_set_d64(t, 2, sym->start);
tuple_set_d64(t, 3, sym->end);
tuple_set_s32(t, 4, sym->binding);
@@ -1699,13 +1706,15 @@ static void python_process_stat(struct perf_stat_config *config,
{
struct perf_thread_map *threads = counter->core.threads;
struct perf_cpu_map *cpus = counter->core.cpus;
- int cpu, thread;
- for (thread = 0; thread < perf_thread_map__nr(threads); thread++) {
- for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
- process_stat(counter, perf_cpu_map__cpu(cpus, cpu),
+ for (int thread = 0; thread < perf_thread_map__nr(threads); thread++) {
+ int idx;
+ struct perf_cpu cpu;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+ process_stat(counter, cpu,
perf_thread_map__pid(threads, thread), tstamp,
- perf_counts(counter->counts, cpu, thread));
+ perf_counts(counter->counts, idx, thread));
}
}
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 06d0bd7fb4..a10343b9dc 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -2749,6 +2749,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
int i, err = -1;
struct perf_cpu_map *map;
int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
+ struct perf_cpu cpu;
for (i = 0; i < PERF_TYPE_MAX; ++i) {
struct evsel *evsel;
@@ -2770,9 +2771,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
return -1;
}
- for (i = 0; i < perf_cpu_map__nr(map); i++) {
- struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
-
+ perf_cpu_map__for_each_cpu(cpu, i, map) {
if (cpu.cpu >= nr_cpus) {
pr_err("Requested CPU %d too large. "
"Consider raising MAX_NR_CPUS\n", cpu.cpu);
@@ -2917,3 +2916,24 @@ int perf_event__process_id_index(struct perf_session *session,
}
return 0;
}
+
+int perf_session__dsos_hit_all(struct perf_session *session)
+{
+ struct rb_node *nd;
+ int err;
+
+ err = machine__hit_all_dsos(&session->machines.host);
+ if (err)
+ return err;
+
+ for (nd = rb_first_cached(&session->machines.guests); nd;
+ nd = rb_next(nd)) {
+ struct machine *pos = rb_entry(nd, struct machine, rb_node);
+
+ err = machine__hit_all_dsos(pos);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 5064c6ec11..3b0256e977 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -156,6 +156,8 @@ int perf_session__deliver_synth_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample);
+int perf_session__dsos_hit_all(struct perf_session *session);
+
int perf_event__process_id_index(struct perf_session *session,
union perf_event *event);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 92a1bd695e..ab7c7ff35f 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -23,6 +23,7 @@
#include "strlist.h"
#include "strbuf.h"
#include "mem-events.h"
+#include "mem-info.h"
#include "annotate.h"
#include "annotate-data.h"
#include "event.h"
@@ -239,11 +240,11 @@ static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
return cmp_null(dso_r, dso_l);
if (verbose > 0) {
- dso_name_l = dso_l->long_name;
- dso_name_r = dso_r->long_name;
+ dso_name_l = dso__long_name(dso_l);
+ dso_name_r = dso__long_name(dso_r);
} else {
- dso_name_l = dso_l->short_name;
- dso_name_r = dso_r->short_name;
+ dso_name_l = dso__short_name(dso_l);
+ dso_name_r = dso__short_name(dso_r);
}
return strcmp(dso_name_l, dso_name_r);
@@ -262,7 +263,7 @@ static int _hist_entry__dso_snprintf(struct map *map, char *bf,
const char *dso_name = "[unknown]";
if (dso)
- dso_name = verbose > 0 ? dso->long_name : dso->short_name;
+ dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso);
return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
}
@@ -333,7 +334,7 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
* comparing symbol address alone is not enough since it's a
* relative address within a dso.
*/
- if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
+ if (!hists__has(left->hists, dso)) {
ret = sort__dso_cmp(left, right);
if (ret != 0)
return ret;
@@ -364,7 +365,7 @@ static int _hist_entry__sym_snprintf(struct map_symbol *ms,
char o = dso ? dso__symtab_origin(dso) : '!';
u64 rip = ip;
- if (dso && dso->kernel && dso->adjust_symbols)
+ if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
rip = map__unmap_ip(map, ip);
ret += repsep_snprintf(bf, size, "%-#*llx %c ",
@@ -1364,9 +1365,9 @@ sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
uint64_t l = 0, r = 0;
if (left->mem_info)
- l = left->mem_info->daddr.addr;
+ l = mem_info__daddr(left->mem_info)->addr;
if (right->mem_info)
- r = right->mem_info->daddr.addr;
+ r = mem_info__daddr(right->mem_info)->addr;
return (int64_t)(r - l);
}
@@ -1378,8 +1379,8 @@ static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
struct map_symbol *ms = NULL;
if (he->mem_info) {
- addr = he->mem_info->daddr.addr;
- ms = &he->mem_info->daddr.ms;
+ addr = mem_info__daddr(he->mem_info)->addr;
+ ms = &mem_info__daddr(he->mem_info)->ms;
}
return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}
@@ -1390,9 +1391,9 @@ sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
uint64_t l = 0, r = 0;
if (left->mem_info)
- l = left->mem_info->iaddr.addr;
+ l = mem_info__iaddr(left->mem_info)->addr;
if (right->mem_info)
- r = right->mem_info->iaddr.addr;
+ r = mem_info__iaddr(right->mem_info)->addr;
return (int64_t)(r - l);
}
@@ -1404,8 +1405,8 @@ static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
struct map_symbol *ms = NULL;
if (he->mem_info) {
- addr = he->mem_info->iaddr.addr;
- ms = &he->mem_info->iaddr.ms;
+ addr = mem_info__iaddr(he->mem_info)->addr;
+ ms = &mem_info__iaddr(he->mem_info)->ms;
}
return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
}
@@ -1417,9 +1418,9 @@ sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
struct map *map_r = NULL;
if (left->mem_info)
- map_l = left->mem_info->daddr.ms.map;
+ map_l = mem_info__daddr(left->mem_info)->ms.map;
if (right->mem_info)
- map_r = right->mem_info->daddr.ms.map;
+ map_r = mem_info__daddr(right->mem_info)->ms.map;
return _sort__dso_cmp(map_l, map_r);
}
@@ -1430,7 +1431,7 @@ static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
struct map *map = NULL;
if (he->mem_info)
- map = he->mem_info->daddr.ms.map;
+ map = mem_info__daddr(he->mem_info)->ms.map;
return _hist_entry__dso_snprintf(map, bf, size, width);
}
@@ -1442,12 +1443,12 @@ sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_r;
if (left->mem_info)
- data_src_l = left->mem_info->data_src;
+ data_src_l = *mem_info__data_src(left->mem_info);
else
data_src_l.mem_lock = PERF_MEM_LOCK_NA;
if (right->mem_info)
- data_src_r = right->mem_info->data_src;
+ data_src_r = *mem_info__data_src(right->mem_info);
else
data_src_r.mem_lock = PERF_MEM_LOCK_NA;
@@ -1470,12 +1471,12 @@ sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_r;
if (left->mem_info)
- data_src_l = left->mem_info->data_src;
+ data_src_l = *mem_info__data_src(left->mem_info);
else
data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
if (right->mem_info)
- data_src_r = right->mem_info->data_src;
+ data_src_r = *mem_info__data_src(right->mem_info);
else
data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
@@ -1498,12 +1499,12 @@ sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_r;
if (left->mem_info)
- data_src_l = left->mem_info->data_src;
+ data_src_l = *mem_info__data_src(left->mem_info);
else
data_src_l.mem_lvl = PERF_MEM_LVL_NA;
if (right->mem_info)
- data_src_r = right->mem_info->data_src;
+ data_src_r = *mem_info__data_src(right->mem_info);
else
data_src_r.mem_lvl = PERF_MEM_LVL_NA;
@@ -1526,12 +1527,12 @@ sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_r;
if (left->mem_info)
- data_src_l = left->mem_info->data_src;
+ data_src_l = *mem_info__data_src(left->mem_info);
else
data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
if (right->mem_info)
- data_src_r = right->mem_info->data_src;
+ data_src_r = *mem_info__data_src(right->mem_info);
else
data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
@@ -1562,8 +1563,8 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
if (left->cpumode > right->cpumode) return -1;
if (left->cpumode < right->cpumode) return 1;
- l_map = left->mem_info->daddr.ms.map;
- r_map = right->mem_info->daddr.ms.map;
+ l_map = mem_info__daddr(left->mem_info)->ms.map;
+ r_map = mem_info__daddr(right->mem_info)->ms.map;
/* if both are NULL, jump to sort on al_addr instead */
if (!l_map && !r_map)
@@ -1586,8 +1587,8 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
*/
if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
- (!(map__flags(l_map) & MAP_SHARED)) && !l_dso->id.maj && !l_dso->id.min &&
- !l_dso->id.ino && !l_dso->id.ino_generation) {
+ (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min &&
+ !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) {
/* userspace anonymous */
if (thread__pid(left->thread) > thread__pid(right->thread))
@@ -1598,8 +1599,8 @@ sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
addr:
/* al_addr does all the right addr - start + offset calculations */
- l = cl_address(left->mem_info->daddr.al_addr, chk_double_cl);
- r = cl_address(right->mem_info->daddr.al_addr, chk_double_cl);
+ l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
+ r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);
if (l > r) return -1;
if (l < r) return 1;
@@ -1616,17 +1617,18 @@ static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
char level = he->level;
if (he->mem_info) {
- struct map *map = he->mem_info->daddr.ms.map;
+ struct map *map = mem_info__daddr(he->mem_info)->ms.map;
struct dso *dso = map ? map__dso(map) : NULL;
- addr = cl_address(he->mem_info->daddr.al_addr, chk_double_cl);
- ms = &he->mem_info->daddr.ms;
+ addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
+ ms = &mem_info__daddr(he->mem_info)->ms;
/* print [s] for shared data mmaps */
if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
map && !(map__prot(map) & PROT_EXEC) &&
(map__flags(map) & MAP_SHARED) &&
- (dso->id.maj || dso->id.min || dso->id.ino || dso->id.ino_generation))
+ (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino ||
+ dso__id(dso)->ino_generation))
level = 's';
else if (!map)
level = 'X';
@@ -1804,12 +1806,12 @@ sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
union perf_mem_data_src data_src_r;
if (left->mem_info)
- data_src_l = left->mem_info->data_src;
+ data_src_l = *mem_info__data_src(left->mem_info);
else
data_src_l.mem_blk = PERF_MEM_BLK_NA;
if (right->mem_info)
- data_src_r = right->mem_info->data_src;
+ data_src_r = *mem_info__data_src(right->mem_info);
else
data_src_r.mem_blk = PERF_MEM_BLK_NA;
@@ -1838,9 +1840,9 @@ sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
uint64_t l = 0, r = 0;
if (left->mem_info)
- l = left->mem_info->daddr.phys_addr;
+ l = mem_info__daddr(left->mem_info)->phys_addr;
if (right->mem_info)
- r = right->mem_info->daddr.phys_addr;
+ r = mem_info__daddr(right->mem_info)->phys_addr;
return (int64_t)(r - l);
}
@@ -1852,7 +1854,7 @@ static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
size_t ret = 0;
size_t len = BITS_PER_LONG / 4;
- addr = he->mem_info->daddr.phys_addr;
+ addr = mem_info__daddr(he->mem_info)->phys_addr;
ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
@@ -1879,9 +1881,9 @@ sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
uint64_t l = 0, r = 0;
if (left->mem_info)
- l = left->mem_info->daddr.data_page_size;
+ l = mem_info__daddr(left->mem_info)->data_page_size;
if (right->mem_info)
- r = right->mem_info->daddr.data_page_size;
+ r = mem_info__daddr(right->mem_info)->data_page_size;
return (int64_t)(r - l);
}
@@ -1892,7 +1894,7 @@ static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
char str[PAGE_SIZE_NAME_LEN];
return repsep_snprintf(bf, size, "%-*s", width,
- get_page_size_name(he->mem_info->daddr.data_page_size, str));
+ get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
}
struct sort_entry sort_mem_data_page_size = {
@@ -2441,6 +2443,13 @@ static struct hpp_dimension hpp_sort_dimensions[] = {
DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
DIM(PERF_HPP__SAMPLES, "sample"),
DIM(PERF_HPP__PERIOD, "period"),
+ DIM(PERF_HPP__WEIGHT1, "weight1"),
+ DIM(PERF_HPP__WEIGHT2, "weight2"),
+ DIM(PERF_HPP__WEIGHT3, "weight3"),
+ /* aliases for weight_struct */
+ DIM(PERF_HPP__WEIGHT2, "ins_lat"),
+ DIM(PERF_HPP__WEIGHT3, "retire_lat"),
+ DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
};
#undef DIM
@@ -3743,26 +3752,29 @@ void sort__setup_elide(FILE *output)
}
}
-int output_field_add(struct perf_hpp_list *list, char *tok)
+int output_field_add(struct perf_hpp_list *list, const char *tok)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
- struct sort_dimension *sd = &common_sort_dimensions[i];
+ for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
+ struct hpp_dimension *hd = &hpp_sort_dimensions[i];
- if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
+ if (strncasecmp(tok, hd->name, strlen(tok)))
continue;
- return __sort_dimension__add_output(list, sd);
+ if (!strcasecmp(tok, "weight"))
+ ui__warning("--fields weight shows the average value unlike in the --sort key.\n");
+
+ return __hpp_dimension__add_output(list, hd);
}
- for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
- struct hpp_dimension *hd = &hpp_sort_dimensions[i];
+ for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
+ struct sort_dimension *sd = &common_sort_dimensions[i];
- if (strncasecmp(tok, hd->name, strlen(tok)))
+ if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
continue;
- return __hpp_dimension__add_output(list, hd);
+ return __sort_dimension__add_output(list, sd);
}
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 6f6b4189a3..0bd0ee3ae7 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -3,19 +3,9 @@
#define __PERF_SORT_H
#include <regex.h>
#include <stdbool.h>
-#include <linux/list.h>
-#include <linux/rbtree.h>
-#include "map_symbol.h"
-#include "symbol_conf.h"
-#include "callchain.h"
-#include "values.h"
#include "hist.h"
-#include "stat.h"
-#include "spark.h"
struct option;
-struct thread;
-struct annotated_data_type;
extern regex_t parent_regex;
extern const char *sort_order;
@@ -39,175 +29,6 @@ extern struct sort_entry sort_type;
extern const char default_mem_sort_order[];
extern bool chk_double_cl;
-struct res_sample {
- u64 time;
- int cpu;
- int tid;
-};
-
-struct he_stat {
- u64 period;
- u64 period_sys;
- u64 period_us;
- u64 period_guest_sys;
- u64 period_guest_us;
- u32 nr_events;
-};
-
-struct namespace_id {
- u64 dev;
- u64 ino;
-};
-
-struct hist_entry_diff {
- bool computed;
- union {
- /* PERF_HPP__DELTA */
- double period_ratio_delta;
-
- /* PERF_HPP__RATIO */
- double period_ratio;
-
- /* HISTC_WEIGHTED_DIFF */
- s64 wdiff;
-
- /* PERF_HPP_DIFF__CYCLES */
- s64 cycles;
- };
- struct stats stats;
- unsigned long svals[NUM_SPARKS];
-};
-
-struct hist_entry_ops {
- void *(*new)(size_t size);
- void (*free)(void *ptr);
-};
-
-/**
- * struct hist_entry - histogram entry
- *
- * @row_offset - offset from the first callchain expanded to appear on screen
- * @nr_rows - rows expanded in callchain, recalculated on folding/unfolding
- */
-struct hist_entry {
- struct rb_node rb_node_in;
- struct rb_node rb_node;
- union {
- struct list_head node;
- struct list_head head;
- } pairs;
- struct he_stat stat;
- struct he_stat *stat_acc;
- struct map_symbol ms;
- struct thread *thread;
- struct comm *comm;
- struct namespace_id cgroup_id;
- u64 cgroup;
- u64 ip;
- u64 transaction;
- s32 socket;
- s32 cpu;
- u64 code_page_size;
- u64 weight;
- u64 ins_lat;
- u64 p_stage_cyc;
- u8 cpumode;
- u8 depth;
- int mem_type_off;
- struct simd_flags simd_flags;
-
- /* We are added by hists__add_dummy_entry. */
- bool dummy;
- bool leaf;
-
- char level;
- u8 filtered;
-
- u16 callchain_size;
- union {
- /*
- * Since perf diff only supports the stdio output, TUI
- * fields are only accessed from perf report (or perf
- * top). So make it a union to reduce memory usage.
- */
- struct hist_entry_diff diff;
- struct /* for TUI */ {
- u16 row_offset;
- u16 nr_rows;
- bool init_have_children;
- bool unfolded;
- bool has_children;
- bool has_no_entry;
- };
- };
- char *srcline;
- char *srcfile;
- struct symbol *parent;
- struct branch_info *branch_info;
- long time;
- struct hists *hists;
- struct mem_info *mem_info;
- struct block_info *block_info;
- struct kvm_info *kvm_info;
- void *raw_data;
- u32 raw_size;
- int num_res;
- struct res_sample *res_samples;
- void *trace_output;
- struct perf_hpp_list *hpp_list;
- struct hist_entry *parent_he;
- struct hist_entry_ops *ops;
- struct annotated_data_type *mem_type;
- union {
- /* this is for hierarchical entry structure */
- struct {
- struct rb_root_cached hroot_in;
- struct rb_root_cached hroot_out;
- }; /* non-leaf entries */
- struct rb_root sorted_chain; /* leaf entry has callchains */
- };
- struct callchain_root callchain[0]; /* must be last member */
-};
-
-static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
-{
- return he->callchain_size != 0;
-}
-
-int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width);
-
-static inline bool hist_entry__has_pairs(struct hist_entry *he)
-{
- return !list_empty(&he->pairs.node);
-}
-
-static inline struct hist_entry *hist_entry__next_pair(struct hist_entry *he)
-{
- if (hist_entry__has_pairs(he))
- return list_entry(he->pairs.node.next, struct hist_entry, pairs.node);
- return NULL;
-}
-
-static inline void hist_entry__add_pair(struct hist_entry *pair,
- struct hist_entry *he)
-{
- list_add_tail(&pair->pairs.node, &he->pairs.head);
-}
-
-static inline float hist_entry__get_percent_limit(struct hist_entry *he)
-{
- u64 period = he->stat.period;
- u64 total_period = hists__total_period(he->hists);
-
- if (unlikely(total_period == 0))
- return 0;
-
- if (symbol_conf.cumulate_callchain)
- period = he->stat_acc->period;
-
- return period * 100.0 / total_period;
-}
-
enum sort_mode {
SORT_MODE__NORMAL,
SORT_MODE__BRANCH,
@@ -299,15 +120,6 @@ struct sort_entry {
u8 se_width_idx;
};
-struct block_hist {
- struct hists block_hists;
- struct perf_hpp_list block_list;
- struct perf_hpp_fmt block_fmt;
- int block_idx;
- bool valid;
- struct hist_entry he;
-};
-
extern struct sort_entry sort_thread;
struct evlist;
@@ -329,7 +141,7 @@ void reset_dimensions(void);
int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
struct evlist *evlist,
int level);
-int output_field_add(struct perf_hpp_list *list, char *tok);
+int output_field_add(struct perf_hpp_list *list, const char *tok);
int64_t
sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right);
int64_t
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 7addc34afc..4d67c1e095 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -27,14 +27,14 @@ bool srcline_full_filename;
char *srcline__unknown = (char *)"??:0";
-static const char *dso__name(struct dso *dso)
+static const char *srcline_dso_name(struct dso *dso)
{
const char *dso_name;
- if (dso->symsrc_filename)
- dso_name = dso->symsrc_filename;
+ if (dso__symsrc_filename(dso))
+ dso_name = dso__symsrc_filename(dso);
else
- dso_name = dso->long_name;
+ dso_name = dso__long_name(dso);
if (dso_name[0] == '[')
return NULL;
@@ -288,7 +288,7 @@ static int inline_list__append_dso_a2l(struct dso *dso,
struct inline_node *node,
struct symbol *sym)
{
- struct a2l_data *a2l = dso->a2l;
+ struct a2l_data *a2l = dso__a2l(dso);
struct symbol *inline_sym = new_inline_sym(dso, sym, a2l->funcname);
char *srcline = NULL;
@@ -304,11 +304,11 @@ static int addr2line(const char *dso_name, u64 addr,
struct symbol *sym)
{
int ret = 0;
- struct a2l_data *a2l = dso->a2l;
+ struct a2l_data *a2l = dso__a2l(dso);
if (!a2l) {
- dso->a2l = addr2line_init(dso_name);
- a2l = dso->a2l;
+ a2l = addr2line_init(dso_name);
+ dso__set_a2l(dso, a2l);
}
if (a2l == NULL) {
@@ -360,14 +360,14 @@ static int addr2line(const char *dso_name, u64 addr,
void dso__free_a2l(struct dso *dso)
{
- struct a2l_data *a2l = dso->a2l;
+ struct a2l_data *a2l = dso__a2l(dso);
if (!a2l)
return;
addr2line_cleanup(a2l);
- dso->a2l = NULL;
+ dso__set_a2l(dso, NULL);
}
#else /* HAVE_LIBBFD_SUPPORT */
@@ -638,7 +638,7 @@ static int addr2line(const char *dso_name, u64 addr,
struct inline_node *node,
struct symbol *sym __maybe_unused)
{
- struct child_process *a2l = dso->a2l;
+ struct child_process *a2l = dso__a2l(dso);
char *record_function = NULL;
char *record_filename = NULL;
unsigned int record_line_nr = 0;
@@ -655,8 +655,9 @@ static int addr2line(const char *dso_name, u64 addr,
if (!filename__has_section(dso_name, ".debug_line"))
goto out;
- dso->a2l = addr2line_subprocess_init(symbol_conf.addr2line_path, dso_name);
- a2l = dso->a2l;
+ dso__set_a2l(dso,
+ addr2line_subprocess_init(symbol_conf.addr2line_path, dso_name));
+ a2l = dso__a2l(dso);
}
if (a2l == NULL) {
@@ -770,7 +771,7 @@ out:
free(record_function);
free(record_filename);
if (io.eof) {
- dso->a2l = NULL;
+ dso__set_a2l(dso, NULL);
addr2line_subprocess_cleanup(a2l);
}
return ret;
@@ -778,14 +779,14 @@ out:
void dso__free_a2l(struct dso *dso)
{
- struct child_process *a2l = dso->a2l;
+ struct child_process *a2l = dso__a2l(dso);
if (!a2l)
return;
addr2line_subprocess_cleanup(a2l);
- dso->a2l = NULL;
+ dso__set_a2l(dso, NULL);
}
#endif /* HAVE_LIBBFD_SUPPORT */
@@ -823,33 +824,34 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
char *srcline;
const char *dso_name;
- if (!dso->has_srcline)
+ if (!dso__has_srcline(dso))
goto out;
- dso_name = dso__name(dso);
+ dso_name = srcline_dso_name(dso);
if (dso_name == NULL)
- goto out;
+ goto out_err;
if (!addr2line(dso_name, addr, &file, &line, dso,
unwind_inlines, NULL, sym))
- goto out;
+ goto out_err;
srcline = srcline_from_fileline(file, line);
free(file);
if (!srcline)
- goto out;
+ goto out_err;
- dso->a2l_fails = 0;
+ dso__set_a2l_fails(dso, 0);
return srcline;
-out:
- if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) {
- dso->has_srcline = 0;
+out_err:
+ dso__set_a2l_fails(dso, dso__a2l_fails(dso) + 1);
+ if (dso__a2l_fails(dso) > A2L_FAIL_LIMIT) {
+ dso__set_has_srcline(dso, false);
dso__free_a2l(dso);
}
-
+out:
if (!show_addr)
return (show_sym && sym) ?
strndup(sym->name, sym->namelen) : SRCLINE_UNKNOWN;
@@ -858,7 +860,7 @@ out:
if (asprintf(&srcline, "%s+%" PRIu64, show_sym ? sym->name : "",
ip - sym->start) < 0)
return SRCLINE_UNKNOWN;
- } else if (asprintf(&srcline, "%s[%" PRIx64 "]", dso->short_name, addr) < 0)
+ } else if (asprintf(&srcline, "%s[%" PRIx64 "]", dso__short_name(dso), addr) < 0)
return SRCLINE_UNKNOWN;
return srcline;
}
@@ -869,22 +871,23 @@ char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line)
char *file = NULL;
const char *dso_name;
- if (!dso->has_srcline)
- goto out;
+ if (!dso__has_srcline(dso))
+ return NULL;
- dso_name = dso__name(dso);
+ dso_name = srcline_dso_name(dso);
if (dso_name == NULL)
- goto out;
+ goto out_err;
if (!addr2line(dso_name, addr, &file, line, dso, true, NULL, NULL))
- goto out;
+ goto out_err;
- dso->a2l_fails = 0;
+ dso__set_a2l_fails(dso, 0);
return file;
-out:
- if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) {
- dso->has_srcline = 0;
+out_err:
+ dso__set_a2l_fails(dso, dso__a2l_fails(dso) + 1);
+ if (dso__a2l_fails(dso) > A2L_FAIL_LIMIT) {
+ dso__set_has_srcline(dso, false);
dso__free_a2l(dso);
}
@@ -982,7 +985,7 @@ struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr,
{
const char *dso_name;
- dso_name = dso__name(dso);
+ dso_name = srcline_dso_name(dso);
if (dso_name == NULL)
return NULL;
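
Illustration (not part of the patch): the srcline.c hunks above split the old catch-all "out:" label into "out:" and "out_err:", so a failed addr2line lookup now always increments the DSO's failure counter and, past A2L_FAIL_LIMIT, disables srcline resolution for that DSO and frees its addr2line state, while a successful lookup resets the counter. The sketch below is a toy, self-contained version of that failure-limit pattern; the struct and the limit value are stand-ins, not perf's real types.

/* Toy sketch of the a2l failure-limit pattern used by __get_srcline() and
 * get_srcline_split() above; not perf's real struct dso or real limit. */
#include <stdbool.h>
#include <stdio.h>

#define FAIL_LIMIT 8	/* stand-in for A2L_FAIL_LIMIT */

struct toy_dso {
	unsigned int a2l_fails;
	bool has_srcline;
};

static void srcline_success(struct toy_dso *d)
{
	d->a2l_fails = 0;			/* like dso__set_a2l_fails(dso, 0) */
}

static void srcline_failure(struct toy_dso *d)
{
	if (++d->a2l_fails > FAIL_LIMIT) {
		d->has_srcline = false;		/* like dso__set_has_srcline(dso, false) */
		/* the real code also drops addr2line state via dso__free_a2l(dso) */
	}
}

int main(void)
{
	struct toy_dso d = { .has_srcline = true };

	for (int i = 0; i < 10; i++)
		srcline_failure(&d);
	printf("after 10 failures: has_srcline=%d fails=%u\n", d.has_srcline, d.a2l_fails);
	srcline_success(&d);
	printf("after a success:   fails=%u\n", d.a2l_fails);
	return 0;
}
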
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 91d2f7f65d..186305fd2d 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -38,6 +38,7 @@
static int aggr_header_lens[] = {
[AGGR_CORE] = 18,
[AGGR_CACHE] = 22,
+ [AGGR_CLUSTER] = 20,
[AGGR_DIE] = 12,
[AGGR_SOCKET] = 6,
[AGGR_NODE] = 6,
@@ -49,6 +50,7 @@ static int aggr_header_lens[] = {
static const char *aggr_header_csv[] = {
[AGGR_CORE] = "core,cpus,",
[AGGR_CACHE] = "cache,cpus,",
+ [AGGR_CLUSTER] = "cluster,cpus,",
[AGGR_DIE] = "die,cpus,",
[AGGR_SOCKET] = "socket,cpus,",
[AGGR_NONE] = "cpu,",
@@ -60,6 +62,7 @@ static const char *aggr_header_csv[] = {
static const char *aggr_header_std[] = {
[AGGR_CORE] = "core",
[AGGR_CACHE] = "cache",
+ [AGGR_CLUSTER] = "cluster",
[AGGR_DIE] = "die",
[AGGR_SOCKET] = "socket",
[AGGR_NONE] = "cpu",
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 3466aa9524..6bb975e46d 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -176,6 +176,13 @@ static double find_stat(const struct evsel *evsel, int aggr_idx, enum stat_type
if (type != evsel__stat_type(cur))
continue;
+ /*
+ * Except the SW CLOCK events,
+ * ignore if not the PMU we're looking for.
+ */
+ if ((type != STAT_NSECS) && (evsel->pmu != cur->pmu))
+ continue;
+
aggr = &cur->stats->aggr[aggr_idx];
if (type == STAT_NSECS)
return aggr->counts.val;
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index b0bcf92f0f..0bd5467389 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -315,7 +315,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
if (!counter->per_pkg)
return 0;
- if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
+ if (perf_cpu_map__is_any_cpu_or_is_empty(cpus))
return 0;
if (!mask) {
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index d6e5c8787b..fd7a187551 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -87,6 +87,7 @@ struct perf_stat_config {
bool metric_no_group;
bool metric_no_merge;
bool metric_no_threshold;
+ bool hardware_aware_grouping;
bool stop_read_counter;
bool iostat_run;
char *user_requested_cpu_list;
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 1892e9b6aa..2b04f47f4d 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -725,26 +725,24 @@ static void scan_core_topology(int *map, struct topology *t, int nr_cpus)
static int str_to_bitmap(char *s, cpumask_t *b, int nr_cpus)
{
- int i;
- int ret = 0;
- struct perf_cpu_map *m;
- struct perf_cpu c;
+ int idx, ret = 0;
+ struct perf_cpu_map *map;
+ struct perf_cpu cpu;
- m = perf_cpu_map__new(s);
- if (!m)
+ map = perf_cpu_map__new(s);
+ if (!map)
return -1;
- for (i = 0; i < perf_cpu_map__nr(m); i++) {
- c = perf_cpu_map__cpu(m, i);
- if (c.cpu >= nr_cpus) {
+ perf_cpu_map__for_each_cpu(cpu, idx, map) {
+ if (cpu.cpu >= nr_cpus) {
ret = -1;
break;
}
- __set_bit(c.cpu, cpumask_bits(b));
+ __set_bit(cpu.cpu, cpumask_bits(b));
}
- perf_cpu_map__put(m);
+ perf_cpu_map__put(map);
return ret;
}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 0b91f813c4..e398abfd13 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -174,7 +174,7 @@ static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs)
{
- return elf_sec__is_text(shdr, secstrs) ||
+ return elf_sec__is_text(shdr, secstrs) ||
elf_sec__is_data(shdr, secstrs);
}
@@ -312,8 +312,8 @@ static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
* DWARF DW_compile_unit has this, but we don't always have access
* to it...
*/
- if (!want_demangle(dso->kernel || kmodule))
- return demangled;
+ if (!want_demangle(dso__kernel(dso) || kmodule))
+ return demangled;
demangled = cxx_demangle_sym(elf_name, verbose > 0, verbose > 0);
if (demangled == NULL) {
@@ -470,7 +470,7 @@ static bool get_plt_sizes(struct dso *dso, GElf_Ehdr *ehdr, GElf_Shdr *shdr_plt,
}
if (*plt_entry_size)
return true;
- pr_debug("Missing PLT entry size for %s\n", dso->long_name);
+ pr_debug("Missing PLT entry size for %s\n", dso__long_name(dso));
return false;
}
@@ -654,7 +654,7 @@ static int dso__synthesize_plt_got_symbols(struct dso *dso, Elf *elf,
sym = symbol__new(shdr.sh_offset + i, shdr.sh_entsize, STB_GLOBAL, STT_FUNC, buf);
if (!sym)
goto out;
- symbols__insert(&dso->symbols, sym);
+ symbols__insert(dso__symbols(dso), sym);
}
err = 0;
out:
@@ -708,7 +708,7 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
plt_sym = symbol__new(shdr_plt.sh_offset, plt_header_size, STB_GLOBAL, STT_FUNC, ".plt");
if (!plt_sym)
goto out_elf_end;
- symbols__insert(&dso->symbols, plt_sym);
+ symbols__insert(dso__symbols(dso), plt_sym);
/* Only x86 has .plt.got */
if (machine_is_x86(ehdr.e_machine) &&
@@ -830,7 +830,7 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
goto out_elf_end;
plt_offset += plt_entry_size;
- symbols__insert(&dso->symbols, f);
+ symbols__insert(dso__symbols(dso), f);
++nr;
}
@@ -840,7 +840,7 @@ out_elf_end:
if (err == 0)
return nr;
pr_debug("%s: problems reading %s PLT info.\n",
- __func__, dso->long_name);
+ __func__, dso__long_name(dso));
return 0;
}
@@ -1175,19 +1175,19 @@ static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
static unsigned int const endian = 1;
- dso->needs_swap = DSO_SWAP__NO;
+ dso__set_needs_swap(dso, DSO_SWAP__NO);
switch (eidata) {
case ELFDATA2LSB:
/* We are big endian, DSO is little endian. */
if (*(unsigned char const *)&endian != 1)
- dso->needs_swap = DSO_SWAP__YES;
+ dso__set_needs_swap(dso, DSO_SWAP__YES);
break;
case ELFDATA2MSB:
/* We are little endian, DSO is big endian. */
if (*(unsigned char const *)&endian != 0)
- dso->needs_swap = DSO_SWAP__YES;
+ dso__set_needs_swap(dso, DSO_SWAP__YES);
break;
default:
@@ -1238,11 +1238,11 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
if (fd < 0)
return -1;
- type = dso->symtab_type;
+ type = dso__symtab_type(dso);
} else {
fd = open(name, O_RDONLY);
if (fd < 0) {
- dso->load_errno = errno;
+ *dso__load_errno(dso) = errno;
return -1;
}
}
@@ -1250,37 +1250,37 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
if (elf == NULL) {
pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
- dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
+ *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF;
goto out_close;
}
if (gelf_getehdr(elf, &ehdr) == NULL) {
- dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
+ *dso__load_errno(dso) = DSO_LOAD_ERRNO__INVALID_ELF;
pr_debug("%s: cannot get elf header.\n", __func__);
goto out_elf_end;
}
if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
- dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
+ *dso__load_errno(dso) = DSO_LOAD_ERRNO__INTERNAL_ERROR;
goto out_elf_end;
}
/* Always reject images with a mismatched build-id: */
- if (dso->has_build_id && !symbol_conf.ignore_vmlinux_buildid) {
+ if (dso__has_build_id(dso) && !symbol_conf.ignore_vmlinux_buildid) {
u8 build_id[BUILD_ID_SIZE];
struct build_id bid;
int size;
size = elf_read_build_id(elf, build_id, BUILD_ID_SIZE);
if (size <= 0) {
- dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
+ *dso__load_errno(dso) = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
goto out_elf_end;
}
build_id__init(&bid, build_id, size);
if (!dso__build_id_equal(dso, &bid)) {
pr_debug("%s: build id mismatch for %s.\n", __func__, name);
- dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
+ *dso__load_errno(dso) = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
goto out_elf_end;
}
}
@@ -1305,14 +1305,14 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
if (ss->opdshdr.sh_type != SHT_PROGBITS)
ss->opdsec = NULL;
- if (dso->kernel == DSO_SPACE__USER)
+ if (dso__kernel(dso) == DSO_SPACE__USER)
ss->adjust_symbols = true;
else
ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
ss->name = strdup(name);
if (!ss->name) {
- dso->load_errno = errno;
+ *dso__load_errno(dso) = errno;
goto out_elf_end;
}
@@ -1419,7 +1419,7 @@ void __weak arch__sym_update(struct symbol *s __maybe_unused,
static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
GElf_Sym *sym, GElf_Shdr *shdr,
struct maps *kmaps, struct kmap *kmap,
- struct dso **curr_dsop, struct map **curr_mapp,
+ struct dso **curr_dsop,
const char *section_name,
bool adjust_kernel_syms, bool kmodule, bool *remap_kernel,
u64 max_text_sh_offset)
@@ -1432,7 +1432,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
if (adjust_kernel_syms)
sym->st_value -= shdr->sh_addr - shdr->sh_offset;
- if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0)
+ if (strcmp(section_name, (dso__short_name(curr_dso) + dso__short_name_len(dso))) == 0)
return 0;
if (strcmp(section_name, ".text") == 0) {
@@ -1441,7 +1441,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
* kallsyms and identity maps. Overwrite it to
* map to the kernel dso.
*/
- if (*remap_kernel && dso->kernel && !kmodule) {
+ if (*remap_kernel && dso__kernel(dso) && !kmodule) {
*remap_kernel = false;
map__set_start(map, shdr->sh_addr + ref_reloc(kmap));
map__set_end(map, map__start(map) + shdr->sh_size);
@@ -1470,8 +1470,8 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
map__set_pgoff(map, shdr->sh_offset);
}
- *curr_mapp = map;
- *curr_dsop = dso;
+ dso__put(*curr_dsop);
+ *curr_dsop = dso__get(dso);
return 0;
}
@@ -1484,12 +1484,12 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
*/
if (kmodule && adjust_kernel_syms && is_exe_text(shdr->sh_flags) &&
shdr->sh_offset <= max_text_sh_offset) {
- *curr_mapp = map;
- *curr_dsop = dso;
+ dso__put(*curr_dsop);
+ *curr_dsop = dso__get(dso);
return 0;
}
- snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name);
+ snprintf(dso_name, sizeof(dso_name), "%s%s", dso__short_name(dso), section_name);
curr_map = maps__find_by_name(kmaps, dso_name);
if (curr_map == NULL) {
@@ -1501,17 +1501,17 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
curr_dso = dso__new(dso_name);
if (curr_dso == NULL)
return -1;
- curr_dso->kernel = dso->kernel;
- curr_dso->long_name = dso->long_name;
- curr_dso->long_name_len = dso->long_name_len;
- curr_dso->binary_type = dso->binary_type;
- curr_dso->adjust_symbols = dso->adjust_symbols;
+ dso__set_kernel(curr_dso, dso__kernel(dso));
+ RC_CHK_ACCESS(curr_dso)->long_name = dso__long_name(dso);
+ RC_CHK_ACCESS(curr_dso)->long_name_len = dso__long_name_len(dso);
+ dso__set_binary_type(curr_dso, dso__binary_type(dso));
+ dso__set_adjust_symbols(curr_dso, dso__adjust_symbols(dso));
curr_map = map__new2(start, curr_dso);
- dso__put(curr_dso);
- if (curr_map == NULL)
+ if (curr_map == NULL) {
+ dso__put(curr_dso);
return -1;
-
- if (curr_dso->kernel)
+ }
+ if (dso__kernel(curr_dso))
map__kmap(curr_map)->kmaps = kmaps;
if (adjust_kernel_syms) {
@@ -1521,24 +1521,18 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
} else {
map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
}
- curr_dso->symtab_type = dso->symtab_type;
+ dso__set_symtab_type(curr_dso, dso__symtab_type(dso));
if (maps__insert(kmaps, curr_map))
return -1;
- /*
- * Add it before we drop the reference to curr_map, i.e. while
- * we still are sure to have a reference to this DSO via
- * *curr_map->dso.
- */
dsos__add(&maps__machine(kmaps)->dsos, curr_dso);
- /* kmaps already got it */
- map__put(curr_map);
dso__set_loaded(curr_dso);
- *curr_mapp = curr_map;
+ dso__put(*curr_dsop);
*curr_dsop = curr_dso;
} else {
- *curr_dsop = map__dso(curr_map);
- map__put(curr_map);
+ dso__put(*curr_dsop);
+ *curr_dsop = dso__get(map__dso(curr_map));
}
+ map__put(curr_map);
return 0;
}
@@ -1547,13 +1541,11 @@ static int
dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
struct symsrc *runtime_ss, int kmodule, int dynsym)
{
- struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
+ struct kmap *kmap = dso__kernel(dso) ? map__kmap(map) : NULL;
struct maps *kmaps = kmap ? map__kmaps(map) : NULL;
- struct map *curr_map = map;
- struct dso *curr_dso = dso;
+ struct dso *curr_dso = NULL;
Elf_Data *symstrs, *secstrs, *secstrs_run, *secstrs_sym;
uint32_t nr_syms;
- int err = -1;
uint32_t idx;
GElf_Ehdr ehdr;
GElf_Shdr shdr;
@@ -1581,8 +1573,8 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
".text", NULL)) {
- dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
- dso->text_end = tshdr.sh_offset + tshdr.sh_size;
+ dso__set_text_offset(dso, tshdr.sh_addr - tshdr.sh_offset);
+ dso__set_text_end(dso, tshdr.sh_offset + tshdr.sh_size);
}
if (runtime_ss->opdsec)
@@ -1641,21 +1633,22 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
* attempted to prelink vdso to its virtual address.
*/
if (dso__is_vdso(dso))
- map__set_reloc(map, map__start(map) - dso->text_offset);
+ map__set_reloc(map, map__start(map) - dso__text_offset(dso));
- dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
+ dso__set_adjust_symbols(dso, runtime_ss->adjust_symbols || ref_reloc(kmap));
/*
* Initial kernel and module mappings do not map to the dso.
* Flag the fixups.
*/
- if (dso->kernel) {
+ if (dso__kernel(dso)) {
remap_kernel = true;
- adjust_kernel_syms = dso->adjust_symbols;
+ adjust_kernel_syms = dso__adjust_symbols(dso);
}
if (kmodule && adjust_kernel_syms)
max_text_sh_offset = max_text_section(runtime_ss->elf, &runtime_ss->ehdr);
+ curr_dso = dso__get(dso);
elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
struct symbol *f;
const char *elf_name = elf_sym__name(&sym, symstrs);
@@ -1743,10 +1736,14 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
(sym.st_value & 1))
--sym.st_value;
- if (dso->kernel) {
- if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map,
- section_name, adjust_kernel_syms, kmodule,
- &remap_kernel, max_text_sh_offset))
+ if (dso__kernel(dso)) {
+ if (dso__process_kernel_symbol(dso, map, &sym, &shdr,
+ kmaps, kmap, &curr_dso,
+ section_name,
+ adjust_kernel_syms,
+ kmodule,
+ &remap_kernel,
+ max_text_sh_offset))
goto out_elf_end;
} else if ((used_opd && runtime_ss->adjust_symbols) ||
(!used_opd && syms_ss->adjust_symbols)) {
@@ -1792,16 +1789,17 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
arch__sym_update(f, &sym);
- __symbols__insert(&curr_dso->symbols, f, dso->kernel);
+ __symbols__insert(dso__symbols(curr_dso), f, dso__kernel(dso));
nr++;
}
+ dso__put(curr_dso);
/*
* For misannotated, zeroed, ASM function sizes.
*/
if (nr > 0) {
- symbols__fixup_end(&dso->symbols, false);
- symbols__fixup_duplicate(&dso->symbols);
+ symbols__fixup_end(dso__symbols(dso), false);
+ symbols__fixup_duplicate(dso__symbols(dso));
if (kmap) {
/*
* We need to fixup this here too because we create new
@@ -1810,9 +1808,10 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
maps__fixup_end(kmaps);
}
}
- err = nr;
+ return nr;
out_elf_end:
- return err;
+ dso__put(curr_dso);
+ return -1;
}
int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
@@ -1821,16 +1820,16 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
int nr = 0;
int err = -1;
- dso->symtab_type = syms_ss->type;
- dso->is_64_bit = syms_ss->is_64_bit;
- dso->rel = syms_ss->ehdr.e_type == ET_REL;
+ dso__set_symtab_type(dso, syms_ss->type);
+ dso__set_is_64_bit(dso, syms_ss->is_64_bit);
+ dso__set_rel(dso, syms_ss->ehdr.e_type == ET_REL);
/*
* Modules may already have symbols from kallsyms, but those symbols
* have the wrong values for the dso maps, so remove them.
*/
if (kmodule && syms_ss->symtab)
- symbols__delete(&dso->symbols);
+ symbols__delete(dso__symbols(dso));
if (!syms_ss->symtab) {
/*
@@ -1838,7 +1837,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
* to using kallsyms. The vmlinux runtime symbols aren't
* of much use.
*/
- if (dso->kernel)
+ if (dso__kernel(dso))
return err;
} else {
err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
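
Illustration (not part of the patch): in symbol-elf.c above, dso__process_kernel_symbol() no longer passes a curr_map pointer back to its caller, and dso__load_sym_internal() keeps curr_dso as a counted reference: it starts with dso__get(dso), every reassignment does dso__put() on the old value before taking a new reference, and both the success and error exits end with dso__put(curr_dso). The toy below sketches that put-then-get swap discipline with a stand-in refcount; it is not perf's dso__get()/dso__put() or RC_CHK machinery.

/* Toy sketch of the reference-swap discipline adopted for curr_dso above. */
#include <stdio.h>

struct toy_dso {
	int refcnt;
	const char *name;
};

static struct toy_dso *toy_get(struct toy_dso *d)
{
	if (d)
		d->refcnt++;
	return d;
}

static void toy_put(struct toy_dso *d)
{
	if (d && --d->refcnt == 0)
		printf("last reference to %s dropped\n", d->name);
}

/* Mirrors how dso__process_kernel_symbol() updates *curr_dsop. */
static void swap_current(struct toy_dso **curr, struct toy_dso *next)
{
	toy_put(*curr);		/* drop the reference we held */
	*curr = toy_get(next);	/* take a reference on the replacement */
}

int main(void)
{
	struct toy_dso base = { .refcnt = 1, .name = "vmlinux" };
	struct toy_dso mod  = { .refcnt = 1, .name = "[module]" };
	struct toy_dso *curr = toy_get(&base);	/* like curr_dso = dso__get(dso) */

	swap_current(&curr, &mod);	/* a symbol landed in another DSO */
	toy_put(curr);			/* like the final dso__put(curr_dso) */
	return 0;
}
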
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index 1da8b71350..c6f369b5d8 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -273,7 +273,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
out_close:
close(fd);
out_errno:
- dso->load_errno = errno;
+ RC_CHK_ACCESS(dso)->load_errno = errno;
return -1;
}
@@ -348,7 +348,7 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
ret = fd__is_64_bit(ss->fd);
if (ret >= 0)
- dso->is_64_bit = ret;
+ RC_CHK_ACCESS(dso)->is_64_bit = ret;
if (filename__read_build_id(ss->name, &bid) > 0)
dso__set_build_id(dso, &bid);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 68dbeae8d2..22646f0cca 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -27,6 +27,7 @@
#include "symbol.h"
#include "map_symbol.h"
#include "mem-events.h"
+#include "mem-info.h"
#include "symsrc.h"
#include "strlist.h"
#include "intlist.h"
@@ -532,52 +533,52 @@ static struct symbol *symbols__find_by_name(struct symbol *symbols[],
void dso__reset_find_symbol_cache(struct dso *dso)
{
- dso->last_find_result.addr = 0;
- dso->last_find_result.symbol = NULL;
+ dso__set_last_find_result_addr(dso, 0);
+ dso__set_last_find_result_symbol(dso, NULL);
}
void dso__insert_symbol(struct dso *dso, struct symbol *sym)
{
- __symbols__insert(&dso->symbols, sym, dso->kernel);
+ __symbols__insert(dso__symbols(dso), sym, dso__kernel(dso));
/* update the symbol cache if necessary */
- if (dso->last_find_result.addr >= sym->start &&
- (dso->last_find_result.addr < sym->end ||
+ if (dso__last_find_result_addr(dso) >= sym->start &&
+ (dso__last_find_result_addr(dso) < sym->end ||
sym->start == sym->end)) {
- dso->last_find_result.symbol = sym;
+ dso__set_last_find_result_symbol(dso, sym);
}
}
void dso__delete_symbol(struct dso *dso, struct symbol *sym)
{
- rb_erase_cached(&sym->rb_node, &dso->symbols);
+ rb_erase_cached(&sym->rb_node, dso__symbols(dso));
symbol__delete(sym);
dso__reset_find_symbol_cache(dso);
}
struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
{
- if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
- dso->last_find_result.addr = addr;
- dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
+ if (dso__last_find_result_addr(dso) != addr || dso__last_find_result_symbol(dso) == NULL) {
+ dso__set_last_find_result_addr(dso, addr);
+ dso__set_last_find_result_symbol(dso, symbols__find(dso__symbols(dso), addr));
}
- return dso->last_find_result.symbol;
+ return dso__last_find_result_symbol(dso);
}
struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr)
{
- return symbols__find(&dso->symbols, addr);
+ return symbols__find(dso__symbols(dso), addr);
}
struct symbol *dso__first_symbol(struct dso *dso)
{
- return symbols__first(&dso->symbols);
+ return symbols__first(dso__symbols(dso));
}
struct symbol *dso__last_symbol(struct dso *dso)
{
- return symbols__last(&dso->symbols);
+ return symbols__last(dso__symbols(dso));
}
struct symbol *dso__next_symbol(struct symbol *sym)
@@ -587,11 +588,11 @@ struct symbol *dso__next_symbol(struct symbol *sym)
struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx)
{
- if (*idx + 1 >= dso->symbol_names_len)
+ if (*idx + 1 >= dso__symbol_names_len(dso))
return NULL;
++*idx;
- return dso->symbol_names[*idx];
+ return dso__symbol_names(dso)[*idx];
}
/*
@@ -599,27 +600,29 @@ struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx)
*/
struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name, size_t *idx)
{
- struct symbol *s = symbols__find_by_name(dso->symbol_names, dso->symbol_names_len,
- name, SYMBOL_TAG_INCLUDE__NONE, idx);
- if (!s)
- s = symbols__find_by_name(dso->symbol_names, dso->symbol_names_len,
- name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx);
+ struct symbol *s = symbols__find_by_name(dso__symbol_names(dso),
+ dso__symbol_names_len(dso),
+ name, SYMBOL_TAG_INCLUDE__NONE, idx);
+ if (!s) {
+ s = symbols__find_by_name(dso__symbol_names(dso), dso__symbol_names_len(dso),
+ name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx);
+ }
return s;
}
void dso__sort_by_name(struct dso *dso)
{
- mutex_lock(&dso->lock);
+ mutex_lock(dso__lock(dso));
if (!dso__sorted_by_name(dso)) {
size_t len;
- dso->symbol_names = symbols__sort_by_name(&dso->symbols, &len);
- if (dso->symbol_names) {
- dso->symbol_names_len = len;
+ dso__set_symbol_names(dso, symbols__sort_by_name(dso__symbols(dso), &len));
+ if (dso__symbol_names(dso)) {
+ dso__set_symbol_names_len(dso, len);
dso__set_sorted_by_name(dso);
}
}
- mutex_unlock(&dso->lock);
+ mutex_unlock(dso__lock(dso));
}
/*
@@ -746,7 +749,7 @@ static int map__process_kallsym_symbol(void *arg, const char *name,
{
struct symbol *sym;
struct dso *dso = arg;
- struct rb_root_cached *root = &dso->symbols;
+ struct rb_root_cached *root = dso__symbols(dso);
if (!symbol_type__filter(type))
return 0;
@@ -786,8 +789,8 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
{
struct symbol *pos;
int count = 0;
- struct rb_root_cached old_root = dso->symbols;
- struct rb_root_cached *root = &dso->symbols;
+ struct rb_root_cached *root = dso__symbols(dso);
+ struct rb_root_cached old_root = *root;
struct rb_node *next = rb_first_cached(root);
if (!kmaps)
@@ -821,13 +824,13 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
pos->end = map__end(curr_map);
if (pos->end)
pos->end -= map__start(curr_map) - map__pgoff(curr_map);
- symbols__insert(&curr_map_dso->symbols, pos);
+ symbols__insert(dso__symbols(curr_map_dso), pos);
++count;
map__put(curr_map);
}
/* Symbols have been adjusted */
- dso->adjust_symbols = 1;
+ dso__set_adjust_symbols(dso, true);
return count;
}
@@ -844,7 +847,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
struct map *curr_map = map__get(initial_map);
struct symbol *pos;
int count = 0, moved = 0;
- struct rb_root_cached *root = &dso->symbols;
+ struct rb_root_cached *root = dso__symbols(dso);
struct rb_node *next = rb_first_cached(root);
int kernel_range = 0;
bool x86_64;
@@ -871,9 +874,9 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
*module++ = '\0';
curr_map_dso = map__dso(curr_map);
- if (strcmp(curr_map_dso->short_name, module)) {
+ if (strcmp(dso__short_name(curr_map_dso), module)) {
if (!RC_CHK_EQUAL(curr_map, initial_map) &&
- dso->kernel == DSO_SPACE__KERNEL_GUEST &&
+ dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST &&
machine__is_default_guest(machine)) {
/*
* We assume all symbols of a module are
@@ -896,7 +899,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
goto discard_symbol;
}
curr_map_dso = map__dso(curr_map);
- if (curr_map_dso->loaded &&
+ if (dso__loaded(curr_map_dso) &&
!machine__is_default_guest(machine))
goto discard_symbol;
}
@@ -932,7 +935,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
goto add_symbol;
}
- if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
snprintf(dso_name, sizeof(dso_name),
"[guest.kernel].%d",
kernel_range++);
@@ -946,7 +949,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
if (ndso == NULL)
return -1;
- ndso->kernel = dso->kernel;
+ dso__set_kernel(ndso, dso__kernel(dso));
curr_map = map__new2(pos->start, ndso);
if (curr_map == NULL) {
@@ -971,7 +974,7 @@ add_symbol:
struct dso *curr_map_dso = map__dso(curr_map);
rb_erase_cached(&pos->rb_node, root);
- symbols__insert(&curr_map_dso->symbols, pos);
+ symbols__insert(dso__symbols(curr_map_dso), pos);
++moved;
} else
++count;
@@ -983,7 +986,7 @@ discard_symbol:
}
if (!RC_CHK_EQUAL(curr_map, initial_map) &&
- dso->kernel == DSO_SPACE__KERNEL_GUEST &&
+ dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST &&
machine__is_default_guest(maps__machine(kmaps))) {
dso__set_loaded(map__dso(curr_map));
}
@@ -1157,7 +1160,7 @@ static int do_validate_kcore_modules_cb(struct map *old_map, void *data)
dso = map__dso(old_map);
/* Module must be in memory at the same address */
- mi = find_module(dso->short_name, modules);
+ mi = find_module(dso__short_name(dso), modules);
if (!mi || mi->start != map__start(old_map))
return -EINVAL;
@@ -1326,7 +1329,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
&is_64_bit);
if (err)
goto out_err;
- dso->is_64_bit = is_64_bit;
+ dso__set_is_64_bit(dso, is_64_bit);
if (list_empty(&md.maps)) {
err = -EINVAL;
@@ -1422,10 +1425,10 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
* Set the data type and long name so that kcore can be read via
* dso__data_read_addr().
*/
- if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
- dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KCORE);
else
- dso->binary_type = DSO_BINARY_TYPE__KCORE;
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__KCORE);
dso__set_long_name(dso, strdup(kcore_filename), true);
close(fd);
@@ -1486,13 +1489,13 @@ int __dso__load_kallsyms(struct dso *dso, const char *filename,
if (kallsyms__delta(kmap, filename, &delta))
return -1;
- symbols__fixup_end(&dso->symbols, true);
- symbols__fixup_duplicate(&dso->symbols);
+ symbols__fixup_end(dso__symbols(dso), true);
+ symbols__fixup_duplicate(dso__symbols(dso));
- if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
- dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
+ dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS);
else
- dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
+ dso__set_symtab_type(dso, DSO_BINARY_TYPE__KALLSYMS);
if (!no_kcore && !dso__load_kcore(dso, map, filename))
return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
@@ -1548,7 +1551,7 @@ static int dso__load_perf_map(const char *map_path, struct dso *dso)
if (sym == NULL)
goto out_delete_line;
- symbols__insert(&dso->symbols, sym);
+ symbols__insert(dso__symbols(dso), sym);
nr_syms++;
}
@@ -1604,7 +1607,7 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
if (!bfd_check_format(abfd, bfd_object)) {
pr_debug2("%s: cannot read %s bfd file.\n", __func__,
- dso->long_name);
+ dso__long_name(dso));
goto out_close;
}
@@ -1637,12 +1640,13 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
}
if (i < symbols_count) {
/* PE symbols can only have 4 bytes, so use .text high bits */
- dso->text_offset = section->vma - (u32)section->vma;
- dso->text_offset += (u32)bfd_asymbol_value(symbols[i]);
- dso->text_end = (section->vma - dso->text_offset) + section->size;
+ u64 text_offset = (section->vma - (u32)section->vma)
+ + (u32)bfd_asymbol_value(symbols[i]);
+ dso__set_text_offset(dso, text_offset);
+ dso__set_text_end(dso, (section->vma - text_offset) + section->size);
} else {
- dso->text_offset = section->vma - section->filepos;
- dso->text_end = section->filepos + section->size;
+ dso__set_text_offset(dso, section->vma - section->filepos);
+ dso__set_text_end(dso, section->filepos + section->size);
}
}
@@ -1668,21 +1672,21 @@ int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
else
len = section->size - sym->value;
- start = bfd_asymbol_value(sym) - dso->text_offset;
+ start = bfd_asymbol_value(sym) - dso__text_offset(dso);
symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
bfd_asymbol_name(sym));
if (!symbol)
goto out_free;
- symbols__insert(&dso->symbols, symbol);
+ symbols__insert(dso__symbols(dso), symbol);
}
#ifdef bfd_get_section
#undef bfd_asymbol_section
#endif
- symbols__fixup_end(&dso->symbols, false);
- symbols__fixup_duplicate(&dso->symbols);
- dso->adjust_symbols = 1;
+ symbols__fixup_end(dso__symbols(dso), false);
+ symbols__fixup_duplicate(dso__symbols(dso));
+ dso__set_adjust_symbols(dso, true);
err = 0;
out_free:
@@ -1705,17 +1709,17 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
- return !kmod && dso->kernel == DSO_SPACE__USER;
+ return !kmod && dso__kernel(dso) == DSO_SPACE__USER;
case DSO_BINARY_TYPE__KALLSYMS:
case DSO_BINARY_TYPE__VMLINUX:
case DSO_BINARY_TYPE__KCORE:
- return dso->kernel == DSO_SPACE__KERNEL;
+ return dso__kernel(dso) == DSO_SPACE__KERNEL;
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__GUEST_KCORE:
- return dso->kernel == DSO_SPACE__KERNEL_GUEST;
+ return dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST;
case DSO_BINARY_TYPE__GUEST_KMODULE:
case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
@@ -1725,7 +1729,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
* kernel modules know their symtab type - it's set when
* creating a module dso in machine__addnew_module_map().
*/
- return kmod && dso->symtab_type == type;
+ return kmod && dso__symtab_type(dso) == type;
case DSO_BINARY_TYPE__BUILD_ID_CACHE:
case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
@@ -1793,18 +1797,20 @@ int dso__load(struct dso *dso, struct map *map)
struct build_id bid;
struct nscookie nsc;
char newmapname[PATH_MAX];
- const char *map_path = dso->long_name;
+ const char *map_path = dso__long_name(dso);
+
+ mutex_lock(dso__lock(dso));
+ perfmap = is_perf_pid_map_name(map_path);
- mutex_lock(&dso->lock);
- perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
if (perfmap) {
- if (dso->nsinfo && (dso__find_perf_map(newmapname,
- sizeof(newmapname), &dso->nsinfo) == 0)) {
+ if (dso__nsinfo(dso) &&
+ (dso__find_perf_map(newmapname, sizeof(newmapname),
+ dso__nsinfo_ptr(dso)) == 0)) {
map_path = newmapname;
}
}
- nsinfo__mountns_enter(dso->nsinfo, &nsc);
+ nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
/* check again under the dso->lock */
if (dso__loaded(dso)) {
@@ -1812,15 +1818,15 @@ int dso__load(struct dso *dso, struct map *map)
goto out;
}
- kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
- dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
- dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
- dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
+ kmod = dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+ dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
+ dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE ||
+ dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
- if (dso->kernel && !kmod) {
- if (dso->kernel == DSO_SPACE__KERNEL)
+ if (dso__kernel(dso) && !kmod) {
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL)
ret = dso__load_kernel_sym(dso, map);
- else if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
+ else if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
ret = dso__load_guest_kernel_sym(dso, map);
machine = maps__machine(map__kmaps(map));
@@ -1829,12 +1835,13 @@ int dso__load(struct dso *dso, struct map *map)
goto out;
}
- dso->adjust_symbols = 0;
+ dso__set_adjust_symbols(dso, false);
if (perfmap) {
ret = dso__load_perf_map(map_path, dso);
- dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
- DSO_BINARY_TYPE__NOT_FOUND;
+ dso__set_symtab_type(dso, ret > 0
+ ? DSO_BINARY_TYPE__JAVA_JIT
+ : DSO_BINARY_TYPE__NOT_FOUND);
goto out;
}
@@ -1849,9 +1856,9 @@ int dso__load(struct dso *dso, struct map *map)
* Read the build id if possible. This is required for
* DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
*/
- if (!dso->has_build_id &&
- is_regular_file(dso->long_name)) {
- __symbol__join_symfs(name, PATH_MAX, dso->long_name);
+ if (!dso__has_build_id(dso) &&
+ is_regular_file(dso__long_name(dso))) {
+ __symbol__join_symfs(name, PATH_MAX, dso__long_name(dso));
if (filename__read_build_id(name, &bid) > 0)
dso__set_build_id(dso, &bid);
}
@@ -1885,7 +1892,7 @@ int dso__load(struct dso *dso, struct map *map)
nsinfo__mountns_exit(&nsc);
is_reg = is_regular_file(name);
- if (!is_reg && errno == ENOENT && dso->nsinfo) {
+ if (!is_reg && errno == ENOENT && dso__nsinfo(dso)) {
char *new_name = dso__filename_with_chroot(dso, name);
if (new_name) {
is_reg = is_regular_file(new_name);
@@ -1902,7 +1909,7 @@ int dso__load(struct dso *dso, struct map *map)
sirc = symsrc__init(ss, dso, name, symtab_type);
if (nsexit)
- nsinfo__mountns_enter(dso->nsinfo, &nsc);
+ nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
if (bfdrc == 0) {
ret = 0;
@@ -1915,8 +1922,8 @@ int dso__load(struct dso *dso, struct map *map)
if (!syms_ss && symsrc__has_symtab(ss)) {
syms_ss = ss;
next_slot = true;
- if (!dso->symsrc_filename)
- dso->symsrc_filename = strdup(name);
+ if (!dso__symsrc_filename(dso))
+ dso__set_symsrc_filename(dso, strdup(name));
}
if (!runtime_ss && symsrc__possibly_runtime(ss)) {
@@ -1963,11 +1970,11 @@ int dso__load(struct dso *dso, struct map *map)
symsrc__destroy(&ss_[ss_pos - 1]);
out_free:
free(name);
- if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
+ if (ret < 0 && strstr(dso__name(dso), " (deleted)") != NULL)
ret = 0;
out:
dso__set_loaded(dso);
- mutex_unlock(&dso->lock);
+ mutex_unlock(dso__lock(dso));
nsinfo__mountns_exit(&nsc);
return ret;
@@ -1990,7 +1997,7 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
else
symbol__join_symfs(symfs_vmlinux, vmlinux);
- if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
else
symtab_type = DSO_BINARY_TYPE__VMLINUX;
@@ -2006,10 +2013,10 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
* an incorrect long name unless we set it here first.
*/
dso__set_long_name(dso, vmlinux, vmlinux_allocated);
- if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
- dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
+ if (dso__kernel(dso) == DSO_SPACE__KERNEL_GUEST)
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_VMLINUX);
else
- dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__VMLINUX);
err = dso__load_sym(dso, map, &ss, &ss, 0);
symsrc__destroy(&ss);
@@ -2101,7 +2108,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
bool is_host = false;
char path[PATH_MAX];
- if (!dso->has_build_id) {
+ if (!dso__has_build_id(dso)) {
/*
* Last resort, if we don't have a build-id and couldn't find
* any vmlinux file, try the running kernel kallsyms table.
@@ -2126,7 +2133,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
goto proc_kallsyms;
}
- build_id__sprintf(&dso->bid, sbuild_id);
+ build_id__sprintf(dso__bid(dso), sbuild_id);
/* Find kallsyms in build-id cache with kcore */
scnprintf(path, sizeof(path), "%s/%s/%s",
@@ -2218,7 +2225,7 @@ do_kallsyms:
free(kallsyms_allocated_filename);
if (err > 0 && !dso__is_kcore(dso)) {
- dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__KALLSYMS);
dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
map__fixup_start(map);
map__fixup_end(map);
@@ -2261,7 +2268,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
if (err > 0)
pr_debug("Using %s for symbols\n", kallsyms_filename);
if (err > 0 && !dso__is_kcore(dso)) {
- dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
+ dso__set_binary_type(dso, DSO_BINARY_TYPE__GUEST_KALLSYMS);
dso__set_long_name(dso, machine->mmap_name, false);
map__fixup_start(map);
map__fixup_end(map);
@@ -2575,31 +2582,6 @@ int symbol__config_symfs(const struct option *opt __maybe_unused,
return 0;
}
-struct mem_info *mem_info__get(struct mem_info *mi)
-{
- if (mi)
- refcount_inc(&mi->refcnt);
- return mi;
-}
-
-void mem_info__put(struct mem_info *mi)
-{
- if (mi && refcount_dec_and_test(&mi->refcnt)) {
- addr_map_symbol__exit(&mi->iaddr);
- addr_map_symbol__exit(&mi->daddr);
- free(mi);
- }
-}
-
-struct mem_info *mem_info__new(void)
-{
- struct mem_info *mi = zalloc(sizeof(*mi));
-
- if (mi)
- refcount_set(&mi->refcnt, 1);
- return mi;
-}
-
/*
* Checks that user supplied symbol kernel files are accessible because
* the default mechanism for accessing elf files fails silently. i.e. if
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 071837ddce..3fb5d146d9 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -268,18 +268,6 @@ enum {
SDT_NOTE_IDX_REFCTR,
};
-struct mem_info *mem_info__new(void);
-struct mem_info *mem_info__get(struct mem_info *mi);
-void mem_info__put(struct mem_info *mi);
-
-static inline void __mem_info__zput(struct mem_info **mi)
-{
- mem_info__put(*mi);
- *mi = NULL;
-}
-
-#define mem_info__zput(mi) __mem_info__zput(&mi)
-
int symbol__validate_sym_arguments(void);
#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
index 088f4abf23..53e1af4ed9 100644
--- a/tools/perf/util/symbol_fprintf.c
+++ b/tools/perf/util/symbol_fprintf.c
@@ -64,8 +64,8 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
{
size_t ret = 0;
- for (size_t i = 0; i < dso->symbol_names_len; i++) {
- struct symbol *pos = dso->symbol_names[i];
+ for (size_t i = 0; i < dso__symbol_names_len(dso); i++) {
+ struct symbol *pos = dso__symbol_names(dso)[i];
ret += fprintf(fp, "%s\n", pos->name);
}
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index 2a0289c149..5498048f56 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -385,8 +385,8 @@ static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
id.ino_generation = event->ino_generation;
dso = dsos__findnew_id(&machine->dsos, event->filename, &id);
- if (dso && dso->has_build_id) {
- bid = dso->bid;
+ if (dso && dso__has_build_id(dso)) {
+ bid = *dso__bid(dso);
rc = 0;
goto out;
}
@@ -407,7 +407,7 @@ out:
event->__reserved_1 = 0;
event->__reserved_2 = 0;
- if (dso && !dso->has_build_id)
+ if (dso && !dso__has_build_id(dso))
dso__set_build_id(dso, &bid);
} else {
if (event->filename[0] == '/') {
@@ -684,7 +684,7 @@ static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data)
dso = map__dso(map);
if (symbol_conf.buildid_mmap2) {
- size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
+ size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64));
event->mmap2.header.type = PERF_RECORD_MMAP2;
event->mmap2.header.size = (sizeof(event->mmap2) -
(sizeof(event->mmap2.filename) - size));
@@ -694,11 +694,11 @@ static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data)
event->mmap2.len = map__size(map);
event->mmap2.pid = args->machine->pid;
- memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1);
+ memcpy(event->mmap2.filename, dso__long_name(dso), dso__long_name_len(dso) + 1);
perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false);
} else {
- size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
+ size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size));
@@ -708,7 +708,7 @@ static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data)
event->mmap.len = map__size(map);
event->mmap.pid = args->machine->pid;
- memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1);
+ memcpy(event->mmap.filename, dso__long_name(dso), dso__long_name_len(dso) + 1);
}
if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0)
@@ -2231,20 +2231,20 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16
union perf_event ev;
size_t len;
- if (!pos->hit)
+ if (!dso__hit(pos))
return 0;
memset(&ev, 0, sizeof(ev));
- len = pos->long_name_len + 1;
+ len = dso__long_name_len(pos) + 1;
len = PERF_ALIGN(len, NAME_ALIGN);
- ev.build_id.size = min(pos->bid.size, sizeof(pos->bid.data));
- memcpy(&ev.build_id.build_id, pos->bid.data, ev.build_id.size);
+ ev.build_id.size = min(dso__bid(pos)->size, sizeof(dso__bid(pos)->data));
+ memcpy(&ev.build_id.build_id, dso__bid(pos)->data, ev.build_id.size);
ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE;
ev.build_id.pid = machine->pid;
ev.build_id.header.size = sizeof(ev.build_id) + len;
- memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
+ memcpy(&ev.build_id.filename, dso__long_name(pos), dso__long_name_len(pos));
return process(tool, &ev, NULL, machine);
}
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 515726489e..87c59aa9fe 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -453,14 +453,14 @@ int thread__memcpy(struct thread *thread, struct machine *machine,
dso = map__dso(al.map);
- if (!dso || dso->data.status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
+ if (!dso || dso__data(dso)->status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
addr_location__exit(&al);
return -1;
}
offset = map__map_ip(al.map, ip);
if (is64bit)
- *is64bit = dso->is_64_bit;
+ *is64bit = dso__is_64_bit(dso);
addr_location__exit(&al);
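
Illustration (not part of the patch): the thread.c hunk above, like most of this patch, replaces direct struct member access (dso->data.status, dso->is_64_bit) with accessor calls (dso__data(dso)->status, dso__is_64_bit(dso)). The toy below only sketches the accessor shape this conversion assumes, a getter returning a pointer to the embedded data so existing call sites can keep reading and writing fields through it; the types here are stand-ins, not perf's.

/* Toy sketch of the dso__data(dso)->field accessor pattern used throughout. */
#include <stdio.h>

struct toy_dso_data { int status; };
struct toy_dso { struct toy_dso_data data; };

static struct toy_dso_data *toy_dso__data(struct toy_dso *dso)
{
	return &dso->data;	/* call sites read and write through the pointer */
}

int main(void)
{
	struct toy_dso dso = { 0 };

	toy_dso__data(&dso)->status = -1;	/* like dso__data(dso)->status = ... */
	printf("status=%d\n", toy_dso__data(&dso)->status);
	return 0;
}
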
diff --git a/tools/perf/util/tracepoint.c b/tools/perf/util/tracepoint.c
index 92dd8b455b..95377ed5d8 100644
--- a/tools/perf/util/tracepoint.c
+++ b/tools/perf/util/tracepoint.c
@@ -4,10 +4,12 @@
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
+#include <stdlib.h>
#include <sys/param.h>
#include <unistd.h>
#include <api/fs/tracing_path.h>
+#include "fncache.h"
int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
{
@@ -26,39 +28,25 @@ int tp_event_has_id(const char *dir_path, struct dirent *evt_dir)
/*
* Check whether event is in <debugfs_mount_point>/tracing/events
*/
-int is_valid_tracepoint(const char *event_string)
+bool is_valid_tracepoint(const char *event_string)
{
- DIR *sys_dir, *evt_dir;
- struct dirent *sys_dirent, *evt_dirent;
- char evt_path[MAXPATHLEN];
- char *dir_path;
-
- sys_dir = tracing_events__opendir();
- if (!sys_dir)
- return 0;
-
- for_each_subsystem(sys_dir, sys_dirent) {
- dir_path = get_events_file(sys_dirent->d_name);
- if (!dir_path)
- continue;
- evt_dir = opendir(dir_path);
- if (!evt_dir)
- goto next;
-
- for_each_event(dir_path, evt_dir, evt_dirent) {
- snprintf(evt_path, MAXPATHLEN, "%s:%s",
- sys_dirent->d_name, evt_dirent->d_name);
- if (!strcmp(evt_path, event_string)) {
- closedir(evt_dir);
- put_events_file(dir_path);
- closedir(sys_dir);
- return 1;
- }
- }
- closedir(evt_dir);
-next:
- put_events_file(dir_path);
- }
- closedir(sys_dir);
- return 0;
+ char *dst, *path = malloc(strlen(event_string) + 4); /* Space for "/id\0". */
+ bool have_file = false; /* Conservatively return false if memory allocation failed. */
+ const char *src;
+
+ if (!path)
+ return false;
+
+ /* Copy event_string replacing the ':' with '/'. */
+ for (src = event_string, dst = path; *src; src++, dst++)
+ *dst = (*src == ':') ? '/' : *src;
+ /* Add "/id\0". */
+ memcpy(dst, "/id", 4);
+
+ dst = get_events_file(path);
+ if (dst)
+ have_file = file_available(dst);
+ free(dst);
+ free(path);
+ return have_file;
}
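
Illustration (not part of the patch): the rewritten is_valid_tracepoint() above no longer walks every subsystem directory; it rewrites "sys:event" as "sys/event/id" and simply checks whether that file exists under the tracing events path via get_events_file() and file_available(). The toy below reproduces only the string rewrite and is self-contained; "sched:sched_switch" is just an example event name, and the actual filesystem lookup is omitted.

/* Toy sketch of the "sys:event" -> "sys/event/id" rewrite done above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *tracepoint_id_path(const char *event_string)
{
	char *dst, *path = malloc(strlen(event_string) + 4);	/* room for "/id\0" */
	const char *src;

	if (!path)
		return NULL;
	for (src = event_string, dst = path; *src; src++, dst++)
		*dst = (*src == ':') ? '/' : *src;	/* sys:event -> sys/event */
	memcpy(dst, "/id", 4);				/* append "/id" plus NUL */
	return path;
}

int main(void)
{
	char *p = tracepoint_id_path("sched:sched_switch");	/* example name only */

	if (p) {
		printf("%s\n", p);	/* prints: sched/sched_switch/id */
		free(p);
	}
	return 0;
}
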
diff --git a/tools/perf/util/tracepoint.h b/tools/perf/util/tracepoint.h
index c4a110fe87..65ccb01fc3 100644
--- a/tools/perf/util/tracepoint.h
+++ b/tools/perf/util/tracepoint.h
@@ -4,6 +4,7 @@
#include <dirent.h>
#include <string.h>
+#include <stdbool.h>
int tp_event_has_id(const char *dir_path, struct dirent *evt_dir);
@@ -20,6 +21,6 @@ int tp_event_has_id(const char *dir_path, struct dirent *evt_dir);
(strcmp(sys_dirent->d_name, ".")) && \
(strcmp(sys_dirent->d_name, "..")))
-int is_valid_tracepoint(const char *event_string);
+bool is_valid_tracepoint(const char *event_string);
#endif /* __PERF_TRACEPOINT_H */
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index b38d322734..bde216e630 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -29,8 +29,8 @@ static int __find_debuginfo(Dwfl_Module *mod __maybe_unused, void **userdata,
const struct dso *dso = *userdata;
assert(dso);
- if (dso->symsrc_filename && strcmp (file_name, dso->symsrc_filename))
- *debuginfo_file_name = strdup(dso->symsrc_filename);
+ if (dso__symsrc_filename(dso) && strcmp(file_name, dso__symsrc_filename(dso)))
+ *debuginfo_file_name = strdup(dso__symsrc_filename(dso));
return -1;
}
@@ -66,7 +66,7 @@ static int __report_module(struct addr_location *al, u64 ip,
* a different code in another DSO. So just use the map->start
* directly to pick the correct one.
*/
- if (!strncmp(dso->long_name, "/tmp/jitted-", 12))
+ if (!strncmp(dso__long_name(dso), "/tmp/jitted-", 12))
base = map__start(al->map);
else
base = map__start(al->map) - map__pgoff(al->map);
@@ -83,15 +83,15 @@ static int __report_module(struct addr_location *al, u64 ip,
if (!mod) {
char filename[PATH_MAX];
- __symbol__join_symfs(filename, sizeof(filename), dso->long_name);
- mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
+ __symbol__join_symfs(filename, sizeof(filename), dso__long_name(dso));
+ mod = dwfl_report_elf(ui->dwfl, dso__short_name(dso), filename, -1,
base, false);
}
if (!mod) {
char filename[PATH_MAX];
if (dso__build_id_filename(dso, filename, sizeof(filename), false))
- mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1,
+ mod = dwfl_report_elf(ui->dwfl, dso__short_name(dso), filename, -1,
base, false);
}
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 6a5ac0faa6..7460bb96bd 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -329,27 +329,27 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui,
};
int ret, fd;
- if (dso->data.eh_frame_hdr_offset == 0) {
+ if (dso__data(dso)->eh_frame_hdr_offset == 0) {
fd = dso__data_get_fd(dso, ui->machine);
if (fd < 0)
return -EINVAL;
/* Check the .eh_frame section for unwinding info */
ret = elf_section_address_and_offset(fd, ".eh_frame_hdr",
- &dso->data.eh_frame_hdr_addr,
- &dso->data.eh_frame_hdr_offset);
- dso->data.elf_base_addr = elf_base_address(fd);
+ &dso__data(dso)->eh_frame_hdr_addr,
+ &dso__data(dso)->eh_frame_hdr_offset);
+ dso__data(dso)->elf_base_addr = elf_base_address(fd);
dso__data_put_fd(dso);
- if (ret || dso->data.eh_frame_hdr_offset == 0)
+ if (ret || dso__data(dso)->eh_frame_hdr_offset == 0)
return -EINVAL;
}
maps__for_each_map(thread__maps(ui->thread), read_unwind_spec_eh_frame_maps_cb, &args);
- args.base_addr -= dso->data.elf_base_addr;
+ args.base_addr -= dso__data(dso)->elf_base_addr;
/* Address of .eh_frame_hdr */
- *segbase = args.base_addr + dso->data.eh_frame_hdr_addr;
- ret = unwind_spec_ehframe(dso, ui->machine, dso->data.eh_frame_hdr_offset,
+ *segbase = args.base_addr + dso__data(dso)->eh_frame_hdr_addr;
+ ret = unwind_spec_ehframe(dso, ui->machine, dso__data(dso)->eh_frame_hdr_offset,
table_data, fde_count);
if (ret)
return ret;
@@ -363,7 +363,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
struct machine *machine, u64 *offset)
{
int fd;
- u64 ofs = dso->data.debug_frame_offset;
+ u64 ofs = dso__data(dso)->debug_frame_offset;
/* debug_frame can reside in:
* - dso
@@ -379,7 +379,7 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
}
if (ofs <= 0) {
- fd = open(dso->symsrc_filename, O_RDONLY);
+ fd = open(dso__symsrc_filename(dso), O_RDONLY);
if (fd >= 0) {
ofs = elf_section_offset(fd, ".debug_frame");
close(fd);
@@ -402,21 +402,21 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
}
}
if (ofs > 0) {
- if (dso->symsrc_filename != NULL) {
+ if (dso__symsrc_filename(dso) != NULL) {
pr_warning(
"%s: overwrite symsrc(%s,%s)\n",
__func__,
- dso->symsrc_filename,
+ dso__symsrc_filename(dso),
debuglink);
- zfree(&dso->symsrc_filename);
+ dso__free_symsrc_filename(dso);
}
- dso->symsrc_filename = debuglink;
+ dso__set_symsrc_filename(dso, debuglink);
} else {
free(debuglink);
}
}
- dso->data.debug_frame_offset = ofs;
+ dso__data(dso)->debug_frame_offset = ofs;
}
*offset = ofs;
@@ -460,7 +460,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
return -EINVAL;
}
- pr_debug("unwind: find_proc_info dso %s\n", dso->name);
+ pr_debug("unwind: find_proc_info dso %s\n", dso__name(dso));
/* Check the .eh_frame section for unwinding info */
if (!read_unwind_spec_eh_frame(dso, ui, &table_data, &segbase, &fde_count)) {
@@ -481,7 +481,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
if (ret < 0 &&
!read_unwind_spec_debug_frame(dso, ui->machine, &segbase)) {
int fd = dso__data_get_fd(dso, ui->machine);
- int is_exec = elf_is_exec(fd, dso->name);
+ int is_exec = elf_is_exec(fd, dso__name(dso));
u64 start = map__start(map);
unw_word_t base = is_exec ? 0 : start;
const char *symfile;
@@ -489,7 +489,7 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
if (fd >= 0)
dso__data_put_fd(dso);
- symfile = dso->symsrc_filename ?: dso->name;
+ symfile = dso__symsrc_filename(dso) ?: dso__name(dso);
memset(&di, 0, sizeof(di));
if (dwarf_find_debug_frame(0, &di, ip, base, symfile, start, map__end(map)))
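
Beyond the string accessors, the eh_frame/debug_frame hunks switch dso->data.* to dso__data(dso)->* while keeping the existing rule that every dso__data_get_fd() is paired with dso__data_put_fd(). A condensed sketch of that access pattern under those assumptions; cache_debug_frame_offset() is illustrative and trims the cached-offset and debuglink handling of the real function:

#include <errno.h>
#include "dso.h"	/* dso__data(), dso__data_get_fd(), dso__data_put_fd() */
#include "machine.h"

/* Illustrative helper, not part of the patch; elf_section_offset() is the
 * same ELF helper the hunk above uses. */
static int cache_debug_frame_offset(struct dso *dso, struct machine *machine)
{
	int fd = dso__data_get_fd(dso, machine);

	if (fd < 0)
		return -EINVAL;

	/* Write through the accessor into the dso's cached data area. */
	dso__data(dso)->debug_frame_offset = elf_section_offset(fd, ".debug_frame");
	dso__data_put_fd(dso);	/* always pair the get with a put */

	return 0;
}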
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 2728eb4f13..cb8be6acfb 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -25,7 +25,7 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized
return 0;
if (maps__addr_space(maps)) {
- pr_debug("unwind: thread map already set, dso=%s\n", dso->name);
+ pr_debug("unwind: thread map already set, dso=%s\n", dso__name(dso));
if (initialized)
*initialized = true;
return 0;
diff --git a/tools/perf/util/values.h b/tools/perf/util/values.h
index 8c41f22f42..791c1ad606 100644
--- a/tools/perf/util/values.h
+++ b/tools/perf/util/values.h
@@ -2,6 +2,7 @@
#ifndef __PERF_VALUES_H
#define __PERF_VALUES_H
+#include <stdio.h>
#include <linux/types.h>
struct perf_read_values {
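
The new <stdio.h> include makes values.h self-contained for its FILE-based interface instead of relying on whatever the including .c file pulled in first. A minimal sketch of the kind of declaration that needs it; example_values_display() is a hypothetical name standing in for the header's actual FILE-based prototypes:

#include <stdio.h>	/* FILE, now pulled in by the header itself */
#include "values.h"

/* Hypothetical prototype, illustrative only: a FILE * parameter requires the
 * type to be visible at parse time, hence the <stdio.h> include above. */
void example_values_display(FILE *fp, struct perf_read_values *values);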
diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c
index df89637961..1b6f8f6db7 100644
--- a/tools/perf/util/vdso.c
+++ b/tools/perf/util/vdso.c
@@ -133,8 +133,6 @@ static struct dso *__machine__addnew_vdso(struct machine *machine, const char *s
if (dso != NULL) {
__dsos__add(&machine->dsos, dso);
dso__set_long_name(dso, long_name, false);
- /* Put dso here because __dsos_add already got it */
- dso__put(dso);
}
return dso;
@@ -150,7 +148,7 @@ static int machine__thread_dso_type_maps_cb(struct map *map, void *data)
struct machine__thread_dso_type_maps_cb_args *args = data;
struct dso *dso = map__dso(map);
- if (!dso || dso->long_name[0] != '/')
+ if (!dso || dso__long_name(dso)[0] != '/')
return 0;
args->dso_type = dso__type(dso, args->machine);
@@ -252,17 +250,15 @@ static struct dso *__machine__findnew_compat(struct machine *machine,
const char *file_name;
struct dso *dso;
- dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true);
+ dso = dsos__find(&machine->dsos, vdso_file->dso_name, true);
if (dso)
- goto out;
+ return dso;
file_name = vdso__get_compat_file(vdso_file);
if (!file_name)
- goto out;
+ return NULL;
- dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name);
-out:
- return dso;
+ return __machine__addnew_vdso(machine, vdso_file->dso_name, file_name);
}
static int __machine__findnew_vdso_compat(struct machine *machine,
@@ -308,21 +304,21 @@ static struct dso *machine__find_vdso(struct machine *machine,
dso_type = machine__thread_dso_type(machine, thread);
switch (dso_type) {
case DSO__TYPE_32BIT:
- dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO32, true);
+ dso = dsos__find(&machine->dsos, DSO__NAME_VDSO32, true);
if (!dso) {
- dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO,
- true);
+ dso = dsos__find(&machine->dsos, DSO__NAME_VDSO,
+ true);
if (dso && dso_type != dso__type(dso, machine))
dso = NULL;
}
break;
case DSO__TYPE_X32BIT:
- dso = __dsos__find(&machine->dsos, DSO__NAME_VDSOX32, true);
+ dso = dsos__find(&machine->dsos, DSO__NAME_VDSOX32, true);
break;
case DSO__TYPE_64BIT:
case DSO__TYPE_UNKNOWN:
default:
- dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO, true);
+ dso = dsos__find(&machine->dsos, DSO__NAME_VDSO, true);
break;
}
@@ -334,42 +330,38 @@ struct dso *machine__findnew_vdso(struct machine *machine,
{
struct vdso_info *vdso_info;
struct dso *dso = NULL;
+ char *file;
- down_write(&machine->dsos.lock);
if (!machine->vdso_info)
machine->vdso_info = vdso_info__new();
vdso_info = machine->vdso_info;
if (!vdso_info)
- goto out_unlock;
+ return NULL;
dso = machine__find_vdso(machine, thread);
if (dso)
- goto out_unlock;
+ return dso;
#if BITS_PER_LONG == 64
if (__machine__findnew_vdso_compat(machine, thread, vdso_info, &dso))
- goto out_unlock;
+ return dso;
#endif
- dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO, true);
- if (!dso) {
- char *file;
+ dso = dsos__find(&machine->dsos, DSO__NAME_VDSO, true);
+ if (dso)
+ return dso;
- file = get_file(&vdso_info->vdso);
- if (file)
- dso = __machine__addnew_vdso(machine, DSO__NAME_VDSO, file);
- }
+ file = get_file(&vdso_info->vdso);
+ if (!file)
+ return NULL;
-out_unlock:
- dso__get(dso);
- up_write(&machine->dsos.lock);
- return dso;
+ return __machine__addnew_vdso(machine, DSO__NAME_VDSO, file);
}
bool dso__is_vdso(struct dso *dso)
{
- return !strcmp(dso->short_name, DSO__NAME_VDSO) ||
- !strcmp(dso->short_name, DSO__NAME_VDSO32) ||
- !strcmp(dso->short_name, DSO__NAME_VDSOX32);
+ return !strcmp(dso__short_name(dso), DSO__NAME_VDSO) ||
+ !strcmp(dso__short_name(dso), DSO__NAME_VDSO32) ||
+ !strcmp(dso__short_name(dso), DSO__NAME_VDSOX32);
}
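
The vdso.c rework drops the manual down_write()/up_write() on machine->dsos.lock together with the trailing dso__get()/dso__put() calls; the hunks suggest dsos__find() now takes care of its own locking and hands back a reference the caller keeps, so the early returns above can pass the result straight through. A hedged, condensed sketch of the resulting lookup flow inside vdso.c; findnew_vdso_sketch() is illustrative, and the error paths and 32-bit/x32 compat branches of the real machine__findnew_vdso() are omitted:

#include "dso.h"	/* struct dso, DSO__NAME_VDSO */
#include "dsos.h"	/* dsos__find() */
#include "machine.h"

/* Illustrative condensation of the new flow, not the full function. */
static struct dso *findnew_vdso_sketch(struct machine *machine, const char *file)
{
	struct dso *dso = dsos__find(&machine->dsos, DSO__NAME_VDSO, true);

	if (dso)
		return dso;	/* already known: return it directly */

	/* Not found: register the dumped vdso image at 'file'. */
	return __machine__addnew_vdso(machine, DSO__NAME_VDSO, file);
}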