| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:22 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:22 +0000 |
| commit | b20732900e4636a467c0183a47f7396700f5f743 (patch) | |
| tree | 42f079ff82e701ebcb76829974b4caca3e5b6798 /tools/perf/util | |
| parent | Adding upstream version 6.8.12. (diff) | |
| download | linux-b20732900e4636a467c0183a47f7396700f5f743.tar.xz, linux-b20732900e4636a467c0183a47f7396700f5f743.zip | |
Adding upstream version 6.9.7. (tag: upstream/6.9.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/perf/util')
86 files changed, 3058 insertions, 1373 deletions
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index 8027f450fa..e0a723e245 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -32,6 +32,7 @@ perf-y += perf_regs.o
 perf-y += perf-regs-arch/
 perf-y += path.o
 perf-y += print_binary.o
+perf-y += print_insn.o
 perf-y += rlimit.o
 perf-y += argv_split.o
 perf-y += rbtree.o
@@ -71,6 +72,7 @@ perf-y += ordered-events.o
 perf-y += namespaces.o
 perf-y += comm.o
 perf-y += thread.o
+perf-y += threads.o
 perf-y += thread_map.o
 perf-y += parse-events-flex.o
 perf-y += parse-events-bison.o
diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c
index f22b4f1827..30c4d19fcf 100644
--- a/tools/perf/util/annotate-data.c
+++ b/tools/perf/util/annotate-data.c
@@ -9,10 +9,12 @@
 #include <stdlib.h>
 #include <inttypes.h>
 
+#include "annotate.h"
 #include "annotate-data.h"
 #include "debuginfo.h"
 #include "debug.h"
 #include "dso.h"
+#include "dwarf-regs.h"
 #include "evsel.h"
 #include "evlist.h"
 #include "map.h"
@@ -192,7 +194,8 @@ static bool find_cu_die(struct debuginfo *di, u64 pc, Dwarf_Die *cu_die)
 }
 
 /* The type info will be saved in @type_die */
-static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset)
+static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset,
+			  bool is_pointer)
 {
 	Dwarf_Word size;
 
@@ -204,14 +207,18 @@ static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset)
 	}
 
 	/*
-	 * It expects a pointer type for a memory access.
-	 * Convert to a real type it points to.
+	 * Usually it expects a pointer type for a memory access.
+	 * Convert to a real type it points to.  But global variables
+	 * and local variables are accessed directly without a pointer.
 	 */
-	if (dwarf_tag(type_die) != DW_TAG_pointer_type ||
-	    die_get_real_type(type_die, type_die) == NULL) {
-		pr_debug("no pointer or no type\n");
-		ann_data_stat.no_typeinfo++;
-		return -1;
+	if (is_pointer) {
+		if ((dwarf_tag(type_die) != DW_TAG_pointer_type &&
+		     dwarf_tag(type_die) != DW_TAG_array_type) ||
+		    die_get_real_type(type_die, type_die) == NULL) {
+			pr_debug("no pointer or no type\n");
+			ann_data_stat.no_typeinfo++;
+			return -1;
+		}
 	}
 
 	/* Get the size of the actual type */
@@ -232,13 +239,18 @@ static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset)
 }
 
 /* The result will be saved in @type_die */
-static int find_data_type_die(struct debuginfo *di, u64 pc,
-			      int reg, int offset, Dwarf_Die *type_die)
+static int find_data_type_die(struct debuginfo *di, u64 pc, u64 addr,
+			      const char *var_name, struct annotated_op_loc *loc,
+			      Dwarf_Die *type_die)
 {
 	Dwarf_Die cu_die, var_die;
 	Dwarf_Die *scopes = NULL;
+	int reg, offset;
 	int ret = -1;
 	int i, nr_scopes;
+	int fbreg = -1;
+	bool is_fbreg = false;
+	int fb_offset = 0;
 
 	/* Get a compile_unit for this address */
 	if (!find_cu_die(di, pc, &cu_die)) {
@@ -247,19 +259,81 @@ static int find_data_type_die(struct debuginfo *di, u64 pc,
 		return -1;
 	}
 
+	reg = loc->reg1;
+	offset = loc->offset;
+
+	if (reg == DWARF_REG_PC) {
+		if (die_find_variable_by_addr(&cu_die, pc, addr, &var_die, &offset)) {
+			ret = check_variable(&var_die, type_die, offset,
+					     /*is_pointer=*/false);
+			loc->offset = offset;
+			goto out;
+		}
+
+		if (var_name && die_find_variable_at(&cu_die, var_name, pc,
+						     &var_die)) {
+			ret = check_variable(&var_die, type_die, 0,
+					     /*is_pointer=*/false);
+			/* loc->offset will be updated by the caller */
+			goto out;
+		}
+	}
+
 	/* Get a list of nested scopes - i.e. (inlined) functions and blocks. */
 	nr_scopes = die_get_scopes(&cu_die, pc, &scopes);
 
+	if (reg != DWARF_REG_PC && dwarf_hasattr(&scopes[0], DW_AT_frame_base)) {
+		Dwarf_Attribute attr;
+		Dwarf_Block block;
+
+		/* Check if the 'reg' is assigned as frame base register */
+		if (dwarf_attr(&scopes[0], DW_AT_frame_base, &attr) != NULL &&
+		    dwarf_formblock(&attr, &block) == 0 && block.length == 1) {
+			switch (*block.data) {
+			case DW_OP_reg0 ... DW_OP_reg31:
+				fbreg = *block.data - DW_OP_reg0;
+				break;
+			case DW_OP_call_frame_cfa:
+				if (die_get_cfa(di->dbg, pc, &fbreg,
+						&fb_offset) < 0)
+					fbreg = -1;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+retry:
+	is_fbreg = (reg == fbreg);
+	if (is_fbreg)
+		offset = loc->offset - fb_offset;
+
 	/* Search from the inner-most scope to the outer */
 	for (i = nr_scopes - 1; i >= 0; i--) {
-		/* Look up variables/parameters in this scope */
-		if (!die_find_variable_by_reg(&scopes[i], pc, reg, &var_die))
-			continue;
+		if (reg == DWARF_REG_PC) {
+			if (!die_find_variable_by_addr(&scopes[i], pc, addr,
+						       &var_die, &offset))
+				continue;
+		} else {
+			/* Look up variables/parameters in this scope */
+			if (!die_find_variable_by_reg(&scopes[i], pc, reg,
+						      &offset, is_fbreg, &var_die))
+				continue;
+		}
 
 		/* Found a variable, see if it's correct */
-		ret = check_variable(&var_die, type_die, offset);
+		ret = check_variable(&var_die, type_die, offset,
+				     reg != DWARF_REG_PC && !is_fbreg);
+		loc->offset = offset;
 		goto out;
 	}
+
+	if (loc->multi_regs && reg == loc->reg1 && loc->reg1 != loc->reg2) {
+		reg = loc->reg2;
+		goto retry;
+	}
+
 	if (ret < 0)
 		ann_data_stat.no_var++;
 
@@ -272,15 +346,22 @@ out:
 /**
  * find_data_type - Return a data type at the location
  * @ms: map and symbol at the location
  * @ip: instruction address of the memory access
- * @reg: register that holds the base address
- * @offset: offset from the base address
+ * @loc: instruction operand location
+ * @addr: data address of the memory access
+ * @var_name: global variable name
  *
  * This function searches the debug information of the binary to get the data
- * type it accesses.  The exact location is expressed by (ip, reg, offset).
+ * type it accesses.  The exact location is expressed by (@ip, reg, offset)
+ * for pointer variables or (@ip, @addr) for global variables.  Note that global
+ * variables might update the @loc->offset after finding the start of the variable.
+ * If it cannot find a global variable by address, it tries to find a declaration
+ * of the variable using @var_name.  In that case, @loc->offset won't be updated.
+ *
+ * It returns %NULL if not found.
 */
 struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip,
-					   int reg, int offset)
+					   struct annotated_op_loc *loc, u64 addr,
+					   const char *var_name)
 {
 	struct annotated_data_type *result = NULL;
 	struct dso *dso = map__dso(ms->map);
@@ -300,7 +381,7 @@ struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip,
 	 * a file address for DWARF processing.
 	 */
 	pc = map__rip_2objdump(ms->map, ip);
-	if (find_data_type_die(di, pc, reg, offset, &type_die) < 0)
+	if (find_data_type_die(di, pc, addr, var_name, loc, &type_die) < 0)
 		goto out;
 
 	result = dso__findnew_data_type(dso, &type_die);
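For orientation, the reworked API takes a whole operand location instead of a bare (reg, offset) pair. A minimal sketch of a call into it, assuming the perf-internal types from annotate.h/annotate-data.h; the wrapper function, field values and the DWARF register number (6 = %rbp on x86-64) are illustrative, not part of the patch:

```c
#include "annotate.h"
#include "annotate-data.h"

/* Sketch: look up the type accessed by "mov -0x8(%rbp), %rax" at 'ip'.
 * Register numbers are DWARF numbers; error handling is elided.
 */
static struct annotated_data_type *type_of_stack_slot(struct map_symbol *ms, u64 ip)
{
	struct annotated_op_loc loc = {
		.reg1		= 6,	/* DWARF regnum of %rbp */
		.reg2		= -1,
		.offset		= -8,	/* displacement in the operand */
		.mem_ref	= true,
		.multi_regs	= false,
	};

	/* addr/var_name are only used for PC-relative (global) accesses */
	return find_data_type(ms, ip, &loc, /*addr=*/0, /*var_name=*/NULL);
}
```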
diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h
index 8e73096c01..1b0db8e8c4 100644
--- a/tools/perf/util/annotate-data.h
+++ b/tools/perf/util/annotate-data.h
@@ -7,6 +7,7 @@
 #include <linux/rbtree.h>
 #include <linux/types.h>
 
+struct annotated_op_loc;
 struct evsel;
 struct map_symbol;
 
@@ -69,6 +70,7 @@ struct annotated_data_type {
 };
 
 extern struct annotated_data_type unknown_type;
+extern struct annotated_data_type stackop_type;
 
 /**
  * struct annotated_data_stat - Debug statistics
@@ -105,7 +107,8 @@ extern struct annotated_data_stat ann_data_stat;
 
 /* Returns data type at the location (ip, reg, offset) */
 struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip,
-					   int reg, int offset);
+					   struct annotated_op_loc *loc, u64 addr,
+					   const char *var_name);
 
 /* Update type access histogram at the given offset */
 int annotated_data_type__update_samples(struct annotated_data_type *adt,
@@ -119,7 +122,8 @@ void annotated_data_type__tree_delete(struct rb_root *root);
 
 static inline struct annotated_data_type *
 find_data_type(struct map_symbol *ms __maybe_unused, u64 ip __maybe_unused,
-	       int reg __maybe_unused, int offset __maybe_unused)
+	       struct annotated_op_loc *loc __maybe_unused,
+	       u64 addr __maybe_unused, const char *var_name __maybe_unused)
 {
 	return NULL;
 }
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 86a996290e..79d082155c 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -37,6 +37,8 @@
 #include "util/sharded_mutex.h"
 #include "arch/common.h"
 #include "namespaces.h"
+#include "thread.h"
+#include "hashmap.h"
 #include <regex.h>
 #include <linux/bitops.h>
 #include <linux/kernel.h>
@@ -107,6 +109,14 @@ static struct ins_ops ret_ops;
 struct annotated_data_stat ann_data_stat;
 LIST_HEAD(ann_insn_stat);
 
+/* Pseudo data types */
+struct annotated_data_type stackop_type = {
+	.self = {
+		.type_name = (char *)"(stack operation)",
+		.children = LIST_HEAD_INIT(stackop_type.self.children),
+	},
+};
+
 static int arch__grow_instructions(struct arch *arch)
 {
 	struct ins *new_instructions;
@@ -854,6 +864,17 @@ bool arch__is(struct arch *arch, const char *name)
 	return !strcmp(arch->name, name);
 }
 
+/* symbol histogram: key = offset << 16 | evsel->core.idx */
+static size_t sym_hist_hash(long key, void *ctx __maybe_unused)
+{
+	return (key >> 16) + (key & 0xffff);
+}
+
+static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+	return key1 == key2;
+}
+
 static struct annotated_source *annotated_source__new(void)
 {
 	struct annotated_source *src = zalloc(sizeof(*src));
@@ -866,40 +887,34 @@ static struct annotated_source *annotated_source__new(void)
 
 static __maybe_unused void annotated_source__delete(struct annotated_source *src)
 {
+	struct hashmap_entry *cur;
+	size_t bkt;
+
 	if (src == NULL)
 		return;
+
+	if (src->samples) {
+		hashmap__for_each_entry(src->samples, cur, bkt)
+			zfree(&cur->pvalue);
+		hashmap__free(src->samples);
+	}
 	zfree(&src->histograms);
 	free(src);
 }
 
 static int annotated_source__alloc_histograms(struct annotated_source *src,
-					      size_t size, int nr_hists)
+					      int nr_hists)
 {
-	size_t sizeof_sym_hist;
-
-	/*
-	 * Add buffer of one element for zero length symbol.
-	 * When sample is taken from first instruction of
-	 * zero length symbol, perf still resolves it and
-	 * shows symbol name in perf report and allows to
-	 * annotate it.
-	 */
-	if (size == 0)
-		size = 1;
+	src->nr_histograms = nr_hists;
+	src->histograms = calloc(nr_hists, sizeof(*src->histograms));
 
-	/* Check for overflow when calculating sizeof_sym_hist */
-	if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(struct sym_hist_entry))
+	if (src->histograms == NULL)
 		return -1;
 
-	sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry));
-
-	/* Check for overflow in zalloc argument */
-	if (sizeof_sym_hist > SIZE_MAX / nr_hists)
-		return -1;
+	src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
+	if (src->samples == NULL)
+		zfree(&src->histograms);
 
-	src->sizeof_sym_hist = sizeof_sym_hist;
-	src->nr_histograms   = nr_hists;
-	src->histograms	     = calloc(nr_hists, sizeof_sym_hist);
 	return src->histograms ? 0 : -1;
 }
 
@@ -910,7 +925,8 @@ void symbol__annotate_zero_histograms(struct symbol *sym)
 	annotation__lock(notes);
 	if (notes->src != NULL) {
 		memset(notes->src->histograms, 0,
-		       notes->src->nr_histograms * notes->src->sizeof_sym_hist);
+		       notes->src->nr_histograms * sizeof(*notes->src->histograms));
+		hashmap__clear(notes->src->samples);
 	}
 	if (notes->branch && notes->branch->cycles_hist) {
 		memset(notes->branch->cycles_hist, 0,
@@ -974,8 +990,10 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms,
 				      struct perf_sample *sample)
 {
 	struct symbol *sym = ms->sym;
-	unsigned offset;
+	long hash_key;
+	u64 offset;
 	struct sym_hist *h;
+	struct sym_hist_entry *entry;
 
 	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr));
 
@@ -993,15 +1011,26 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms,
 			  __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC);
 		return -ENOMEM;
 	}
+
+	hash_key = offset << 16 | evidx;
+	if (!hashmap__find(src->samples, hash_key, &entry)) {
+		entry = zalloc(sizeof(*entry));
+		if (entry == NULL)
+			return -ENOMEM;
+
+		if (hashmap__add(src->samples, hash_key, entry) < 0)
+			return -ENOMEM;
+	}
+
 	h->nr_samples++;
-	h->addr[offset].nr_samples++;
 	h->period += sample->period;
-	h->addr[offset].period += sample->period;
+	entry->nr_samples++;
+	entry->period += sample->period;
 
 	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
 		  ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n",
 		  sym->start, sym->name, addr, addr - sym->start, evidx,
-		  h->addr[offset].nr_samples, h->addr[offset].period);
+		  entry->nr_samples, entry->period);
 	return 0;
 }
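The hashmap replaces the old flat per-offset array, and its keys pack the symbol offset and the event index into a single long. A standalone sketch of the packing scheme (plain C, illustrative values; note it assumes the event index fits in 16 bits):

```c
#include <stdio.h>

/* Pack/unpack the sample key, mirroring "offset << 16 | evsel->core.idx"
 * above: offset in the upper bits, event index in the low 16 bits.
 */
static long sample_key(unsigned long offset, int evidx)
{
	return (long)(offset << 16 | (unsigned int)evidx);
}

int main(void)
{
	long key = sample_key(0x42, 3);

	printf("offset=%#lx evidx=%ld\n", key >> 16, key & 0xffff);
	return 0;
}
```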
@@ -1047,8 +1076,7 @@ struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists)
 
 	if (notes->src->histograms == NULL) {
 alloc_histograms:
-		annotated_source__alloc_histograms(notes->src, symbol__size(sym),
-						   nr_hists);
+		annotated_source__alloc_histograms(notes->src, nr_hists);
 	}
 
 	return notes->src;
@@ -2321,17 +2349,25 @@ out_remove_tmp:
 	return err;
 }
 
-static void calc_percent(struct sym_hist *sym_hist,
-			 struct hists *hists,
+static void calc_percent(struct annotation *notes,
+			 struct evsel *evsel,
 			 struct annotation_data *data,
 			 s64 offset, s64 end)
 {
+	struct hists *hists = evsel__hists(evsel);
+	int evidx = evsel->core.idx;
+	struct sym_hist *sym_hist = annotation__histogram(notes, evidx);
 	unsigned int hits = 0;
 	u64 period = 0;
 
 	while (offset < end) {
-		hits += sym_hist->addr[offset].nr_samples;
-		period += sym_hist->addr[offset].period;
+		struct sym_hist_entry *entry;
+
+		entry = annotated_source__hist_entry(notes->src, evidx, offset);
+		if (entry) {
+			hits += entry->nr_samples;
+			period += entry->period;
+		}
 		++offset;
 	}
 
@@ -2368,16 +2404,13 @@ static void annotation__calc_percent(struct annotation *notes,
 			end = next ? next->offset : len;
 
 			for_each_group_evsel(evsel, leader) {
-				struct hists *hists = evsel__hists(evsel);
 				struct annotation_data *data;
-				struct sym_hist *sym_hist;
 
 				BUG_ON(i >= al->data_nr);
 
-				sym_hist = annotation__histogram(notes, evsel->core.idx);
 				data = &al->data[i++];
 
-				calc_percent(sym_hist, hists, data, al->offset, end);
+				calc_percent(notes, evsel, data, al->offset, end);
 			}
 		}
 	}
@@ -2575,14 +2608,19 @@ static void print_summary(struct rb_root *root, const char *filename)
 
 static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
 {
+	int evidx = evsel->core.idx;
 	struct annotation *notes = symbol__annotation(sym);
-	struct sym_hist *h = annotation__histogram(notes, evsel->core.idx);
+	struct sym_hist *h = annotation__histogram(notes, evidx);
 	u64 len = symbol__size(sym), offset;
 
-	for (offset = 0; offset < len; ++offset)
-		if (h->addr[offset].nr_samples != 0)
+	for (offset = 0; offset < len; ++offset) {
+		struct sym_hist_entry *entry;
+
+		entry = annotated_source__hist_entry(notes->src, evidx, offset);
+		if (entry && entry->nr_samples != 0)
 			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
-			       sym->start + offset, h->addr[offset].nr_samples);
+			       sym->start + offset, entry->nr_samples);
+	}
 	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
 }
 
@@ -2800,7 +2838,7 @@ void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
 	struct annotation *notes = symbol__annotation(sym);
 	struct sym_hist *h = annotation__histogram(notes, evidx);
 
-	memset(h, 0, notes->src->sizeof_sym_hist);
+	memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
 }
 
 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
@@ -2811,8 +2849,14 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
 	h->nr_samples = 0;
 	for (offset = 0; offset < len; ++offset) {
-		h->addr[offset].nr_samples = h->addr[offset].nr_samples * 7 / 8;
-		h->nr_samples += h->addr[offset].nr_samples;
+		struct sym_hist_entry *entry;
+
+		entry = annotated_source__hist_entry(notes->src, evidx, offset);
+		if (entry == NULL)
+			continue;
+
+		entry->nr_samples = entry->nr_samples * 7 / 8;
+		h->nr_samples += entry->nr_samples;
 	}
 }
 
@@ -2988,7 +3032,7 @@ void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
 	annotation__update_column_widths(notes);
 }
 
-static void annotation__calc_lines(struct annotation *notes, struct map *map,
+static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
 				   struct rb_root *root)
 {
 	struct annotation_line *al;
@@ -2996,6 +3040,7 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
 
 	list_for_each_entry(al, &notes->src->source, node) {
 		double percent_max = 0.0;
+		u64 addr;
 		int i;
 
 		for (i = 0; i < al->data_nr; i++) {
@@ -3011,8 +3056,9 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map,
 		if (percent_max <= 0.5)
 			continue;
 
-		al->path = get_srcline(map__dso(map), notes->start + al->offset, NULL,
-				       false, true, notes->start + al->offset);
+		addr = map__rip_2objdump(ms->map, ms->sym->start);
+		al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
+				       false, true, ms->sym->start + al->offset);
 		insert_source_line(&tmp_root, al);
 	}
 
@@ -3023,7 +3069,7 @@ static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
 {
 	struct annotation *notes = symbol__annotation(ms->sym);
 
-	annotation__calc_lines(notes, ms->map, root);
+	annotation__calc_lines(notes, ms, root);
 }
 
 int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel)
@@ -3566,8 +3612,22 @@ static int extract_reg_offset(struct arch *arch, const char *str,
 	if (regname == NULL)
 		return -1;
 
-	op_loc->reg = get_dwarf_regnum(regname, 0);
+	op_loc->reg1 = get_dwarf_regnum(regname, 0);
 	free(regname);
+
+	/* Get the second register */
+	if (op_loc->multi_regs) {
+		p = strchr(p + 1, arch->objdump.register_char);
+		if (p == NULL)
+			return -1;
+
+		regname = strdup(p);
+		if (regname == NULL)
+			return -1;
+
+		op_loc->reg2 = get_dwarf_regnum(regname, 0);
+		free(regname);
+	}
 	return 0;
 }
 
@@ -3580,14 +3640,20 @@ static int extract_reg_offset(struct arch *arch, const char *str,
 * Get detailed location info (register and offset) in the instruction.
 * It needs both source and target operand and whether it accesses a
 * memory location.  The offset field is meaningful only when the
- * corresponding mem flag is set.
+ * corresponding mem flag is set.  The reg2 field is meaningful only
+ * when multi_regs flag is set.
 *
 * Some examples on x86:
 *
- *   mov  (%rax), %rcx   # src_reg = rax, src_mem = 1, src_offset = 0
- *                       # dst_reg = rcx, dst_mem = 0
+ *   mov  (%rax), %rcx   # src_reg1 = rax, src_mem = 1, src_offset = 0
+ *                       # dst_reg1 = rcx, dst_mem = 0
 *
- *   mov  0x18, %r8      # src_reg = -1, dst_reg = r8
+ *   mov  0x18, %r8      # src_reg1 = -1, src_mem = 0
+ *                       # dst_reg1 = r8, dst_mem = 0
+ *
+ *   mov  %rsi, 8(%rbx,%rcx,4)   # src_reg1 = rsi, src_mem = 0, dst_multi_regs = 0
+ *                               # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
+ *                               # dst_multi_regs = 1, dst_offset = 8
 */
 int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
			       struct annotated_insn_loc *loc)
@@ -3608,24 +3674,29 @@ int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
 
 	for_each_insn_op_loc(loc, i, op_loc) {
 		const char *insn_str = ops->source.raw;
+		bool multi_regs = ops->source.multi_regs;
 
-		if (i == INSN_OP_TARGET)
+		if (i == INSN_OP_TARGET) {
 			insn_str = ops->target.raw;
+			multi_regs = ops->target.multi_regs;
+		}
 
 		/* Invalidate the register by default */
-		op_loc->reg = -1;
+		op_loc->reg1 = -1;
+		op_loc->reg2 = -1;
 
 		if (insn_str == NULL)
 			continue;
 
 		if (strchr(insn_str, arch->objdump.memory_ref_char)) {
 			op_loc->mem_ref = true;
+			op_loc->multi_regs = multi_regs;
 			extract_reg_offset(arch, insn_str, op_loc);
 		} else {
 			char *s = strdup(insn_str);
 
 			if (s) {
-				op_loc->reg = get_dwarf_regnum(s, 0);
+				op_loc->reg1 = get_dwarf_regnum(s, 0);
 				free(s);
 			}
 		}
@@ -3663,8 +3734,17 @@ static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip)
 
 	notes = symbol__annotation(sym);
 	list_for_each_entry(dl, &notes->src->source, al.node) {
-		if (sym->start + dl->al.offset == ip)
+		if (sym->start + dl->al.offset == ip) {
+			/*
+			 * llvm-objdump places "lock" in a separate line and
+			 * in that case, we want to get the next line.
+			 */
+			if (!strcmp(dl->ins.name, "lock") && *dl->ops.raw == '\0') {
+				ip++;
+				continue;
+			}
 			return dl;
+		}
 	}
 	return NULL;
 }
 
@@ -3693,6 +3773,42 @@ static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
 	return istat;
 }
 
+static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
+{
+	if (arch__is(arch, "x86")) {
+		if (!strncmp(dl->ins.name, "push", 4) ||
+		    !strncmp(dl->ins.name, "pop", 3) ||
+		    !strncmp(dl->ins.name, "ret", 3))
+			return true;
+	}
+
+	return false;
+}
+
+u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
+			struct disasm_line *dl)
+{
+	struct annotation *notes;
+	struct disasm_line *next;
+	u64 addr;
+
+	notes = symbol__annotation(ms->sym);
+	/*
+	 * PC-relative addressing starts from the next instruction address
+	 * but the IP is for the current instruction.  Since disasm_line
+	 * doesn't have the instruction size, calculate it using the next
+	 * disasm_line.  If it's the last one, we can use the symbol's end
+	 * address directly.
+	 */
+	if (&dl->al.node == notes->src->source.prev)
+		addr = ms->sym->end + offset;
+	else {
+		next = list_next_entry(dl, al.node);
+		addr = ip + (next->al.offset - dl->al.offset) + offset;
+	}
+	return map__rip_2objdump(ms->map, addr);
+}
+
 /**
 * hist_entry__get_data_type - find data type for given hist entry
 * @he: hist entry
@@ -3712,7 +3828,9 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
 	struct annotated_op_loc *op_loc;
 	struct annotated_data_type *mem_type;
 	struct annotated_item_stat *istat;
-	u64 ip = he->ip;
+	u64 ip = he->ip, addr = 0;
+	const char *var_name = NULL;
+	int var_offset;
 	int i;
 
 	ann_data_stat.total++;
@@ -3745,6 +3863,7 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
 		return NULL;
 	}
 
+retry:
 	istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
 	if (istat == NULL) {
 		ann_data_stat.no_insn++;
@@ -3757,16 +3876,51 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
 		return NULL;
 	}
 
+	if (is_stack_operation(arch, dl)) {
+		istat->good++;
+		he->mem_type_off = 0;
+		return &stackop_type;
+	}
+
 	for_each_insn_op_loc(&loc, i, op_loc) {
 		if (!op_loc->mem_ref)
 			continue;
 
-		mem_type = find_data_type(ms, ip, op_loc->reg, op_loc->offset);
+		/* Recalculate IP because of LOCK prefix or insn fusion */
+		ip = ms->sym->start + dl->al.offset;
+
+		var_offset = op_loc->offset;
+
+		/* PC-relative addressing */
+		if (op_loc->reg1 == DWARF_REG_PC) {
+			struct addr_location al;
+			struct symbol *var;
+			u64 map_addr;
+
+			addr = annotate_calc_pcrel(ms, ip, op_loc->offset, dl);
+			/* Kernel symbols might be relocated */
+			map_addr = addr + map__reloc(ms->map);
+
+			addr_location__init(&al);
+			var = thread__find_symbol_fb(he->thread, he->cpumode,
+						     map_addr, &al);
+			if (var) {
+				var_name = var->name;
+				/* Calculate type offset from the start of variable */
+				var_offset = map_addr - map__unmap_ip(al.map, var->start);
+			}
+			addr_location__exit(&al);
+		}
+
+		mem_type = find_data_type(ms, ip, op_loc, addr, var_name);
 		if (mem_type)
 			istat->good++;
 		else
 			istat->bad++;
 
+		if (mem_type && var_name)
+			op_loc->offset = var_offset;
+
 		if (symbol_conf.annotate_data_sample) {
 			annotated_data_type__update_samples(mem_type, evsel,
 							    op_loc->offset,
@@ -3777,6 +3931,20 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
 		return mem_type;
 	}
 
+	/*
+	 * Some instructions can be fused and the actual memory access came
+	 * from the previous instruction.
+	 */
+	if (dl->al.offset > 0) {
+		struct disasm_line *prev_dl;
+
+		prev_dl = list_prev_entry(dl, al.node);
+		if (ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
+			dl = prev_dl;
+			goto retry;
+		}
+	}
+
 	ann_data_stat.no_mem_ops++;
 	istat->bad++;
 	return NULL;
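The next-instruction arithmetic in annotate_calc_pcrel() is easiest to see with numbers. A standalone sketch with made-up addresses (the real code derives the instruction size from adjacent disasm_line offsets, as above):

```c
#include <stdio.h>

/* Sketch of the RIP-relative math: the displacement is applied to the
 * address of the *next* instruction, approximated here from the offset
 * of the following disassembly line within the same symbol.
 */
int main(void)
{
	unsigned long ip = 0x401000;		/* current insn address */
	int cur_off = 0x10, next_off = 0x17;	/* offsets within the symbol */
	int disp = 0x2f21;	/* displacement, e.g. "mov 0x2f21(%rip),%rax" */

	unsigned long target = ip + (next_off - cur_off) + disp;

	printf("target = %#lx\n", target);	/* 0x401007 + 0x2f21 */
	return 0;
}
```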
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index dba50762c6..13cc659e50 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -12,6 +12,7 @@
 #include "symbol_conf.h"
 #include "mutex.h"
 #include "spark.h"
+#include "hashmap.h"
 
 struct hist_browser_timer;
 struct hist_entry;
@@ -238,12 +239,42 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name);
 size_t disasm__fprintf(struct list_head *head, FILE *fp);
 void symbol__calc_percent(struct symbol *sym, struct evsel *evsel);
 
+/**
+ * struct sym_hist - symbol histogram information for an event
+ *
+ * @nr_samples: Total number of samples.
+ * @period: Sum of sample periods.
+ */
 struct sym_hist {
 	u64		nr_samples;
 	u64		period;
-	struct sym_hist_entry addr[];
 };
 
+/**
+ * struct cyc_hist - (CPU) cycle histogram for a basic block
+ *
+ * @start: Start address of current block (if known).
+ * @cycles: Sum of cycles for the longest basic block.
+ * @cycles_aggr: Total cycles for this address.
+ * @cycles_max: Max cycles for this address.
+ * @cycles_min: Min cycles for this address.
+ * @cycles_spark: History of cycles for the longest basic block.
+ * @num: Number of samples for the longest basic block.
+ * @num_aggr: Total number of samples for this address.
+ * @have_start: Whether the current branch info has a start address.
+ * @reset: Number of resets due to a different start address.
+ *
+ * If sample has branch_stack and cycles info, it can construct basic blocks
+ * between two adjacent branches.  It'd have start and end addresses but
+ * sometimes the start address may not be available.  So the cycles are
+ * accounted at the end address.  If multiple basic blocks end at the same
+ * address, it will take the longest one.
+ *
+ * The @start, @cycles, @cycles_spark and @num fields are used for the longest
+ * block only.  Other fields are used for all cases.
+ *
+ * See __symbol__account_cycles().
+ */
 struct cyc_hist {
 	u64	start;
 	u64	cycles;
@@ -258,18 +289,24 @@ struct cyc_hist {
 	u16	reset;
 };
 
-/** struct annotated_source - symbols with hits have this attached as in sannotation
+/**
+ * struct annotated_source - symbols with hits have this attached as in annotation
 *
- * @histograms: Array of addr hit histograms per event being monitored
- * nr_histograms: This may not be the same as evsel->evlist->core.nr_entries if
+ * @source: List head for annotated_line (embedded in disasm_line).
+ * @histograms: Array of symbol histograms per event to maintain the total number
+ *		of samples and period.
+ * @nr_histograms: This may not be the same as evsel->evlist->core.nr_entries if
 *		  we have more than a group in a evlist, where we will want
 *		  to see each group separately, that is why symbol__annotate2()
 *		  sets src->nr_histograms to evsel->nr_members.
- * @lines: If 'print_lines' is specified, per source code line percentages
- * @source: source parsed from a disassembler like objdump -dS
- * @cyc_hist: Average cycles per basic block
+ * @offsets: Array of annotation_line to be accessed by offset.
+ * @samples: Hash map of sym_hist_entry.  Keyed by event index and offset in symbol.
+ * @nr_entries: Number of annotated_line in the source list.
+ * @nr_asm_entries: Number of annotated_line with actual asm instruction in the
+ *		    source list.
+ * @max_line_len: Maximum length of objdump output in an annotated_line.
 *
- * lines is allocated, percentages calculated and all sorted by percentage
+ * disasm_lines are allocated, percentages calculated and all sorted by percentage
 * when the annotation is about to be presented, so the percentages are for
 * one of the entries in the histogram array, i.e. for the event/counter being
 * presented.  It is deallocated right after symbol__{tui,tty,etc}_annotate
 */
 struct annotated_source {
 	struct list_head	source;
-	size_t			sizeof_sym_hist;
 	struct sym_hist		*histograms;
 	struct annotation_line	**offsets;
+	struct hashmap		*samples;
 	int			nr_histograms;
 	int			nr_entries;
 	int			nr_asm_entries;
 	u16			max_line_len;
 };
 
+/**
+ * struct annotated_branch - basic block and IPC information for a symbol.
+ *
+ * @hit_cycles: Total executed cycles.
+ * @hit_insn: Total number of instructions executed.
+ * @total_insn: Number of instructions in the function.
+ * @cover_insn: Number of distinct, actually executed instructions.
+ * @cycles_hist: Array of cyc_hist for each instruction.
+ * @max_coverage: Maximum number of covered basic blocks (used for block-range).
+ *
+ * This struct is used by two different code paths when the sample has branch
+ * stack and cycles information.  annotation__compute_ipc() calculates average
+ * IPC using @hit_insn / @hit_cycles.  The actual coverage can be calculated
+ * using @cover_insn / @total_insn.  The @cycles_hist can give IPC for each
+ * (longest) basic block ending at the given address.
+ * process_basic_block() calculates coverage of instructions (or basic blocks)
+ * in the function.
+ */
 struct annotated_branch {
 	u64	hit_cycles;
 	u64	hit_insn;
@@ -346,7 +401,7 @@ void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms);
 
 static inline struct sym_hist *annotated_source__histogram(struct annotated_source *src, int idx)
 {
-	return ((void *)src->histograms) + (src->sizeof_sym_hist * idx);
+	return &src->histograms[idx];
 }
 
 static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
@@ -354,6 +409,17 @@ static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
 	return annotated_source__histogram(notes->src, idx);
 }
 
+static inline struct sym_hist_entry *
+annotated_source__hist_entry(struct annotated_source *src, int idx, u64 offset)
+{
+	struct sym_hist_entry *entry;
+	long key = offset << 16 | idx;
+
+	if (!hashmap__find(src->samples, key, &entry))
+		return NULL;
+	return entry;
+}
+
 static inline struct annotation *symbol__annotation(struct symbol *sym)
 {
 	return (void *)sym - symbol_conf.priv_size;
@@ -442,14 +508,18 @@ int annotate_check_args(void);
 
 /**
 * struct annotated_op_loc - Location info of instruction operand
- * @reg: Register in the operand
+ * @reg1: First register in the operand
+ * @reg2: Second register in the operand
 * @offset: Memory access offset in the operand
 * @mem_ref: Whether the operand accesses memory
+ * @multi_regs: Whether the second register is used
 */
 struct annotated_op_loc {
-	int	reg;
+	int	reg1;
+	int	reg2;
 	int	offset;
 	bool	mem_ref;
+	bool	multi_regs;
 };
 
 enum annotated_insn_ops {
@@ -487,4 +557,8 @@ struct annotated_item_stat {
 };
 extern struct list_head ann_insn_stat;
 
+/* Calculate PC-relative address */
+u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
+			struct disasm_line *dl);
+
 #endif /* __PERF_ANNOTATE_H */
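To make the two-register operand representation concrete, this is roughly how the target of a hypothetical `mov %rsi, 8(%rbx,%rcx,4)` would look after annotate_get_insn_location(); the initializer and the DWARF register numbers (3 = %rbx, 2 = %rcx on x86-64) are illustrative, not from the patch:

```c
/* Sketch: decoded target operand of "mov %rsi, 8(%rbx,%rcx,4)",
 * using the struct annotated_op_loc layout from annotate.h above.
 */
struct annotated_op_loc dst = {
	.reg1		= 3,	/* base register %rbx */
	.reg2		= 2,	/* index register %rcx */
	.offset		= 8,	/* displacement */
	.mem_ref	= true,
	.multi_regs	= true,	/* lets the type search retry with reg2 */
};
```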
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 3684e6009b..ef314a5797 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1466,6 +1466,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
 	char *endptr;
 	bool period_type_set = false;
 	bool period_set = false;
+	bool iy = false;
 
 	synth_opts->set = true;
 
@@ -1484,6 +1485,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
 		switch (*p++) {
 		case 'i':
 		case 'y':
+			iy = true;
 			if (p[-1] == 'y')
 				synth_opts->cycles = true;
 			else
@@ -1649,7 +1651,7 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
 		}
 	}
 out:
-	if (synth_opts->instructions || synth_opts->cycles) {
+	if (iy) {
 		if (!period_type_set)
 			synth_opts->period_type =
 				PERF_ITRACE_DEFAULT_PERIOD_TYPE;
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index 3573e0b7ef..83709146a4 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -63,6 +63,7 @@ static int machine__process_bpf_event_load(struct machine *machine,
 			dso->bpf_prog.id = id;
 			dso->bpf_prog.sub_id = i;
 			dso->bpf_prog.env = env;
+			map__put(map);
 		}
 	}
 	return 0;
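The bpf-event.c hunk fixes a reference leak: with this series, maps__find() hands back a counted map reference that the caller must drop. A hedged sketch of that discipline (perf-internal API; error handling elided):

```c
/* Sketch: perf map references are counted; a reference obtained from
 * maps__find() must be released with map__put() when no longer needed.
 */
struct map *map = maps__find(machine__kernel_maps(machine), addr);

if (map != NULL) {
	/* ... use the map: resolve symbols, fill in dso info, ... */
	map__put(map);	/* drop the reference taken by maps__find() */
}
```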
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index 31ff19afc2..b4cb3fe5cc 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -179,6 +179,123 @@ int lock_contention_prepare(struct lock_contention *con)
 	return 0;
 }
 
+/*
+ * Run the BPF program directly using BPF_PROG_TEST_RUN to update the end
+ * timestamp in ktime so that it can calculate delta easily.
+ */
+static void mark_end_timestamp(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+		.flags = BPF_F_TEST_RUN_ON_CPU,
+	);
+	int prog_fd = bpf_program__fd(skel->progs.end_timestamp);
+
+	bpf_prog_test_run_opts(prog_fd, &opts);
+}
+
+static void update_lock_stat(int map_fd, int pid, u64 end_ts,
+			     enum lock_aggr_mode aggr_mode,
+			     struct tstamp_data *ts_data)
+{
+	u64 delta;
+	struct contention_key stat_key = {};
+	struct contention_data stat_data;
+
+	if (ts_data->timestamp >= end_ts)
+		return;
+
+	delta = end_ts - ts_data->timestamp;
+
+	switch (aggr_mode) {
+	case LOCK_AGGR_CALLER:
+		stat_key.stack_id = ts_data->stack_id;
+		break;
+	case LOCK_AGGR_TASK:
+		stat_key.pid = pid;
+		break;
+	case LOCK_AGGR_ADDR:
+		stat_key.lock_addr_or_cgroup = ts_data->lock;
+		break;
+	case LOCK_AGGR_CGROUP:
+		/* TODO */
+		return;
+	default:
+		return;
+	}
+
+	if (bpf_map_lookup_elem(map_fd, &stat_key, &stat_data) < 0)
+		return;
+
+	stat_data.total_time += delta;
+	stat_data.count++;
+
+	if (delta > stat_data.max_time)
+		stat_data.max_time = delta;
+	if (delta < stat_data.min_time)
+		stat_data.min_time = delta;
+
+	bpf_map_update_elem(map_fd, &stat_key, &stat_data, BPF_EXIST);
+}
+
+/*
+ * Account entries in the tstamp map (which didn't see the corresponding
+ * lock:contention_end tracepoint) using end_ts.
+ */
+static void account_end_timestamp(struct lock_contention *con)
+{
+	int ts_fd, stat_fd;
+	int *prev_key, key;
+	u64 end_ts = skel->bss->end_ts;
+	int total_cpus;
+	enum lock_aggr_mode aggr_mode = con->aggr_mode;
+	struct tstamp_data ts_data, *cpu_data;
+
+	/* Iterate per-task tstamp map (key = TID) */
+	ts_fd = bpf_map__fd(skel->maps.tstamp);
+	stat_fd = bpf_map__fd(skel->maps.lock_stat);
+
+	prev_key = NULL;
+	while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
+		if (bpf_map_lookup_elem(ts_fd, &key, &ts_data) == 0) {
+			int pid = key;
+
+			if (aggr_mode == LOCK_AGGR_TASK && con->owner)
+				pid = ts_data.flags;
+
+			update_lock_stat(stat_fd, pid, end_ts, aggr_mode,
+					 &ts_data);
+		}
+
+		prev_key = &key;
+	}
+
+	/* Now it'll check per-cpu tstamp map which doesn't have TID. */
+	if (aggr_mode == LOCK_AGGR_TASK || aggr_mode == LOCK_AGGR_CGROUP)
+		return;
+
+	total_cpus = cpu__max_cpu().cpu;
+	ts_fd = bpf_map__fd(skel->maps.tstamp_cpu);
+
+	cpu_data = calloc(total_cpus, sizeof(*cpu_data));
+	if (cpu_data == NULL)
+		return;
+
+	prev_key = NULL;
+	while (!bpf_map_get_next_key(ts_fd, prev_key, &key)) {
+		if (bpf_map_lookup_elem(ts_fd, &key, cpu_data) < 0)
+			goto next;
+
+		for (int i = 0; i < total_cpus; i++) {
+			update_lock_stat(stat_fd, -1, end_ts, aggr_mode,
+					 &cpu_data[i]);
+		}
+
+next:
+		prev_key = &key;
+	}
+	free(cpu_data);
+}
+
 int lock_contention_start(void)
 {
 	skel->bss->enabled = 1;
@@ -188,6 +305,7 @@ int lock_contention_start(void)
 int lock_contention_stop(void)
 {
 	skel->bss->enabled = 0;
+	mark_end_timestamp();
 	return 0;
 }
@@ -210,7 +328,7 @@ static const char *lock_contention_get_name(struct lock_contention *con,
 
 	/* do not update idle comm which contains CPU number */
 	if (pid) {
-		struct thread *t = __machine__findnew_thread(machine, /*pid=*/-1, pid);
+		struct thread *t = machine__findnew_thread(machine, /*pid=*/-1, pid);
 
 		if (t == NULL)
 			return name;
@@ -301,8 +419,10 @@ int lock_contention_read(struct lock_contention *con)
 	if (stack_trace == NULL)
 		return -1;
 
+	account_end_timestamp(con);
+
 	if (con->aggr_mode == LOCK_AGGR_TASK) {
-		struct thread *idle = __machine__findnew_thread(machine,
+		struct thread *idle = machine__findnew_thread(machine,
 								/*pid=*/0,
 								/*tid=*/0);
 		thread__set_comm(idle, "swapper", /*timestamp=*/0);
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index 52c270330a..2872f9bc07 100644
--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -6,7 +6,7 @@
 * payload expected by the 'perf trace' beautifiers.
 */
 
-#include <linux/bpf.h>
+#include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <linux/limits.h>
 
@@ -22,19 +22,6 @@
 
 #define MAX_CPUS  4096
 
-// FIXME: These should come from system headers
-#ifndef bool
-typedef char bool;
-#endif
-typedef int pid_t;
-typedef long long int __s64;
-typedef __s64 time64_t;
-
-struct timespec64 {
-	time64_t tv_sec;
-	long int tv_nsec;
-};
-
 /* bpf-output associated map */
 struct __augmented_syscalls__ {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
index e5d78565f4..d931a898c4 100644
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -19,13 +19,6 @@
 #define LCB_F_PERCPU	(1U << 4)
 #define LCB_F_MUTEX	(1U << 5)
 
-struct tstamp_data {
-	__u64 timestamp;
-	__u64 lock;
-	__u32 flags;
-	__s32 stack_id;
-};
-
 /* callstack storage */
 struct {
 	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
@@ -140,6 +133,8 @@ int perf_subsys_id = -1;
 /* determine the key of lock stat */
 int aggr_mode;
 
+__u64 end_ts;
+
 /* error stat */
 int task_fail;
 int stack_fail;
@@ -562,4 +557,11 @@ int BPF_PROG(collect_lock_syms)
 	return 0;
 }
 
+SEC("raw_tp/bpf_test_finish")
+int BPF_PROG(end_timestamp)
+{
+	end_ts = bpf_ktime_get_ns();
+	return 0;
+}
+
 char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/bpf_skel/lock_data.h b/tools/perf/util/bpf_skel/lock_data.h
index 08482daf61..36af11faad 100644
--- a/tools/perf/util/bpf_skel/lock_data.h
+++ b/tools/perf/util/bpf_skel/lock_data.h
@@ -3,6 +3,13 @@
 #ifndef UTIL_BPF_SKEL_LOCK_DATA_H
 #define UTIL_BPF_SKEL_LOCK_DATA_H
 
+struct tstamp_data {
+	u64 timestamp;
+	u64 lock;
+	u32 flags;
+	u32 stack_id;
+};
+
 struct contention_key {
 	u32 stack_id;
 	u32 pid;
diff --git a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
index ab84a6e1da..e9028235d7 100644
--- a/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
+++ b/tools/perf/util/bpf_skel/vmlinux/vmlinux.h
@@ -20,6 +20,13 @@ typedef __s64 s64;
 
 typedef int pid_t;
 
+typedef __s64 time64_t;
+
+struct timespec64 {
+	time64_t tv_sec;
+	long int tv_nsec;
+};
+
 enum cgroup_subsys_id {
 	perf_event_cgrp_id = 8,
 };
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 8262f69118..7517d16c02 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -1157,7 +1157,7 @@ int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
 		if (al->map == NULL)
 			goto out;
 	}
-	if (RC_CHK_EQUAL(al->maps, machine__kernel_maps(machine))) {
+	if (maps__equal(al->maps, machine__kernel_maps(machine))) {
 		if (machine__is_host(machine)) {
 			al->cpumode = PERF_RECORD_MISC_KERNEL;
 			al->level = 'k';
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 0581ee0fa5..356e30c42c 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -222,6 +222,8 @@ static int aggr_cpu_id__cmp(const void *a_pointer, const void *b_pointer)
 		return a->socket - b->socket;
 	else if (a->die != b->die)
 		return a->die - b->die;
+	else if (a->cluster != b->cluster)
+		return a->cluster - b->cluster;
 	else if (a->cache_lvl != b->cache_lvl)
 		return a->cache_lvl - b->cache_lvl;
 	else if (a->cache != b->cache)
@@ -309,6 +311,30 @@ struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data)
 	return id;
 }
 
+int cpu__get_cluster_id(struct perf_cpu cpu)
+{
+	int value, ret = cpu__get_topology_int(cpu.cpu, "cluster_id", &value);
+
+	return ret ?: value;
+}
+
+struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data)
+{
+	int cluster = cpu__get_cluster_id(cpu);
+	struct aggr_cpu_id id;
+
+	/* There is no cluster_id on legacy system. */
+	if (cluster == -1)
+		cluster = 0;
+
+	id = aggr_cpu_id__die(cpu, data);
+	if (aggr_cpu_id__is_empty(&id))
+		return id;
+
+	id.cluster = cluster;
+	return id;
+}
+
 int cpu__get_core_id(struct perf_cpu cpu)
 {
 	int value, ret = cpu__get_topology_int(cpu.cpu, "core_id", &value);
@@ -320,8 +346,8 @@ struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data)
 	struct aggr_cpu_id id;
 	int core = cpu__get_core_id(cpu);
 
-	/* aggr_cpu_id__die returns a struct with socket and die set. */
-	id = aggr_cpu_id__die(cpu, data);
+	/* aggr_cpu_id__cluster returns a struct with socket, die and cluster set. */
+	id = aggr_cpu_id__cluster(cpu, data);
 	if (aggr_cpu_id__is_empty(&id))
 		return id;
 
@@ -683,6 +709,7 @@ bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
 		a->node == b->node &&
 		a->socket == b->socket &&
 		a->die == b->die &&
+		a->cluster == b->cluster &&
 		a->cache_lvl == b->cache_lvl &&
 		a->cache == b->cache &&
 		a->core == b->core &&
@@ -695,6 +722,7 @@ bool aggr_cpu_id__is_empty(const struct aggr_cpu_id *a)
 		a->node == -1 &&
 		a->socket == -1 &&
 		a->die == -1 &&
+		a->cluster == -1 &&
 		a->cache_lvl == -1 &&
 		a->cache == -1 &&
 		a->core == -1 &&
@@ -708,6 +736,7 @@ struct aggr_cpu_id aggr_cpu_id__empty(void)
 		.node = -1,
 		.socket = -1,
 		.die = -1,
+		.cluster = -1,
 		.cache_lvl = -1,
 		.cache = -1,
 		.core = -1,
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 9df2aeb34d..26cf76c693 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -20,6 +20,8 @@ struct aggr_cpu_id {
 	int socket;
 	/** The die id as read from /sys/devices/system/cpu/cpuX/topology/die_id. */
 	int die;
+	/** The cluster id as read from /sys/devices/system/cpu/cpuX/topology/cluster_id. */
+	int cluster;
 	/** The cache level as read from /sys/devices/system/cpu/cpuX/cache/indexY/level */
 	int cache_lvl;
 	/**
@@ -87,6 +89,11 @@ int cpu__get_socket_id(struct perf_cpu cpu);
 */
 int cpu__get_die_id(struct perf_cpu cpu);
 /**
+ * cpu__get_cluster_id - Returns the cluster id as read from
+ * /sys/devices/system/cpu/cpuX/topology/cluster_id for the given CPU
+ */
+int cpu__get_cluster_id(struct perf_cpu cpu);
+/**
 * cpu__get_core_id - Returns the core id as read from
 * /sys/devices/system/cpu/cpuX/topology/core_id for the given CPU.
 */
@@ -127,9 +134,15 @@ struct aggr_cpu_id aggr_cpu_id__socket(struct perf_cpu cpu, void *data);
 */
 struct aggr_cpu_id aggr_cpu_id__die(struct perf_cpu cpu, void *data);
 /**
- * aggr_cpu_id__core - Create an aggr_cpu_id with the core, die and socket
- * populated with the core, die and socket for cpu. The function signature is
- * compatible with aggr_cpu_id_get_t.
+ * aggr_cpu_id__cluster - Create an aggr_cpu_id with cluster, die and socket
+ * populated with the cluster, die and socket for cpu.  The function signature
+ * is compatible with aggr_cpu_id_get_t.
+ */
+struct aggr_cpu_id aggr_cpu_id__cluster(struct perf_cpu cpu, void *data);
+/**
+ * aggr_cpu_id__core - Create an aggr_cpu_id with the core, cluster, die and
+ * socket populated with the core, die and socket for cpu.  The function
+ * signature is compatible with aggr_cpu_id_get_t.
 */
 struct aggr_cpu_id aggr_cpu_id__core(struct perf_cpu cpu, void *data);
 /**
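The cluster level sits between die and core in the aggregation hierarchy. For reference, a userspace sketch of the sysfs read that cpu__get_cluster_id() wraps (standalone C; the path comes from the header comment above, and returning -1 on a missing file mirrors the legacy-system case):

```c
#include <stdio.h>

/* Sketch: read /sys/devices/system/cpu/cpuN/topology/cluster_id the way
 * perf's topology helpers do; -1 means no cluster level is exposed.
 */
static int read_cluster_id(int cpu)
{
	char path[128];
	FILE *fp;
	int id = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/cluster_id", cpu);
	fp = fopen(path, "r");
	if (fp == NULL)
		return -1;	/* legacy system: no cluster_id file */
	if (fscanf(fp, "%d", &id) != 1)
		id = -1;
	fclose(fp);
	return id;
}

int main(void)
{
	printf("cpu0 cluster_id = %d\n", read_cluster_id(0));
	return 0;
}
```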
diff --git a/tools/perf/util/data-convert-json.c b/tools/perf/util/data-convert-json.c
index 5bb3c2ba95..09d57efd2d 100644
--- a/tools/perf/util/data-convert-json.c
+++ b/tools/perf/util/data-convert-json.c
@@ -284,7 +284,9 @@ static void output_headers(struct perf_session *session, struct convert_json *c)
 	output_json_key_string(out, true, 2, "os-release", header->env.os_release);
 	output_json_key_string(out, true, 2, "arch", header->env.arch);
 
-	output_json_key_string(out, true, 2, "cpu-desc", header->env.cpu_desc);
+	if (header->env.cpu_desc)
+		output_json_key_string(out, true, 2, "cpu-desc", header->env.cpu_desc);
+
 	output_json_key_string(out, true, 2, "cpuid", header->env.cpuid);
 	output_json_key_format(out, true, 2, "nrcpus-online", "%u", header->env.nr_cpus_online);
 	output_json_key_format(out, true, 2, "nrcpus-avail", "%u", header->env.nr_cpus_avail);
diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c
index 550675ce0b..08c4bfbd81 100644
--- a/tools/perf/util/data.c
+++ b/tools/perf/util/data.c
@@ -413,7 +413,7 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
 }
 
 ssize_t perf_data__write(struct perf_data *data,
-			      void *buf, size_t size)
+			 void *buf, size_t size)
 {
 	if (data->use_stdio) {
 		if (fwrite(buf, size, 1, data->file.fptr) == 1)
@@ -424,9 +424,9 @@ ssize_t perf_data__write(struct perf_data *data,
 }
 
 int perf_data__switch(struct perf_data *data,
-			   const char *postfix,
-			   size_t pos, bool at_exit,
-			   char **new_filepath)
+		      const char *postfix,
+		      size_t pos, bool at_exit,
+		      char **new_filepath)
 {
 	int ret;
 
diff --git a/tools/perf/util/data.h b/tools/perf/util/data.h
index effcc195d7..110f3ebde3 100644
--- a/tools/perf/util/data.h
+++ b/tools/perf/util/data.h
@@ -80,7 +80,7 @@ int perf_data__open(struct perf_data *data);
 void perf_data__close(struct perf_data *data);
 ssize_t perf_data__read(struct perf_data *data, void *buf, size_t size);
 ssize_t perf_data__write(struct perf_data *data,
-			      void *buf, size_t size);
+			 void *buf, size_t size);
 ssize_t perf_data_file__write(struct perf_data_file *file,
 			      void *buf, size_t size);
 /*
@@ -91,8 +91,8 @@ ssize_t perf_data_file__write(struct perf_data_file *file,
 * Return value is fd of new output.
 */
 int perf_data__switch(struct perf_data *data,
-			   const char *postfix,
-			   size_t pos, bool at_exit, char **new_filepath);
+		      const char *postfix,
+		      size_t pos, bool at_exit, char **new_filepath);
 
 int perf_data__create_dir(struct perf_data *data, int nr);
 int perf_data__open_dir(struct perf_data *data);
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index e282b4ceb4..c39ee0fcb8 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -33,6 +33,7 @@
 #endif
 
 int verbose;
+int debug_kmaps;
 int debug_peo_args;
 bool dump_trace = false, quiet = false;
 int debug_ordered_events;
@@ -229,6 +230,7 @@ static struct sublevel_option debug_opts[] = {
 	{ .name = "stderr",		.value_ptr = &redirect_to_stderr},
 	{ .name = "data-convert",	.value_ptr = &debug_data_convert },
 	{ .name = "perf-event-open",	.value_ptr = &debug_peo_args },
+	{ .name = "kmaps",		.value_ptr = &debug_kmaps },
 	{ .name = NULL, }
 };
 
@@ -267,6 +269,7 @@ int perf_quiet_option(void)
 	/* For debug variables that are used as bool types, set to 0. */
 	redirect_to_stderr = 0;
 	debug_peo_args = 0;
+	debug_kmaps = 0;
 
 	return 0;
 }
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index de8870980d..35a7a5ae76 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -9,6 +9,7 @@
 #include <linux/compiler.h>
 
 extern int verbose;
+extern int debug_kmaps;
 extern int debug_peo_args;
 extern bool quiet, dump_trace;
 extern int debug_ordered_events;
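A sketch of how such a sublevel flag typically gates extra output once parsed (the macro name is illustrative; perf's real code uses its own pr_debug machinery):

```c
/* Sketch: a debug flag parsed from the sublevel options table gates
 * verbose dumps; 'kmaps_debug' here is an illustrative wrapper.
 */
extern int debug_kmaps;

#define kmaps_debug(fmt, ...)					\
	do {							\
		if (debug_kmaps)				\
			fprintf(stderr, fmt, ##__VA_ARGS__);	\
	} while (0)
```

On the command line this should map to `perf --debug kmaps ...`, analogous to the existing `--debug perf-event-open` sublevel.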
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 7aa5fee0da..f93e57e2fc 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -1136,6 +1136,68 @@ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
 	return ret < 0 ? ret : strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
 }
 
+#if defined(HAVE_DWARF_GETLOCATIONS_SUPPORT) || defined(HAVE_DWARF_CFI_SUPPORT)
+static int reg_from_dwarf_op(Dwarf_Op *op)
+{
+	switch (op->atom) {
+	case DW_OP_reg0 ... DW_OP_reg31:
+		return op->atom - DW_OP_reg0;
+	case DW_OP_breg0 ... DW_OP_breg31:
+		return op->atom - DW_OP_breg0;
+	case DW_OP_regx:
+	case DW_OP_bregx:
+		return op->number;
+	default:
+		break;
+	}
+	return -1;
+}
+
+static int offset_from_dwarf_op(Dwarf_Op *op)
+{
+	switch (op->atom) {
+	case DW_OP_reg0 ... DW_OP_reg31:
+	case DW_OP_regx:
+		return 0;
+	case DW_OP_breg0 ... DW_OP_breg31:
+		return op->number;
+	case DW_OP_bregx:
+		return op->number2;
+	default:
+		break;
+	}
+	return -1;
+}
+
+static bool check_allowed_ops(Dwarf_Op *ops, size_t nops)
+{
+	/* The first op is checked separately */
+	ops++;
+	nops--;
+
+	/*
+	 * It needs to make sure the location expression matches the given
+	 * register and offset exactly.  Thus it rejects any complex expressions
+	 * and only allows a few selected operators that don't change the
+	 * location.
+	 */
+	while (nops) {
+		switch (ops->atom) {
+		case DW_OP_stack_value:
+		case DW_OP_deref_size:
+		case DW_OP_deref:
+		case DW_OP_piece:
+			break;
+		default:
+			return false;
+		}
+		ops++;
+		nops--;
+	}
+	return true;
+}
+#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT || HAVE_DWARF_CFI_SUPPORT */
+
 #ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT
 /**
 * die_get_var_innermost_scope - Get innermost scope range of given variable DIE
@@ -1272,11 +1334,45 @@ struct find_var_data {
 	unsigned reg;
 	/* Access offset, set for global data */
 	int offset;
+	/* True if the current register is the frame base */
+	bool is_fbreg;
 };
 
 /* Max number of registers DW_OP_regN supports */
 #define DWARF_OP_DIRECT_REGS  32
 
+static bool match_var_offset(Dwarf_Die *die_mem, struct find_var_data *data,
+			     u64 addr_offset, u64 addr_type, bool is_pointer)
+{
+	Dwarf_Die type_die;
+	Dwarf_Word size;
+
+	if (addr_offset == addr_type) {
+		/* Update offset relative to the start of the variable */
+		data->offset = 0;
+		return true;
+	}
+
+	if (die_get_real_type(die_mem, &type_die) == NULL)
+		return false;
+
+	if (is_pointer && dwarf_tag(&type_die) == DW_TAG_pointer_type) {
+		/* Get the target type of the pointer */
+		if (die_get_real_type(&type_die, &type_die) == NULL)
+			return false;
+	}
+
+	if (dwarf_aggregate_size(&type_die, &size) < 0)
+		return false;
+
+	if (addr_offset >= addr_type + size)
+		return false;
+
+	/* Update offset relative to the start of the variable */
+	data->offset = addr_offset - addr_type;
+	return true;
+}
+
 /* Only checks direct child DIEs in the given scope. */
 static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
 {
@@ -1301,13 +1397,42 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
 		if (start > data->pc)
 			break;
 
+		/* Local variables accessed using frame base register */
+		if (data->is_fbreg && ops->atom == DW_OP_fbreg &&
+		    data->offset >= (int)ops->number &&
+		    check_allowed_ops(ops, nops) &&
+		    match_var_offset(die_mem, data, data->offset, ops->number,
+				     /*is_pointer=*/false))
+			return DIE_FIND_CB_END;
+
 		/* Only match with a simple case */
 		if (data->reg < DWARF_OP_DIRECT_REGS) {
-			if (ops->atom == (DW_OP_reg0 + data->reg) && nops == 1)
+			/* pointer variables saved in a register 0 to 31 */
+			if (ops->atom == (DW_OP_reg0 + data->reg) &&
+			    check_allowed_ops(ops, nops) &&
+			    match_var_offset(die_mem, data, data->offset, 0,
+					     /*is_pointer=*/true))
+				return DIE_FIND_CB_END;
+
+			/* Local variables accessed by a register + offset */
+			if (ops->atom == (DW_OP_breg0 + data->reg) &&
+			    check_allowed_ops(ops, nops) &&
+			    match_var_offset(die_mem, data, data->offset, ops->number,
+					     /*is_pointer=*/false))
 				return DIE_FIND_CB_END;
 		} else {
+			/* pointer variables saved in a register 32 or above */
 			if (ops->atom == DW_OP_regx && ops->number == data->reg &&
-			    nops == 1)
+			    check_allowed_ops(ops, nops) &&
+			    match_var_offset(die_mem, data, data->offset, 0,
+					     /*is_pointer=*/true))
+				return DIE_FIND_CB_END;
+
+			/* Local variables accessed by a register + offset */
+			if (ops->atom == DW_OP_bregx && data->reg == ops->number &&
+			    check_allowed_ops(ops, nops) &&
+			    match_var_offset(die_mem, data, data->offset, ops->number2,
+					     /*is_pointer=*/false))
 				return DIE_FIND_CB_END;
 		}
 	}
@@ -1319,18 +1444,29 @@ static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg)
 * @sc_die: a scope DIE
 * @pc: the program address to find
 * @reg: the register number to find
+ * @poffset: pointer to offset, will be updated for fbreg case
+ * @is_fbreg: boolean value if the current register is the frame base
 * @die_mem: a buffer to save the resulting DIE
 *
- * Find the variable DIE accessed by the given register.
+ * Find the variable DIE accessed by the given register.  It'll update the @offset
+ * when the variable is in the stack.
 */
 Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg,
+				    int *poffset, bool is_fbreg,
 				    Dwarf_Die *die_mem)
 {
 	struct find_var_data data = {
 		.pc = pc,
 		.reg = reg,
+		.offset = *poffset,
+		.is_fbreg = is_fbreg,
 	};
-	return die_find_child(sc_die, __die_find_var_reg_cb, &data, die_mem);
+	Dwarf_Die *result;
+
+	result = die_find_child(sc_die, __die_find_var_reg_cb, &data, die_mem);
+	if (result)
+		*poffset = data.offset;
+	return result;
 }
 
 /* Only checks direct child DIEs in the given scope */
@@ -1341,8 +1477,6 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg)
 	ptrdiff_t off = 0;
 	Dwarf_Attribute attr;
 	Dwarf_Addr base, start, end;
-	Dwarf_Word size;
-	Dwarf_Die type_die;
 	Dwarf_Op *ops;
 	size_t nops;
 
@@ -1359,24 +1493,10 @@ static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg)
 		if (data->addr < ops->number)
 			continue;
 
-		if (data->addr == ops->number) {
-			/* Update offset relative to the start of the variable */
-			data->offset = 0;
+		if (check_allowed_ops(ops, nops) &&
+		    match_var_offset(die_mem, data, data->addr, ops->number,
+				     /*is_pointer=*/false))
 			return DIE_FIND_CB_END;
-		}
-
-		if (die_get_real_type(die_mem, &type_die) == NULL)
-			continue;
-
-		if (dwarf_aggregate_size(&type_die, &size) < 0)
-			continue;
-
-		if (data->addr >= ops->number + size)
-			continue;
-
-		/* Update offset relative to the start of the variable */
-		data->offset = data->addr - ops->number;
-		return DIE_FIND_CB_END;
 	}
 	return DIE_FIND_CB_SIBLING;
 }
@@ -1407,7 +1527,102 @@ Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
 		*offset = data.offset;
 	return result;
 }
-#endif
+
+static int __die_collect_vars_cb(Dwarf_Die *die_mem, void *arg)
+{
+	struct die_var_type **var_types = arg;
+	Dwarf_Die type_die;
+	int tag = dwarf_tag(die_mem);
+	Dwarf_Attribute attr;
+	Dwarf_Addr base, start, end;
+	Dwarf_Op *ops;
+	size_t nops;
+	struct die_var_type *vt;
+
+	if (tag != DW_TAG_variable && tag != DW_TAG_formal_parameter)
+		return DIE_FIND_CB_SIBLING;
+
+	if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL)
+		return DIE_FIND_CB_SIBLING;
+
+	/*
+	 * Only collect the first location as it can reconstruct the
+	 * remaining state by following the instructions.
+	 * start = 0 means it covers the whole range.
+	 */
+	if (dwarf_getlocations(&attr, 0, &base, &start, &end, &ops, &nops) <= 0)
+		return DIE_FIND_CB_SIBLING;
+
+	if (die_get_real_type(die_mem, &type_die) == NULL)
+		return DIE_FIND_CB_SIBLING;
+
+	vt = malloc(sizeof(*vt));
+	if (vt == NULL)
+		return DIE_FIND_CB_END;
+
+	vt->die_off = dwarf_dieoffset(&type_die);
+	vt->addr = start;
+	vt->reg = reg_from_dwarf_op(ops);
+	vt->offset = offset_from_dwarf_op(ops);
+	vt->next = *var_types;
+	*var_types = vt;
+
+	return DIE_FIND_CB_SIBLING;
+}
+
+/**
+ * die_collect_vars - Save all variables and parameters
+ * @sc_die: a scope DIE
+ * @var_types: a pointer to save the resulting list
+ *
+ * Save all variables and parameters in the @sc_die and save them to @var_types.
+ * The @var_types is a singly-linked list containing type and location info.
+ * Actual type can be retrieved using dwarf_offdie() with 'die_off' later.
+ *
+ * Callers should free @var_types.
+ */
+void die_collect_vars(Dwarf_Die *sc_die, struct die_var_type **var_types)
+{
+	Dwarf_Die die_mem;
+
+	die_find_child(sc_die, __die_collect_vars_cb, (void *)var_types, &die_mem);
+}
+#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
+
+#ifdef HAVE_DWARF_CFI_SUPPORT
+/**
+ * die_get_cfa - Get frame base information
+ * @dwarf: a Dwarf info
+ * @pc: program address
+ * @preg: pointer for saved register
+ * @poffset: pointer for saved offset
+ *
+ * This function gets register and offset for CFA (Canonical Frame Address)
+ * by searching the CIE/FDE info.  The CFA usually points to the start address
+ * of the current stack frame and local variables can be located using an offset
+ * from the CFA.  The @preg and @poffset will be updated if it returns 0.
+ */
+int die_get_cfa(Dwarf *dwarf, u64 pc, int *preg, int *poffset)
+{
+	Dwarf_CFI *cfi;
+	Dwarf_Frame *frame = NULL;
+	Dwarf_Op *ops = NULL;
+	size_t nops;
+
+	cfi = dwarf_getcfi(dwarf);
+	if (cfi == NULL)
+		return -1;
+
+	if (!dwarf_cfi_addrframe(cfi, pc, &frame) &&
+	    !dwarf_frame_cfa(frame, &ops, &nops) &&
+	    check_allowed_ops(ops, nops)) {
+		*preg = reg_from_dwarf_op(ops);
+		*poffset = offset_from_dwarf_op(ops);
+		return 0;
+	}
+	return -1;
+}
+#endif /* HAVE_DWARF_CFI_SUPPORT */
 
 /*
 * die_has_loclist - Check if DW_AT_location of @vr_die is a location list
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 4e64caac6d..efafd3a1f5 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -135,6 +135,15 @@ void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die,
 /* Get the list of including scopes */
 int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes);
 
+/* Variable type information */
+struct die_var_type {
+	struct die_var_type *next;
+	u64 die_off;
+	u64 addr;
+	int reg;
+	int offset;
+};
+
 #ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT
 
 /* Get byte offset range of given variable DIE */
@@ -142,6 +151,7 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf);
 
 /* Find a variable saved in the 'reg' at given address */
 Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg,
+				    int *poffset, bool is_fbreg,
 				    Dwarf_Die *die_mem);
 
 /* Find a (global) variable located in the 'addr' */
@@ -149,6 +159,9 @@ Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc,
 				     Dwarf_Addr addr, Dwarf_Die *die_mem,
 				     int *offset);
 
+/* Save all variables and parameters in this scope */
+void die_collect_vars(Dwarf_Die *sc_die, struct die_var_type **var_types);
+
 #else /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
 
 static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
@@ -161,6 +174,8 @@ static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
 static inline Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die __maybe_unused,
 						  Dwarf_Addr pc __maybe_unused,
 						  int reg __maybe_unused,
+						  int *poffset __maybe_unused,
+						  bool is_fbreg __maybe_unused,
 						  Dwarf_Die *die_mem __maybe_unused)
 {
 	return NULL;
@@ -175,6 +190,26 @@ static inline Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die __maybe_unused,
 	return NULL;
 }
 
+static inline void die_collect_vars(Dwarf_Die *sc_die __maybe_unused,
+				    struct die_var_type **var_types __maybe_unused)
+{
+}
+
 #endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */
 
+#ifdef HAVE_DWARF_CFI_SUPPORT
+
+/* Get the frame base information from CFA */
+int die_get_cfa(Dwarf *dwarf, u64 pc, int *preg, int *poffset);
+
+#else /* HAVE_DWARF_CFI_SUPPORT */
+
+static inline int die_get_cfa(Dwarf *dwarf __maybe_unused, u64 pc __maybe_unused,
+			      int *preg __maybe_unused, int *poffset __maybe_unused)
+{
+	return -1;
+}
+
+#endif /* HAVE_DWARF_CFI_SUPPORT */
+
 #endif /* _DWARF_AUX_H */
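A hedged sketch of how the CFA helper is meant to be consumed, mirroring the frame-base path in find_data_type_die() from the annotate-data.c hunk above. It combines libdw with the dwarf-aux.h declarations; the function, its parameters and the printf are illustrative, not from the patch:

```c
#include <dwarf.h>
#include <elfutils/libdw.h>
#include <stdio.h>

/* Sketch: resolve a stack slot "disp(%reg)" through the CFA.  'dbg' is
 * an open libdw handle, 'scope' the enclosing scope DIE; die_get_cfa()
 * and die_find_variable_by_reg() are used as declared in dwarf-aux.h.
 */
static void show_stack_var(Dwarf *dbg, Dwarf_Die *scope, Dwarf_Addr pc, int disp)
{
	int fb_reg = -1, fb_offset = 0;
	Dwarf_Die var_die;

	if (die_get_cfa(dbg, pc, &fb_reg, &fb_offset) < 0)
		return;

	/* the operand offset is taken relative to the frame base */
	int offset = disp - fb_offset;

	if (die_find_variable_by_reg(scope, pc, fb_reg, &offset,
				     /*is_fbreg=*/true, &var_die))
		printf("%s +%d\n", dwarf_diename(&var_die), offset);
}
```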
__maybe_unused, + int *preg __maybe_unused, int *poffset __maybe_unused) +{ + return -1; +} + +#endif /* HAVE_DWARF_CFI_SUPPORT */ + #endif /* _DWARF_AUX_H */ diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h index 7c527e65c1..2a2c37cc40 100644 --- a/tools/perf/util/env.h +++ b/tools/perf/util/env.h @@ -12,6 +12,7 @@ struct perf_cpu_map; struct cpu_topology_map { int socket_id; int die_id; + int cluster_id; int core_id; }; diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 68f45e9e63..198903157f 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -511,7 +511,7 @@ size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *ma struct addr_location al; addr_location__init(&al); - al.map = map__get(maps__find(machine__kernel_maps(machine), tp->addr)); + al.map = maps__find(machine__kernel_maps(machine), tp->addr); if (al.map && map__load(al.map) >= 0) { al.addr = map__map_ip(al.map, tp->addr); al.sym = map__find_symbol(al.map, al.addr); @@ -641,7 +641,7 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, return NULL; } al->maps = maps__get(maps); - al->map = map__get(maps__find(maps, al->addr)); + al->map = maps__find(maps, al->addr); if (al->map != NULL) { /* * Kernel maps might be changed when loading symbols so loading diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 727dae445d..3536404e94 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -2850,6 +2850,39 @@ u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const return field ? format_field__intval(field, sample, evsel->needs_swap) : 0; } +char evsel__taskstate(struct evsel *evsel, struct perf_sample *sample, const char *name) +{ + static struct tep_format_field *prev_state_field; + static const char *states; + struct tep_format_field *field; + unsigned long long val; + unsigned int bit; + char state = '?'; /* '?' denotes unknown task state */ + + field = evsel__field(evsel, name); + + if (!field) + return state; + + if (!states || field != prev_state_field) { + states = parse_task_states(field); + if (!states) + return state; + prev_state_field = field; + } + + /* + * Note since the kernel exposes TASK_REPORT_MAX to userspace + * to denote the 'preempted' state, we might as well report + * 'R' for this case, which makes sense to users as well. + * + * We can change this if we have a good reason in the future. + */ + val = evsel__intval(evsel, sample, name); + bit = val ? ffs(val) : 0; + state = (!bit || bit > strlen(states)) ?
'R' : states[bit-1]; + return state; +} #endif bool evsel__fallback(struct evsel *evsel, struct target *target, int err, diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index efbb6e8482..517cff431d 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -339,6 +339,7 @@ struct perf_sample; void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name); u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name); u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const char *name); +char evsel__taskstate(struct evsel *evsel, struct perf_sample *sample, const char *name); static inline char *evsel__strval(struct evsel *evsel, struct perf_sample *sample, const char *name) { diff --git a/tools/perf/util/expr.l b/tools/perf/util/expr.l index 0feef0726c..a2fc43159e 100644 --- a/tools/perf/util/expr.l +++ b/tools/perf/util/expr.l @@ -94,6 +94,14 @@ static int literal(yyscan_t scanner, const struct expr_scanner_ctx *sctx) } return LITERAL; } + +static int nan_value(yyscan_t scanner) +{ + YYSTYPE *yylval = expr_get_lval(scanner); + + yylval->num = NAN; + return NUMBER; +} %} number ([0-9]+\.?[0-9]*|[0-9]*\.?[0-9]+)(e-?[0-9]+)? @@ -115,6 +123,7 @@ else { return ELSE; } source_count { return SOURCE_COUNT; } has_event { return HAS_EVENT; } strcmp_cpuid_str { return STRCMP_CPUID_STR; } +NaN { return nan_value(yyscanner); } {literal} { return literal(yyscanner, sctx); } {number} { return value(yyscanner); } {symbol} { return str(yyscanner, ID, sctx->runtime); } diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index b450178e34..e733f6b1f7 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1319,6 +1319,8 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder, bool no_tip) bool ret = false; decoder->state.type &= ~INTEL_PT_BRANCH; + decoder->state.insn_op = INTEL_PT_OP_OTHER; + decoder->state.insn_len = 0; if (decoder->set_fup_cfe_ip || decoder->set_fup_cfe) { bool ip = decoder->set_fup_cfe_ip; diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index f38893e0b0..4db9a098f5 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -764,6 +764,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, addr_location__init(&al); intel_pt_insn->length = 0; + intel_pt_insn->op = INTEL_PT_OP_OTHER; if (to_ip && *ip == to_ip) goto out_no_cache; @@ -898,6 +899,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, if (to_ip && *ip == to_ip) { intel_pt_insn->length = 0; + intel_pt_insn->op = INTEL_PT_OP_OTHER; goto out_no_cache; } diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index b397a76900..07c22f765f 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -43,9 +43,6 @@ #include <linux/string.h> #include <linux/zalloc.h> -static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd, - struct thread *th, bool lock); - static struct dso *machine__kernel_dso(struct machine *machine) { return map__dso(machine->vmlinux_map); @@ -58,35 +55,6 @@ static void dsos__init(struct dsos *dsos) init_rwsem(&dsos->lock); } -static void machine__threads_init(struct machine *machine) -{ - int i; - - for (i = 0; i < THREADS__TABLE_SIZE; i++) { - struct threads *threads = &machine->threads[i]; - threads->entries = RB_ROOT_CACHED; - 
init_rwsem(&threads->lock); - threads->nr = 0; - threads->last_match = NULL; - } -} - -static int thread_rb_node__cmp_tid(const void *key, const struct rb_node *nd) -{ - int to_find = (int) *((pid_t *)key); - - return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread); -} - -static struct thread_rb_node *thread_rb_node__find(const struct thread *th, - struct rb_root *tree) -{ - pid_t to_find = thread__tid(th); - struct rb_node *nd = rb_find(&to_find, tree, thread_rb_node__cmp_tid); - - return rb_entry(nd, struct thread_rb_node, rb_node); -} - static int machine__set_mmap_name(struct machine *machine) { if (machine__is_host(machine)) @@ -120,7 +88,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid) RB_CLEAR_NODE(&machine->rb_node); dsos__init(&machine->dsos); - machine__threads_init(machine); + threads__init(&machine->threads); machine->vdso_info = NULL; machine->env = NULL; @@ -221,27 +189,11 @@ static void dsos__exit(struct dsos *dsos) void machine__delete_threads(struct machine *machine) { - struct rb_node *nd; - int i; - - for (i = 0; i < THREADS__TABLE_SIZE; i++) { - struct threads *threads = &machine->threads[i]; - down_write(&threads->lock); - nd = rb_first_cached(&threads->entries); - while (nd) { - struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node); - - nd = rb_next(nd); - __machine__remove_thread(machine, trb, trb->thread, false); - } - up_write(&threads->lock); - } + threads__remove_all_threads(&machine->threads); } void machine__exit(struct machine *machine) { - int i; - if (machine == NULL) return; @@ -254,12 +206,7 @@ void machine__exit(struct machine *machine) zfree(&machine->current_tid); zfree(&machine->kallsyms_filename); - machine__delete_threads(machine); - for (i = 0; i < THREADS__TABLE_SIZE; i++) { - struct threads *threads = &machine->threads[i]; - - exit_rwsem(&threads->lock); - } + threads__exit(&machine->threads); } void machine__delete(struct machine *machine) @@ -440,7 +387,7 @@ static struct thread *findnew_guest_code(struct machine *machine, return NULL; /* Assume maps are set up if there are any */ - if (maps__nr_maps(thread__maps(thread))) + if (!maps__empty(thread__maps(thread))) return thread; host_thread = machine__find_thread(host_machine, -1, pid); @@ -526,7 +473,7 @@ static void machine__update_thread_pid(struct machine *machine, if (thread__pid(th) == thread__tid(th)) return; - leader = __machine__findnew_thread(machine, thread__pid(th), thread__pid(th)); + leader = machine__findnew_thread(machine, thread__pid(th), thread__pid(th)); if (!leader) goto out_err; @@ -561,159 +508,55 @@ out_err: } /* - * Front-end cache - TID lookups come in blocks, - * so most of the time we dont have to look up - * the full rbtree: - */ -static struct thread* -__threads__get_last_match(struct threads *threads, struct machine *machine, - int pid, int tid) -{ - struct thread *th; - - th = threads->last_match; - if (th != NULL) { - if (thread__tid(th) == tid) { - machine__update_thread_pid(machine, th, pid); - return thread__get(th); - } - thread__put(threads->last_match); - threads->last_match = NULL; - } - - return NULL; -} - -static struct thread* -threads__get_last_match(struct threads *threads, struct machine *machine, - int pid, int tid) -{ - struct thread *th = NULL; - - if (perf_singlethreaded) - th = __threads__get_last_match(threads, machine, pid, tid); - - return th; -} - -static void -__threads__set_last_match(struct threads *threads, struct thread *th) -{ - 
thread__put(threads->last_match); - threads->last_match = thread__get(th); -} - -static void -threads__set_last_match(struct threads *threads, struct thread *th) -{ - if (perf_singlethreaded) - __threads__set_last_match(threads, th); -} - -/* * Caller must eventually drop thread->refcnt returned with a successful * lookup/new thread inserted. */ -static struct thread *____machine__findnew_thread(struct machine *machine, - struct threads *threads, - pid_t pid, pid_t tid, - bool create) +static struct thread *__machine__findnew_thread(struct machine *machine, + pid_t pid, + pid_t tid, + bool create) { - struct rb_node **p = &threads->entries.rb_root.rb_node; - struct rb_node *parent = NULL; - struct thread *th; - struct thread_rb_node *nd; - bool leftmost = true; + struct thread *th = threads__find(&machine->threads, tid); + bool created; - th = threads__get_last_match(threads, machine, pid, tid); - if (th) + if (th) { + machine__update_thread_pid(machine, th, pid); return th; - - while (*p != NULL) { - parent = *p; - th = rb_entry(parent, struct thread_rb_node, rb_node)->thread; - - if (thread__tid(th) == tid) { - threads__set_last_match(threads, th); - machine__update_thread_pid(machine, th, pid); - return thread__get(th); - } - - if (tid < thread__tid(th)) - p = &(*p)->rb_left; - else { - p = &(*p)->rb_right; - leftmost = false; - } } - if (!create) return NULL; - th = thread__new(pid, tid); - if (th == NULL) - return NULL; - - nd = malloc(sizeof(*nd)); - if (nd == NULL) { - thread__put(th); - return NULL; - } - nd->thread = th; - - rb_link_node(&nd->rb_node, parent, p); - rb_insert_color_cached(&nd->rb_node, &threads->entries, leftmost); - /* - * We have to initialize maps separately after rb tree is updated. - * - * The reason is that we call machine__findnew_thread within - * thread__init_maps to find the thread leader and that would screwed - * the rb tree. - */ - if (thread__init_maps(th, machine)) { - pr_err("Thread init failed thread %d\n", pid); - rb_erase_cached(&nd->rb_node, &threads->entries); - RB_CLEAR_NODE(&nd->rb_node); - free(nd); - thread__put(th); - return NULL; - } - /* - * It is now in the rbtree, get a ref - */ - threads__set_last_match(threads, th); - ++threads->nr; - - return thread__get(th); -} + th = threads__findnew(&machine->threads, pid, tid, &created); + if (created) { + /* + * We have to initialize maps separately after rb tree is + * updated. + * + * The reason is that we call machine__findnew_thread within + * thread__init_maps to find the thread leader and that would + * screw up the rb tree.
+ */ + if (thread__init_maps(th, machine)) { + pr_err("Thread init failed thread %d\n", pid); + threads__remove(&machine->threads, th); + thread__put(th); + return NULL; + } + } else + machine__update_thread_pid(machine, th, pid); -struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) -{ - return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true); + return th; } -struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, - pid_t tid) +struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid) { - struct threads *threads = machine__threads(machine, tid); - struct thread *th; - - down_write(&threads->lock); - th = __machine__findnew_thread(machine, pid, tid); - up_write(&threads->lock); - return th; + return __machine__findnew_thread(machine, pid, tid, /*create=*/true); } struct thread *machine__find_thread(struct machine *machine, pid_t pid, pid_t tid) { - struct threads *threads = machine__threads(machine, tid); - struct thread *th; - - down_read(&threads->lock); - th = ____machine__findnew_thread(machine, threads, pid, tid, false); - up_read(&threads->lock); - return th; + return __machine__findnew_thread(machine, pid, tid, /*create=*/false); } /* @@ -896,7 +739,6 @@ static int machine__process_ksymbol_register(struct machine *machine, struct symbol *sym; struct dso *dso; struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr); - bool put_map = false; int err = 0; if (!map) { @@ -913,12 +755,6 @@ static int machine__process_ksymbol_register(struct machine *machine, err = -ENOMEM; goto out; } - /* - * The inserted map has a get on it, we need to put to release - * the reference count here, but do it after all accesses are - * done. 
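The comment deleted here is stale under the new convention: maps__find() now returns a map with a reference already taken for the caller, so the conditional put_map bookkeeping collapses into an unconditional map__put() on the way out. A minimal sketch of the resulting caller-side pattern (illustrative, not from the patch):

	struct map *map = maps__find(maps, addr);	/* reference held for us */

	if (map != NULL) {
		/* ... use the map ... */
	}
	map__put(map);	/* map__put() accepts NULL, so no branch is needed */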
- */ - put_map = true; if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) { dso->binary_type = DSO_BINARY_TYPE__OOL; dso->data.file_size = event->ksymbol.len; @@ -952,8 +788,7 @@ static int machine__process_ksymbol_register(struct machine *machine, } dso__insert_symbol(dso, sym); out: - if (put_map) - map__put(map); + map__put(map); return err; } @@ -977,7 +812,7 @@ static int machine__process_ksymbol_unregister(struct machine *machine, if (sym) dso__delete_symbol(dso, sym); } - + map__put(map); return 0; } @@ -1005,11 +840,11 @@ int machine__process_text_poke(struct machine *machine, union perf_event *event, perf_event__fprintf_text_poke(event, machine, stdout); if (!event->text_poke.new_len) - return 0; + goto out; if (cpumode != PERF_RECORD_MISC_KERNEL) { pr_debug("%s: unsupported cpumode - ignoring\n", __func__); - return 0; + goto out; } if (dso) { @@ -1032,7 +867,8 @@ int machine__process_text_poke(struct machine *machine, union perf_event *event, pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n", event->text_poke.addr); } - +out: + map__put(map); return 0; } @@ -1120,29 +956,30 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp) return printed; } -size_t machine__fprintf(struct machine *machine, FILE *fp) -{ - struct rb_node *nd; - size_t ret; - int i; - - for (i = 0; i < THREADS__TABLE_SIZE; i++) { - struct threads *threads = &machine->threads[i]; - - down_read(&threads->lock); +struct machine_fprintf_cb_args { + FILE *fp; + size_t printed; +}; - ret = fprintf(fp, "Threads: %u\n", threads->nr); +static int machine_fprintf_cb(struct thread *thread, void *data) +{ + struct machine_fprintf_cb_args *args = data; - for (nd = rb_first_cached(&threads->entries); nd; - nd = rb_next(nd)) { - struct thread *pos = rb_entry(nd, struct thread_rb_node, rb_node)->thread; + /* TODO: handle fprintf errors. 
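machine__fprintf() is rebuilt below on the callback-plus-args idiom this patch uses throughout: a small struct stands in for a closure. A self-contained sketch of the idiom, with names invented for illustration:

	struct count_args {
		unsigned int nr;	/* accumulator shared with the callback */
	};

	static int count_cb(struct thread *thread __maybe_unused, void *data)
	{
		struct count_args *args = data;

		args->nr++;
		return 0;		/* non-zero would stop the iteration */
	}

	/* usage: */
	struct count_args args = { .nr = 0 };

	machine__for_each_thread(machine, count_cb, &args);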
*/ + args->printed += thread__fprintf(thread, args->fp); + return 0; +} - ret += thread__fprintf(pos, fp); - } +size_t machine__fprintf(struct machine *machine, FILE *fp) +{ + struct machine_fprintf_cb_args args = { + .fp = fp, + .printed = 0, + }; + size_t ret = fprintf(fp, "Threads: %zu\n", threads__nr(&machine->threads)); - up_read(&threads->lock); - } - return ret; + machine__for_each_thread(machine, machine_fprintf_cb, &args); + return ret + args.printed; } static struct dso *machine__get_kernel(struct machine *machine) @@ -1300,9 +1137,10 @@ static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data) return 0; dest_map = maps__find(args->kmaps, map__pgoff(map)); - if (dest_map != map) + if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map)) map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map))); + map__put(dest_map); args->found = true; return 0; } @@ -1543,8 +1381,10 @@ static int maps__set_module_path(struct maps *maps, const char *path, struct kmo return 0; long_name = strdup(path); - if (long_name == NULL) + if (long_name == NULL) { + map__put(map); return -ENOMEM; + } dso = map__dso(map); dso__set_long_name(dso, long_name, true); @@ -1558,7 +1398,7 @@ static int maps__set_module_path(struct maps *maps, const char *path, struct kmo dso->symtab_type++; dso->comp = m->comp; } - + map__put(map); return 0; } @@ -1709,8 +1549,8 @@ static int machine__update_kernel_mmap(struct machine *machine, updated = map__get(orig); machine->vmlinux_map = updated; - machine__set_kernel_mmap(machine, start, end); maps__remove(machine__kernel_maps(machine), orig); + machine__set_kernel_mmap(machine, start, end); err = maps__insert(machine__kernel_maps(machine), updated); map__put(orig); @@ -1765,8 +1605,10 @@ int machine__create_kernel_maps(struct machine *machine) struct map *next = maps__find_next_entry(machine__kernel_maps(machine), machine__kernel_map(machine)); - if (next) + if (next) { machine__set_kernel_mmap(machine, start, map__start(next)); + map__put(next); + } } out_put: @@ -2060,36 +1902,9 @@ out_problem: return 0; } -static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd, - struct thread *th, bool lock) -{ - struct threads *threads = machine__threads(machine, thread__tid(th)); - - if (!nd) - nd = thread_rb_node__find(th, &threads->entries.rb_root); - - if (threads->last_match && RC_CHK_EQUAL(threads->last_match, th)) - threads__set_last_match(threads, NULL); - - if (lock) - down_write(&threads->lock); - - BUG_ON(refcount_read(thread__refcnt(th)) == 0); - - thread__put(nd->thread); - rb_erase_cached(&nd->rb_node, &threads->entries); - RB_CLEAR_NODE(&nd->rb_node); - --threads->nr; - - free(nd); - - if (lock) - up_write(&threads->lock); -} - void machine__remove_thread(struct machine *machine, struct thread *th) { - return __machine__remove_thread(machine, NULL, th, true); + return threads__remove(&machine->threads, th); } int machine__process_fork_event(struct machine *machine, union perf_event *event, @@ -3223,23 +3038,7 @@ int machine__for_each_thread(struct machine *machine, int (*fn)(struct thread *thread, void *p), void *priv) { - struct threads *threads; - struct rb_node *nd; - int rc = 0; - int i; - - for (i = 0; i < THREADS__TABLE_SIZE; i++) { - threads = &machine->threads[i]; - for (nd = rb_first_cached(&threads->entries); nd; - nd = rb_next(nd)) { - struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node); - - rc = fn(trb->thread, priv); - if (rc != 0) - return rc; - } - } - return rc; + return 
threads__for_each_thread(&machine->threads, fn, priv); } int machines__for_each_thread(struct machines *machines, @@ -3263,6 +3062,36 @@ int machines__for_each_thread(struct machines *machines, return rc; } + +static int thread_list_cb(struct thread *thread, void *data) +{ + struct list_head *list = data; + struct thread_list *entry = malloc(sizeof(*entry)); + + if (!entry) + return -ENOMEM; + + entry->thread = thread__get(thread); + list_add_tail(&entry->list, list); + return 0; +} + +int machine__thread_list(struct machine *machine, struct list_head *list) +{ + return machine__for_each_thread(machine, thread_list_cb, list); +} + +void thread_list__delete(struct list_head *list) +{ + struct thread_list *pos, *next; + + list_for_each_entry_safe(pos, next, list, list) { + thread__zput(pos->thread); + list_del(&pos->list); + free(pos); + } +} + pid_t machine__get_current_tid(struct machine *machine, int cpu) { if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz) diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index 1279acda6a..e28c787616 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -7,6 +7,7 @@ #include "maps.h" #include "dsos.h" #include "rwsem.h" +#include "threads.h" struct addr_location; struct branch_stack; @@ -28,16 +29,6 @@ extern const char *ref_reloc_sym_names[]; struct vdso_info; -#define THREADS__TABLE_BITS 8 -#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS) - -struct threads { - struct rb_root_cached entries; - struct rw_semaphore lock; - unsigned int nr; - struct thread *last_match; -}; - struct machine { struct rb_node rb_node; pid_t pid; @@ -48,7 +39,7 @@ struct machine { char *root_dir; char *mmap_name; char *kallsyms_filename; - struct threads threads[THREADS__TABLE_SIZE]; + struct threads threads; struct vdso_info *vdso_info; struct perf_env *env; struct dsos dsos; @@ -69,12 +60,6 @@ struct machine { bool trampolines_mapped; }; -static inline struct threads *machine__threads(struct machine *machine, pid_t tid) -{ - /* Cast it to handle tid == -1 */ - return &machine->threads[(unsigned int)tid % THREADS__TABLE_SIZE]; -} - /* * The main kernel (vmlinux) map */ @@ -220,7 +205,6 @@ bool machine__is(struct machine *machine, const char *arch); bool machine__normalized_is(struct machine *machine, const char *arch); int machine__nr_cpus_avail(struct machine *machine); -struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid); struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id); @@ -280,6 +264,16 @@ int machines__for_each_thread(struct machines *machines, int (*fn)(struct thread *thread, void *p), void *priv); +struct thread_list { + struct list_head list; + struct thread *thread; +}; + +/* Make a list of struct thread_list based on threads in the machine. */ +int machine__thread_list(struct machine *machine, struct list_head *list); +/* Free up the nodes within the thread_list list. 
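A usage sketch for the new thread-list API (names as declared in machine.h below; error handling abbreviated): snapshot the machine's threads, iterate without holding machine-internal locks, then drop the references.

	LIST_HEAD(threads);
	struct thread_list *pos;

	if (machine__thread_list(machine, &threads) == 0) {
		list_for_each_entry(pos, &threads, list)
			pr_debug("tid %d\n", thread__tid(pos->thread));
	}
	/* Safe even after a failed fill: frees whatever nodes were added. */
	thread_list__delete(&threads);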
*/ +void thread_list__delete(struct list_head *list); + pid_t machine__get_current_tid(struct machine *machine, int cpu); int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, pid_t tid); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 54c67cb7ec..14a5ea70d8 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -168,6 +168,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, if (dso == NULL) goto out_delete; + assert(!dso->kernel); map__init(result, start, start + len, pgoff, dso); if (anon || no_dso) { @@ -552,10 +553,6 @@ u64 map__rip_2objdump(struct map *map, u64 rip) if (dso->rel) return rip - map__pgoff(map); - /* - * kernel modules also have DSO_TYPE_USER in dso->kernel, - * but all kernel modules are ET_REL, so won't get here. - */ if (dso->kernel == DSO_SPACE__USER) return rip + dso->text_offset; @@ -584,10 +581,6 @@ u64 map__objdump_2mem(struct map *map, u64 ip) if (dso->rel) return map__unmap_ip(map, ip + map__pgoff(map)); - /* - * kernel modules also have DSO_TYPE_USER in dso->kernel, - * but all kernel modules are ET_REL, so won't get here. - */ if (dso->kernel == DSO_SPACE__USER) return map__unmap_ip(map, ip - dso->text_offset); diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 0334fc18d9..ce13145a9f 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -6,81 +6,285 @@ #include "dso.h" #include "map.h" #include "maps.h" +#include "rwsem.h" #include "thread.h" #include "ui/ui.h" #include "unwind.h" +#include <internal/rc_check.h> -struct map_rb_node { - struct rb_node rb_node; - struct map *map; +/* + * Locking/sorting note: + * + * Sorting is done with the write lock, iteration and binary searching happens + * under the read lock requiring being sorted. There is a race between sorting + * releasing the write lock and acquiring the read lock for iteration/searching + * where another thread could insert and break the sorting of the maps. In + * practice inserting maps should be rare meaning that the race shouldn't lead + * to live lock. Removal of maps doesn't break being sorted. + */ + +DECLARE_RC_STRUCT(maps) { + struct rw_semaphore lock; + /** + * @maps_by_address: array of maps sorted by their starting address if + * maps_by_address_sorted is true. + */ + struct map **maps_by_address; + /** + * @maps_by_name: optional array of maps sorted by their dso name if + * maps_by_name_sorted is true. + */ + struct map **maps_by_name; + struct machine *machine; +#ifdef HAVE_LIBUNWIND_SUPPORT + void *addr_space; + const struct unwind_libunwind_ops *unwind_libunwind_ops; +#endif + refcount_t refcnt; + /** + * @nr_maps: number of maps_by_address, and possibly maps_by_name, + * entries that contain maps. + */ + unsigned int nr_maps; + /** + * @nr_maps_allocated: number of entries in maps_by_address and possibly + * maps_by_name. + */ + unsigned int nr_maps_allocated; + /** + * @last_search_by_name_idx: cache of last found by name entry's index + * as frequent searches for the same dso name are common. + */ + unsigned int last_search_by_name_idx; + /** @maps_by_address_sorted: is maps_by_address sorted. */ + bool maps_by_address_sorted; + /** @maps_by_name_sorted: is maps_by_name sorted. */ + bool maps_by_name_sorted; + /** @ends_broken: does the map contain a map where end values are unset/unsorted? 
*/ + bool ends_broken; }; -#define maps__for_each_entry(maps, map) \ - for (map = maps__first(maps); map; map = map_rb_node__next(map)) +static void check_invariants(const struct maps *maps __maybe_unused) +{ +#ifndef NDEBUG + assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated); + for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) { + struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i]; + + /* Check map is well-formed. */ + assert(map__end(map) == 0 || map__start(map) <= map__end(map)); + /* Expect at least 1 reference count. */ + assert(refcount_read(map__refcnt(map)) > 0); + + if (map__dso(map) && map__dso(map)->kernel) + assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps)); + + if (i > 0) { + struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1]; + + /* If addresses are sorted... */ + if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) { + /* Maps should be in start address order. */ + assert(map__start(prev) <= map__start(map)); + /* + * If the ends of maps aren't broken (during + * construction) then they should be ordered + * too. + */ + if (!RC_CHK_ACCESS(maps)->ends_broken) { + assert(map__end(prev) <= map__end(map)); + assert(map__end(prev) <= map__start(map) || + map__start(prev) == map__start(map)); + } + } + } + } + if (RC_CHK_ACCESS(maps)->maps_by_name) { + for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) { + struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i]; -#define maps__for_each_entry_safe(maps, map, next) \ - for (map = maps__first(maps), next = map_rb_node__next(map); map; \ - map = next, next = map_rb_node__next(map)) + /* + * Maps by name maps should be in maps_by_address, so + * the reference count should be higher. + */ + assert(refcount_read(map__refcnt(map)) > 1); + } + } +#endif +} -static struct rb_root *maps__entries(struct maps *maps) +static struct map **maps__maps_by_address(const struct maps *maps) { - return &RC_CHK_ACCESS(maps)->entries; + return RC_CHK_ACCESS(maps)->maps_by_address; } -static struct rw_semaphore *maps__lock(struct maps *maps) +static void maps__set_maps_by_address(struct maps *maps, struct map **new) { - return &RC_CHK_ACCESS(maps)->lock; + RC_CHK_ACCESS(maps)->maps_by_address = new; + +} + +static struct map ***maps__maps_by_name_addr(struct maps *maps) +{ + return &RC_CHK_ACCESS(maps)->maps_by_name; +} + +static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated) +{ + RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated; +} + +static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps) +{ + RC_CHK_ACCESS(maps)->nr_maps = nr_maps; } -static struct map **maps__maps_by_name(struct maps *maps) +/* Not in the header, to aid reference counting. 
*/ +static struct map **maps__maps_by_name(const struct maps *maps) { return RC_CHK_ACCESS(maps)->maps_by_name; + } -static struct map_rb_node *maps__first(struct maps *maps) +static void maps__set_maps_by_name(struct maps *maps, struct map **new) { - struct rb_node *first = rb_first(maps__entries(maps)); + RC_CHK_ACCESS(maps)->maps_by_name = new; - if (first) - return rb_entry(first, struct map_rb_node, rb_node); - return NULL; } -static struct map_rb_node *map_rb_node__next(struct map_rb_node *node) +static bool maps__maps_by_address_sorted(const struct maps *maps) +{ + return RC_CHK_ACCESS(maps)->maps_by_address_sorted; +} + +static void maps__set_maps_by_address_sorted(struct maps *maps, bool value) { - struct rb_node *next; + RC_CHK_ACCESS(maps)->maps_by_address_sorted = value; +} - if (!node) - return NULL; +static bool maps__maps_by_name_sorted(const struct maps *maps) +{ + return RC_CHK_ACCESS(maps)->maps_by_name_sorted; +} - next = rb_next(&node->rb_node); +static void maps__set_maps_by_name_sorted(struct maps *maps, bool value) +{ + RC_CHK_ACCESS(maps)->maps_by_name_sorted = value; +} - if (!next) - return NULL; +struct machine *maps__machine(const struct maps *maps) +{ + return RC_CHK_ACCESS(maps)->machine; +} - return rb_entry(next, struct map_rb_node, rb_node); +unsigned int maps__nr_maps(const struct maps *maps) +{ + return RC_CHK_ACCESS(maps)->nr_maps; } -static struct map_rb_node *maps__find_node(struct maps *maps, struct map *map) +refcount_t *maps__refcnt(struct maps *maps) { - struct map_rb_node *rb_node; + return &RC_CHK_ACCESS(maps)->refcnt; +} - maps__for_each_entry(maps, rb_node) { - if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map)) - return rb_node; - } - return NULL; +#ifdef HAVE_LIBUNWIND_SUPPORT +void *maps__addr_space(const struct maps *maps) +{ + return RC_CHK_ACCESS(maps)->addr_space; +} + +void maps__set_addr_space(struct maps *maps, void *addr_space) +{ + RC_CHK_ACCESS(maps)->addr_space = addr_space; +} + +const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps) +{ + return RC_CHK_ACCESS(maps)->unwind_libunwind_ops; +} + +void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops) +{ + RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops; +} +#endif + +static struct rw_semaphore *maps__lock(struct maps *maps) +{ + /* + * When the lock is acquired or released the maps invariants should + * hold. 
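The lifetime functions that follow (maps__new/maps__get/maps__put/maps__delete) pair up in the usual reference-counted way; with REFCNT_CHECKING enabled the RC_CHK_* wrappers additionally flag unbalanced get/put. A minimal lifetime sketch:

	struct maps *maps = maps__new(machine);	/* refcount == 1 */
	struct maps *ref = maps__get(maps);	/* refcount == 2 */

	/* ... hand 'ref' to another owner ... */
	maps__put(ref);				/* refcount == 1 */
	maps__put(maps);			/* refcount == 0 -> maps__delete() */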
+ */ + check_invariants(maps); + return &RC_CHK_ACCESS(maps)->lock; } static void maps__init(struct maps *maps, struct machine *machine) { - refcount_set(maps__refcnt(maps), 1); init_rwsem(maps__lock(maps)); - RC_CHK_ACCESS(maps)->entries = RB_ROOT; + RC_CHK_ACCESS(maps)->maps_by_address = NULL; + RC_CHK_ACCESS(maps)->maps_by_name = NULL; RC_CHK_ACCESS(maps)->machine = machine; - RC_CHK_ACCESS(maps)->last_search_by_name = NULL; +#ifdef HAVE_LIBUNWIND_SUPPORT + RC_CHK_ACCESS(maps)->addr_space = NULL; + RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL; +#endif + refcount_set(maps__refcnt(maps), 1); RC_CHK_ACCESS(maps)->nr_maps = 0; - RC_CHK_ACCESS(maps)->maps_by_name = NULL; + RC_CHK_ACCESS(maps)->nr_maps_allocated = 0; + RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0; + RC_CHK_ACCESS(maps)->maps_by_address_sorted = true; + RC_CHK_ACCESS(maps)->maps_by_name_sorted = false; +} + +static void maps__exit(struct maps *maps) +{ + struct map **maps_by_address = maps__maps_by_address(maps); + struct map **maps_by_name = maps__maps_by_name(maps); + + for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { + map__zput(maps_by_address[i]); + if (maps_by_name) + map__zput(maps_by_name[i]); + } + zfree(&maps_by_address); + zfree(&maps_by_name); + unwind__finish_access(maps); +} + +struct maps *maps__new(struct machine *machine) +{ + struct maps *result; + RC_STRUCT(maps) *maps = zalloc(sizeof(*maps)); + + if (ADD_RC_CHK(result, maps)) + maps__init(result, machine); + + return result; +} + +static void maps__delete(struct maps *maps) +{ + maps__exit(maps); + RC_CHK_FREE(maps); +} + +struct maps *maps__get(struct maps *maps) +{ + struct maps *result; + + if (RC_CHK_GET(result, maps)) + refcount_inc(maps__refcnt(maps)); + + return result; +} + +void maps__put(struct maps *maps) +{ + if (maps && refcount_dec_and_test(maps__refcnt(maps))) + maps__delete(maps); + else + RC_CHK_PUT(maps); } static void __maps__free_maps_by_name(struct maps *maps) @@ -92,219 +296,330 @@ static void __maps__free_maps_by_name(struct maps *maps) map__put(maps__maps_by_name(maps)[i]); zfree(&RC_CHK_ACCESS(maps)->maps_by_name); - RC_CHK_ACCESS(maps)->nr_maps_allocated = 0; } -static int __maps__insert(struct maps *maps, struct map *map) +static int map__start_cmp(const void *a, const void *b) { - struct rb_node **p = &maps__entries(maps)->rb_node; - struct rb_node *parent = NULL; - const u64 ip = map__start(map); - struct map_rb_node *m, *new_rb_node; - - new_rb_node = malloc(sizeof(*new_rb_node)); - if (!new_rb_node) - return -ENOMEM; - - RB_CLEAR_NODE(&new_rb_node->rb_node); - new_rb_node->map = map__get(map); - - while (*p != NULL) { - parent = *p; - m = rb_entry(parent, struct map_rb_node, rb_node); - if (ip < map__start(m->map)) - p = &(*p)->rb_left; - else - p = &(*p)->rb_right; + const struct map *map_a = *(const struct map * const *)a; + const struct map *map_b = *(const struct map * const *)b; + u64 map_a_start = map__start(map_a); + u64 map_b_start = map__start(map_b); + + if (map_a_start == map_b_start) { + u64 map_a_end = map__end(map_a); + u64 map_b_end = map__end(map_b); + + if (map_a_end == map_b_end) { + /* Ensure maps with the same addresses have a fixed order. */ + if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b)) + return 0; + return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b) + ? 1 : -1; + } + return map_a_end > map_b_end ? 
1 : -1; } - - rb_link_node(&new_rb_node->rb_node, parent, p); - rb_insert_color(&new_rb_node->rb_node, maps__entries(maps)); - return 0; + return map_a_start > map_b_start ? 1 : -1; } -int maps__insert(struct maps *maps, struct map *map) +static void __maps__sort_by_address(struct maps *maps) { - int err; - const struct dso *dso = map__dso(map); + if (maps__maps_by_address_sorted(maps)) + return; + + qsort(maps__maps_by_address(maps), + maps__nr_maps(maps), + sizeof(struct map *), + map__start_cmp); + maps__set_maps_by_address_sorted(maps, true); +} +static void maps__sort_by_address(struct maps *maps) +{ down_write(maps__lock(maps)); - err = __maps__insert(maps, map); - if (err) - goto out; - - ++RC_CHK_ACCESS(maps)->nr_maps; + __maps__sort_by_address(maps); + up_write(maps__lock(maps)); +} - if (dso && dso->kernel) { - struct kmap *kmap = map__kmap(map); +static int map__strcmp(const void *a, const void *b) +{ + const struct map *map_a = *(const struct map * const *)a; + const struct map *map_b = *(const struct map * const *)b; + const struct dso *dso_a = map__dso(map_a); + const struct dso *dso_b = map__dso(map_b); + int ret = strcmp(dso_a->short_name, dso_b->short_name); - if (kmap) - kmap->kmaps = maps; - else - pr_err("Internal error: kernel dso with non kernel map\n"); + if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) { + /* Ensure distinct but name equal maps have an order. */ + return map__start_cmp(a, b); } + return ret; +} +static int maps__sort_by_name(struct maps *maps) +{ + int err = 0; + down_write(maps__lock(maps)); + if (!maps__maps_by_name_sorted(maps)) { + struct map **maps_by_name = maps__maps_by_name(maps); - /* - * If we already performed some search by name, then we need to add the just - * inserted map and resort. 
- */ - if (maps__maps_by_name(maps)) { - if (maps__nr_maps(maps) > RC_CHK_ACCESS(maps)->nr_maps_allocated) { - int nr_allocate = maps__nr_maps(maps) * 2; - struct map **maps_by_name = realloc(maps__maps_by_name(maps), - nr_allocate * sizeof(map)); - - if (maps_by_name == NULL) { - __maps__free_maps_by_name(maps); + if (!maps_by_name) { + maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated * + sizeof(*maps_by_name)); + if (!maps_by_name) err = -ENOMEM; - goto out; - } + else { + struct map **maps_by_address = maps__maps_by_address(maps); + unsigned int n = maps__nr_maps(maps); - RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name; - RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate; + maps__set_maps_by_name(maps, maps_by_name); + for (unsigned int i = 0; i < n; i++) + maps_by_name[i] = map__get(maps_by_address[i]); + } + } + if (!err) { + qsort(maps_by_name, + maps__nr_maps(maps), + sizeof(struct map *), + map__strcmp); + maps__set_maps_by_name_sorted(maps, true); } - maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map__get(map); - __maps__sort_by_name(maps); } - out: up_write(maps__lock(maps)); return err; } -static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node) +static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map) { - rb_erase_init(&rb_node->rb_node, maps__entries(maps)); - map__put(rb_node->map); - free(rb_node); + struct map **maps_by_address = maps__maps_by_address(maps); + + if (maps__maps_by_address_sorted(maps)) { + struct map **mapp = + bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps), + sizeof(*mapp), map__start_cmp); + + if (mapp) + return mapp - maps_by_address; + } else { + for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { + if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map)) + return i; + } + } + pr_err("Map missing from maps"); + return -1; } -void maps__remove(struct maps *maps, struct map *map) +static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map) { - struct map_rb_node *rb_node; - - down_write(maps__lock(maps)); - if (RC_CHK_ACCESS(maps)->last_search_by_name == map) - RC_CHK_ACCESS(maps)->last_search_by_name = NULL; - - rb_node = maps__find_node(maps, map); - assert(rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map)); - __maps__remove(maps, rb_node); - if (maps__maps_by_name(maps)) - __maps__free_maps_by_name(maps); - --RC_CHK_ACCESS(maps)->nr_maps; - up_write(maps__lock(maps)); + struct map **maps_by_name = maps__maps_by_name(maps); + + if (maps__maps_by_name_sorted(maps)) { + struct map **mapp = + bsearch(&map, maps_by_name, maps__nr_maps(maps), + sizeof(*mapp), map__strcmp); + + if (mapp) + return mapp - maps_by_name; + } else { + for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { + if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map)) + return i; + } + } + pr_err("Map missing from maps"); + return -1; } -static void __maps__purge(struct maps *maps) +static int __maps__insert(struct maps *maps, struct map *new) { - struct map_rb_node *pos, *next; + struct map **maps_by_address = maps__maps_by_address(maps); + struct map **maps_by_name = maps__maps_by_name(maps); + const struct dso *dso = map__dso(new); + unsigned int nr_maps = maps__nr_maps(maps); + unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated; + + if (nr_maps + 1 > nr_allocate) { + nr_allocate = !nr_allocate ? 
32 : nr_allocate * 2; + + maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new)); + if (!maps_by_address) + return -ENOMEM; + + maps__set_maps_by_address(maps, maps_by_address); + if (maps_by_name) { + maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new)); + if (!maps_by_name) { + /* + * If by name fails, just disable by name and it will + * recompute next time it is required. + */ + __maps__free_maps_by_name(maps); + } + maps__set_maps_by_name(maps, maps_by_name); + } + RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate; + } + /* Insert the value at the end. */ + maps_by_address[nr_maps] = map__get(new); + if (maps_by_name) + maps_by_name[nr_maps] = map__get(new); - if (maps__maps_by_name(maps)) - __maps__free_maps_by_name(maps); + nr_maps++; + RC_CHK_ACCESS(maps)->nr_maps = nr_maps; - maps__for_each_entry_safe(maps, pos, next) { - rb_erase_init(&pos->rb_node, maps__entries(maps)); - map__put(pos->map); - free(pos); + /* + * Recompute if things are sorted. If things are inserted in a sorted + * manner, for example by processing /proc/pid/maps, then no + * sorting/resorting will be necessary. + */ + if (nr_maps == 1) { + /* If there's just 1 entry then maps are sorted. */ + maps__set_maps_by_address_sorted(maps, true); + maps__set_maps_by_name_sorted(maps, maps_by_name != NULL); + } else { + /* Sorted if maps were already sorted and this map starts after the last one. */ + maps__set_maps_by_address_sorted(maps, + maps__maps_by_address_sorted(maps) && + map__end(maps_by_address[nr_maps - 2]) <= map__start(new)); + maps__set_maps_by_name_sorted(maps, false); } + if (map__end(new) < map__start(new)) + RC_CHK_ACCESS(maps)->ends_broken = true; + if (dso && dso->kernel) { + struct kmap *kmap = map__kmap(new); + + if (kmap) + kmap->kmaps = maps; + else + pr_err("Internal error: kernel dso with non kernel map\n"); + } + return 0; } -static void maps__exit(struct maps *maps) +int maps__insert(struct maps *maps, struct map *map) { + int ret; + down_write(maps__lock(maps)); - __maps__purge(maps); + ret = __maps__insert(maps, map); up_write(maps__lock(maps)); + return ret; } -bool maps__empty(struct maps *maps) -{ - return !maps__first(maps); -} - -struct maps *maps__new(struct machine *machine) +static void __maps__remove(struct maps *maps, struct map *map) { - struct maps *result; - RC_STRUCT(maps) *maps = zalloc(sizeof(*maps)); - - if (ADD_RC_CHK(result, maps)) - maps__init(result, machine); + struct map **maps_by_address = maps__maps_by_address(maps); + struct map **maps_by_name = maps__maps_by_name(maps); + unsigned int nr_maps = maps__nr_maps(maps); + unsigned int address_idx; + + /* Slide later mappings over the one to remove */ + address_idx = maps__by_address_index(maps, map); + map__put(maps_by_address[address_idx]); + memmove(&maps_by_address[address_idx], + &maps_by_address[address_idx + 1], + (nr_maps - address_idx - 1) * sizeof(*maps_by_address)); + + if (maps_by_name) { + unsigned int name_idx = maps__by_name_index(maps, map); + + map__put(maps_by_name[name_idx]); + memmove(&maps_by_name[name_idx], + &maps_by_name[name_idx + 1], + (nr_maps - name_idx - 1) * sizeof(*maps_by_name)); + } - return result; + --RC_CHK_ACCESS(maps)->nr_maps; } -static void maps__delete(struct maps *maps) +void maps__remove(struct maps *maps, struct map *map) { - maps__exit(maps); - unwind__finish_access(maps); - RC_CHK_FREE(maps); + down_write(maps__lock(maps)); + __maps__remove(maps, map); + up_write(maps__lock(maps)); } -struct maps *maps__get(struct maps *maps) +bool 
maps__empty(struct maps *maps) { - struct maps *result; + bool res; - if (RC_CHK_GET(result, maps)) - refcount_inc(maps__refcnt(maps)); + down_read(maps__lock(maps)); + res = maps__nr_maps(maps) == 0; + up_read(maps__lock(maps)); - return result; + return res; } -void maps__put(struct maps *maps) +bool maps__equal(struct maps *a, struct maps *b) { - if (maps && refcount_dec_and_test(maps__refcnt(maps))) - maps__delete(maps); - else - RC_CHK_PUT(maps); + return RC_CHK_EQUAL(a, b); } int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data) { - struct map_rb_node *pos; + bool done = false; int ret = 0; - down_read(maps__lock(maps)); - maps__for_each_entry(maps, pos) { - ret = cb(pos->map, data); - if (ret) - break; + /* See locking/sorting note. */ + while (!done) { + down_read(maps__lock(maps)); + if (maps__maps_by_address_sorted(maps)) { + /* + * maps__for_each_map callbacks may buggily/unsafely + * insert into maps_by_address. Deliberately reload + * maps__nr_maps and maps_by_address on each iteration + * to avoid using memory freed by maps__insert growing + * the array - this may cause maps to be skipped or + * repeated. + */ + for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { + struct map **maps_by_address = maps__maps_by_address(maps); + struct map *map = maps_by_address[i]; + + ret = cb(map, data); + if (ret) + break; + } + done = true; + } + up_read(maps__lock(maps)); + if (!done) + maps__sort_by_address(maps); } - up_read(maps__lock(maps)); return ret; } void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data) { - struct map_rb_node *pos, *next; - unsigned int start_nr_maps; + struct map **maps_by_address; down_write(maps__lock(maps)); - start_nr_maps = maps__nr_maps(maps); - maps__for_each_entry_safe(maps, pos, next) { - if (cb(pos->map, data)) { - __maps__remove(maps, pos); - --RC_CHK_ACCESS(maps)->nr_maps; - } + maps_by_address = maps__maps_by_address(maps); + for (unsigned int i = 0; i < maps__nr_maps(maps);) { + if (cb(maps_by_address[i], data)) + __maps__remove(maps, maps_by_address[i]); + else + i++; } - if (maps__maps_by_name(maps) && start_nr_maps != maps__nr_maps(maps)) - __maps__free_maps_by_name(maps); - up_write(maps__lock(maps)); } struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp) { struct map *map = maps__find(maps, addr); + struct symbol *result = NULL; /* Ensure map is loaded before using map->map_ip */ - if (map != NULL && map__load(map) >= 0) { - if (mapp != NULL) - *mapp = map; - return map__find_symbol(map, map__map_ip(map, addr)); - } + if (map != NULL && map__load(map) >= 0) + result = map__find_symbol(map, map__map_ip(map, addr)); + + if (mapp) + *mapp = map; + else + map__put(map); - return NULL; + return result; } struct maps__find_symbol_by_name_args { @@ -393,24 +708,28 @@ size_t maps__fprintf(struct maps *maps, FILE *fp) * Find first map where end > map->start. * Same as find_vma() in kernel. 
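first_ending_after() below is a lower-bound binary search: find the smallest index whose map end exceeds the new map's start. The same invariant over a plain array, as a standalone sketch (assumes the end values are non-decreasing, which holds for non-overlapping maps sorted by start):

	/* Return the first index i with end[i] > key, or n if none. */
	static unsigned int lower_bound_end(const u64 *end, unsigned int n, u64 key)
	{
		unsigned int lo = 0, hi = n, first = n;

		while (lo < hi) {
			unsigned int mid = lo + (hi - lo) / 2;

			if (end[mid] > key) {
				first = mid;	/* candidate; keep looking left */
				hi = mid;
			} else {
				lo = mid + 1;	/* everything here ends too early */
			}
		}
		return first;
	}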
*/ -static struct rb_node *first_ending_after(struct maps *maps, const struct map *map) +static unsigned int first_ending_after(struct maps *maps, const struct map *map) { - struct rb_root *root; - struct rb_node *next, *first; + struct map **maps_by_address = maps__maps_by_address(maps); + int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1; + + assert(maps__maps_by_address_sorted(maps)); + if (low <= high && map__end(maps_by_address[0]) > map__start(map)) + return 0; - root = maps__entries(maps); - next = root->rb_node; - first = NULL; - while (next) { - struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node); + while (low <= high) { + int mid = (low + high) / 2; + struct map *pos = maps_by_address[mid]; - if (map__end(pos->map) > map__start(map)) { - first = next; - if (map__start(pos->map) <= map__start(map)) + if (map__end(pos) > map__start(map)) { + first = mid; + if (map__start(pos) <= map__start(map)) { + /* Entry overlaps map. */ break; - next = next->rb_left; + } + high = mid - 1; } else - next = next->rb_right; + low = mid + 1; } return first; } @@ -419,171 +738,249 @@ static struct rb_node *first_ending_after(struct maps *maps, const struct map *m * Adds new to maps, if new overlaps existing entries then the existing maps are * adjusted or removed so that new fits without overlapping any entries. */ -int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) +static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) { - - struct rb_node *next; + struct map **maps_by_address; int err = 0; FILE *fp = debug_file(); - down_write(maps__lock(maps)); +sort_again: + if (!maps__maps_by_address_sorted(maps)) + __maps__sort_by_address(maps); - next = first_ending_after(maps, new); - while (next && !err) { - struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node); - next = rb_next(&pos->rb_node); + maps_by_address = maps__maps_by_address(maps); + /* + * Iterate through entries where the end of the existing entry is + * greater-than the new map's start. + */ + for (unsigned int i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) { + struct map *pos = maps_by_address[i]; + struct map *before = NULL, *after = NULL; /* * Stop if current map starts after map->end. * Maps are ordered by start: next will not overlap for sure. */ - if (map__start(pos->map) >= map__end(new)) + if (map__start(pos) >= map__end(new)) break; - if (verbose >= 2) { - - if (use_browser) { - pr_debug("overlapping maps in %s (disable tui for more info)\n", - map__dso(new)->name); - } else { - pr_debug("overlapping maps:\n"); - map__fprintf(new, fp); - map__fprintf(pos->map, fp); - } + if (use_browser) { + pr_debug("overlapping maps in %s (disable tui for more info)\n", + map__dso(new)->name); + } else if (verbose >= 2) { + pr_debug("overlapping maps:\n"); + map__fprintf(new, fp); + map__fprintf(pos, fp); } - rb_erase_init(&pos->rb_node, maps__entries(maps)); /* * Now check if we need to create new maps for areas not * overlapped by the new map: */ - if (map__start(new) > map__start(pos->map)) { - struct map *before = map__clone(pos->map); + if (map__start(new) > map__start(pos)) { + /* Map starts within existing map. Need to shorten the existing map. 
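The three-way split performed below can be pictured like this (illustration only, not from the source):

	/*
	 *   existing:  |------------pos------------|
	 *   new:              |----new----|
	 *   result:    |before|----new----|-after--|
	 */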
*/ + before = map__clone(pos); if (before == NULL) { err = -ENOMEM; - goto put_map; + goto out_err; } - map__set_end(before, map__start(new)); - err = __maps__insert(maps, before); - if (err) { - map__put(before); - goto put_map; - } if (verbose >= 2 && !use_browser) map__fprintf(before, fp); - map__put(before); } - - if (map__end(new) < map__end(pos->map)) { - struct map *after = map__clone(pos->map); + if (map__end(new) < map__end(pos)) { + /* The new map isn't as long as the existing map. */ + after = map__clone(pos); if (after == NULL) { + map__zput(before); err = -ENOMEM; - goto put_map; + goto out_err; } map__set_start(after, map__end(new)); - map__add_pgoff(after, map__end(new) - map__start(pos->map)); - assert(map__map_ip(pos->map, map__end(new)) == - map__map_ip(after, map__end(new))); - err = __maps__insert(maps, after); - if (err) { - map__put(after); - goto put_map; - } + map__add_pgoff(after, map__end(new) - map__start(pos)); + assert(map__map_ip(pos, map__end(new)) == + map__map_ip(after, map__end(new))); + if (verbose >= 2 && !use_browser) map__fprintf(after, fp); - map__put(after); } -put_map: - map__put(pos->map); - free(pos); + /* + * If adding one entry, for `before` or `after`, we can replace + * the existing entry. If both `before` and `after` are + * necessary then an insert is needed. If the new map + * entirely overlaps the existing entry it can just be removed. + */ + if (before) { + map__put(maps_by_address[i]); + maps_by_address[i] = before; + /* Maps are still ordered, go to next one. */ + i++; + if (after) { + __maps__insert(maps, after); + map__put(after); + if (!maps__maps_by_address_sorted(maps)) { + /* + * Sorting broken so invariants don't + * hold, sort and go again. + */ + goto sort_again; + } + /* + * Maps are still ordered, skip after and go to + * next one (terminate loop). + */ + i++; + } + } else if (after) { + map__put(maps_by_address[i]); + maps_by_address[i] = after; + /* Maps are ordered, go to next one. */ + i++; + } else { + __maps__remove(maps, pos); + /* + * Maps are ordered but no need to increase `i` as the + * later maps were moved down. + */ + } + check_invariants(maps); } /* Add the map. */ - err = __maps__insert(maps, new); - up_write(maps__lock(maps)); + __maps__insert(maps, new); +out_err: return err; } -int maps__copy_from(struct maps *maps, struct maps *parent) +int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) { int err; - struct map_rb_node *rb_node; + down_write(maps__lock(maps)); + err = __maps__fixup_overlap_and_insert(maps, new); + up_write(maps__lock(maps)); + return err; +} + +int maps__copy_from(struct maps *dest, struct maps *parent) +{ + /* Note, if struct map were immutable then cloning could use ref counts. */ + struct map **parent_maps_by_address; + int err = 0; + unsigned int n; + + down_write(maps__lock(dest)); down_read(maps__lock(parent)); - maps__for_each_entry(parent, rb_node) { - struct map *new = map__clone(rb_node->map); + parent_maps_by_address = maps__maps_by_address(parent); + n = maps__nr_maps(parent); + if (maps__nr_maps(dest) == 0) { + /* No existing mappings so just copy from parent to avoid reallocs in insert.
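For context, a hedged sketch of how maps__copy_from() is typically reached (the thread__maps() accessors are from the existing perf thread API; error handling abbreviated); the destination is freshly created and empty there, so this fast path applies:

	/* e.g. when cloning a thread's address space from its parent: */
	if (maps__copy_from(thread__maps(thread), thread__maps(parent)) < 0)
		pr_err("Failed to clone maps from parent\n");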
*/ + unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated; + struct map **dest_maps_by_address = + malloc(nr_maps_allocated * sizeof(struct map *)); + struct map **dest_maps_by_name = NULL; - if (new == NULL) { + if (!dest_maps_by_address) err = -ENOMEM; - goto out_unlock; + else { + if (maps__maps_by_name(parent)) { + dest_maps_by_name = + malloc(nr_maps_allocated * sizeof(struct map *)); + } + + RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address; + RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name; + RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated; } - err = unwind__prepare_access(maps, new, NULL); - if (err) - goto out_unlock; + for (unsigned int i = 0; !err && i < n; i++) { + struct map *pos = parent_maps_by_address[i]; + struct map *new = map__clone(pos); - err = maps__insert(maps, new); - if (err) - goto out_unlock; + if (!new) + err = -ENOMEM; + else { + err = unwind__prepare_access(dest, new, NULL); + if (!err) { + dest_maps_by_address[i] = new; + if (dest_maps_by_name) + dest_maps_by_name[i] = map__get(new); + RC_CHK_ACCESS(dest)->nr_maps = i + 1; + } + } + if (err) + map__put(new); + } + maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent)); + if (!err) { + RC_CHK_ACCESS(dest)->last_search_by_name_idx = + RC_CHK_ACCESS(parent)->last_search_by_name_idx; + maps__set_maps_by_name_sorted(dest, + dest_maps_by_name && + maps__maps_by_name_sorted(parent)); + } else { + RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0; + maps__set_maps_by_name_sorted(dest, false); + } + } else { + /* Unexpected copying to a maps containing entries. */ + for (unsigned int i = 0; !err && i < n; i++) { + struct map *pos = parent_maps_by_address[i]; + struct map *new = map__clone(pos); - map__put(new); + if (!new) + err = -ENOMEM; + else { + err = unwind__prepare_access(dest, new, NULL); + if (!err) + err = __maps__insert(dest, new); + } + map__put(new); + } } - - err = 0; -out_unlock: up_read(maps__lock(parent)); + up_write(maps__lock(dest)); return err; } -struct map *maps__find(struct maps *maps, u64 ip) +static int map__addr_cmp(const void *key, const void *entry) { - struct rb_node *p; - struct map_rb_node *m; - + const u64 ip = *(const u64 *)key; + const struct map *map = *(const struct map * const *)entry; - down_read(maps__lock(maps)); - - p = maps__entries(maps)->rb_node; - while (p != NULL) { - m = rb_entry(p, struct map_rb_node, rb_node); - if (ip < map__start(m->map)) - p = p->rb_left; - else if (ip >= map__end(m->map)) - p = p->rb_right; - else - goto out; - } - - m = NULL; -out: - up_read(maps__lock(maps)); - return m ? m->map : NULL; + if (ip < map__start(map)) + return -1; + if (ip >= map__end(map)) + return 1; + return 0; } -static int map__strcmp(const void *a, const void *b) +struct map *maps__find(struct maps *maps, u64 ip) { - const struct map *map_a = *(const struct map **)a; - const struct map *map_b = *(const struct map **)b; - const struct dso *dso_a = map__dso(map_a); - const struct dso *dso_b = map__dso(map_b); - int ret = strcmp(dso_a->short_name, dso_b->short_name); - - if (ret == 0 && map_a != map_b) { - /* - * Ensure distinct but name equal maps have an order in part to - * aid reference counting. - */ - ret = (int)map__start(map_a) - (int)map__start(map_b); - if (ret == 0) - ret = (int)((intptr_t)map_a - (intptr_t)map_b); + struct map *result = NULL; + bool done = false; + + /* See locking/sorting note. 
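One subtlety in the lookup below: the matching map must be map__get()'d while the read lock is still held, because a concurrent maps__remove() could otherwise free it between the search and the return. The safe shape, annotated (a condensed sketch of the code that follows):

	down_read(maps__lock(maps));
	mapp = bsearch(&ip, maps__maps_by_address(maps), maps__nr_maps(maps),
		       sizeof(*mapp), map__addr_cmp);
	/* Take the reference before dropping the lock. */
	result = mapp ? map__get(*mapp) : NULL;
	up_read(maps__lock(maps));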
*/ + while (!done) { + down_read(maps__lock(maps)); + if (maps__maps_by_address_sorted(maps)) { + struct map **mapp = + bsearch(&ip, maps__maps_by_address(maps), maps__nr_maps(maps), + sizeof(*mapp), map__addr_cmp); + + if (mapp) + result = map__get(*mapp); + done = true; + } + up_read(maps__lock(maps)); + if (!done) + maps__sort_by_address(maps); } - - return ret; + return result; } static int map__strcmp_name(const void *name, const void *b) @@ -593,126 +990,113 @@ static int map__strcmp_name(const void *name, const void *b) return strcmp(name, dso->short_name); } -void __maps__sort_by_name(struct maps *maps) -{ - qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp); -} - -static int map__groups__sort_by_name_from_rbtree(struct maps *maps) -{ - struct map_rb_node *rb_node; - struct map **maps_by_name = realloc(maps__maps_by_name(maps), - maps__nr_maps(maps) * sizeof(struct map *)); - int i = 0; - - if (maps_by_name == NULL) - return -1; - - up_read(maps__lock(maps)); - down_write(maps__lock(maps)); - - RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name; - RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps); - - maps__for_each_entry(maps, rb_node) - maps_by_name[i++] = map__get(rb_node->map); - - __maps__sort_by_name(maps); - - up_write(maps__lock(maps)); - down_read(maps__lock(maps)); - - return 0; -} - -static struct map *__maps__find_by_name(struct maps *maps, const char *name) +struct map *maps__find_by_name(struct maps *maps, const char *name) { - struct map **mapp; - - if (maps__maps_by_name(maps) == NULL && - map__groups__sort_by_name_from_rbtree(maps)) - return NULL; + struct map *result = NULL; + bool done = false; - mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps), - sizeof(*mapp), map__strcmp_name); - if (mapp) - return *mapp; - return NULL; -} + /* See locking/sorting note. */ + while (!done) { + unsigned int i; -struct map *maps__find_by_name(struct maps *maps, const char *name) -{ - struct map_rb_node *rb_node; - struct map *map; + down_read(maps__lock(maps)); - down_read(maps__lock(maps)); + /* First check last found entry. */ + i = RC_CHK_ACCESS(maps)->last_search_by_name_idx; + if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) { + struct dso *dso = map__dso(maps__maps_by_name(maps)[i]); + if (dso && strcmp(dso->short_name, name) == 0) { + result = map__get(maps__maps_by_name(maps)[i]); + done = true; + } + } - if (RC_CHK_ACCESS(maps)->last_search_by_name) { - const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name); + /* Second search sorted array. */ + if (!done && maps__maps_by_name_sorted(maps)) { + struct map **mapp = + bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps), + sizeof(*mapp), map__strcmp_name); - if (strcmp(dso->short_name, name) == 0) { - map = RC_CHK_ACCESS(maps)->last_search_by_name; - goto out_unlock; + if (mapp) { + result = map__get(*mapp); + i = mapp - maps__maps_by_name(maps); + RC_CHK_ACCESS(maps)->last_search_by_name_idx = i; + } + done = true; } - } - /* - * If we have maps->maps_by_name, then the name isn't in the rbtree, - * as maps->maps_by_name mirrors the rbtree when lookups by name are - * made. - */ - map = __maps__find_by_name(maps, name); - if (map || maps__maps_by_name(maps) != NULL) - goto out_unlock; - - /* Fallback to traversing the rbtree... 
*/ - maps__for_each_entry(maps, rb_node) { - struct dso *dso; - - map = rb_node->map; - dso = map__dso(map); - if (strcmp(dso->short_name, name) == 0) { - RC_CHK_ACCESS(maps)->last_search_by_name = map; - goto out_unlock; + up_read(maps__lock(maps)); + if (!done) { + /* Sort and retry binary search. */ + if (maps__sort_by_name(maps)) { + /* + * Memory allocation failed do linear search + * through address sorted maps. + */ + struct map **maps_by_address; + unsigned int n; + + down_read(maps__lock(maps)); + maps_by_address = maps__maps_by_address(maps); + n = maps__nr_maps(maps); + for (i = 0; i < n; i++) { + struct map *pos = maps_by_address[i]; + struct dso *dso = map__dso(pos); + + if (dso && strcmp(dso->short_name, name) == 0) { + result = map__get(pos); + break; + } + } + up_read(maps__lock(maps)); + done = true; + } } } - map = NULL; - -out_unlock: - up_read(maps__lock(maps)); - return map; + return result; } struct map *maps__find_next_entry(struct maps *maps, struct map *map) { - struct map_rb_node *rb_node = maps__find_node(maps, map); - struct map_rb_node *next = map_rb_node__next(rb_node); + unsigned int i; + struct map *result = NULL; - if (next) - return next->map; + down_read(maps__lock(maps)); + i = maps__by_address_index(maps, map); + if (i < maps__nr_maps(maps)) + result = map__get(maps__maps_by_address(maps)[i]); - return NULL; + up_read(maps__lock(maps)); + return result; } void maps__fixup_end(struct maps *maps) { - struct map_rb_node *prev = NULL, *curr; + struct map **maps_by_address; + unsigned int n; down_write(maps__lock(maps)); + if (!maps__maps_by_address_sorted(maps)) + __maps__sort_by_address(maps); - maps__for_each_entry(maps, curr) { - if (prev && (!map__end(prev->map) || map__end(prev->map) > map__start(curr->map))) - map__set_end(prev->map, map__start(curr->map)); + maps_by_address = maps__maps_by_address(maps); + n = maps__nr_maps(maps); + for (unsigned int i = 1; i < n; i++) { + struct map *prev = maps_by_address[i - 1]; + struct map *curr = maps_by_address[i]; - prev = curr; + if (!map__end(prev) || map__end(prev) > map__start(curr)) + map__set_end(prev, map__start(curr)); } /* * We still haven't the actual symbols, so guess the * last map final address. */ - if (curr && !map__end(curr->map)) - map__set_end(curr->map, ~0ULL); + if (n > 0 && !map__end(maps_by_address[n - 1])) + map__set_end(maps_by_address[n - 1], ~0ULL); + + RC_CHK_ACCESS(maps)->ends_broken = false; up_write(maps__lock(maps)); } @@ -723,117 +1107,93 @@ void maps__fixup_end(struct maps *maps) */ int maps__merge_in(struct maps *kmaps, struct map *new_map) { - struct map_rb_node *rb_node; - struct rb_node *first; - bool overlaps; - LIST_HEAD(merged); - int err = 0; - - down_read(maps__lock(kmaps)); - first = first_ending_after(kmaps, new_map); - rb_node = first ? rb_entry(first, struct map_rb_node, rb_node) : NULL; - overlaps = rb_node && map__start(rb_node->map) < map__end(new_map); - up_read(maps__lock(kmaps)); + unsigned int first_after_, kmaps__nr_maps; + struct map **kmaps_maps_by_address; + struct map **merged_maps_by_address; + unsigned int merged_nr_maps_allocated; + + /* First try under a read lock. */ + while (true) { + down_read(maps__lock(kmaps)); + if (maps__maps_by_address_sorted(kmaps)) + break; - if (!overlaps) - return maps__insert(kmaps, new_map); + up_read(maps__lock(kmaps)); - maps__for_each_entry(kmaps, rb_node) { - struct map *old_map = rb_node->map; + /* First after binary search requires sorted maps. Sort and try again. 
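+ * first_ending_after() bisects maps_by_address for the first map
+ * whose end lies beyond new_map's start, which only works on an
+ * address-sorted array.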
*/ + maps__sort_by_address(kmaps); + } + first_after_ = first_ending_after(kmaps, new_map); + kmaps_maps_by_address = maps__maps_by_address(kmaps); - /* no overload with this one */ - if (map__end(new_map) < map__start(old_map) || - map__start(new_map) >= map__end(old_map)) - continue; + if (first_after_ >= maps__nr_maps(kmaps) || + map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) { + /* No overlap so regular insert suffices. */ + up_read(maps__lock(kmaps)); + return maps__insert(kmaps, new_map); + } + up_read(maps__lock(kmaps)); - if (map__start(new_map) < map__start(old_map)) { - /* - * |new...... - * |old.... - */ - if (map__end(new_map) < map__end(old_map)) { - /* - * |new......| -> |new..| - * |old....| -> |old....| - */ - map__set_end(new_map, map__start(old_map)); - } else { - /* - * |new.............| -> |new..| |new..| - * |old....| -> |old....| - */ - struct map_list_node *m = map_list_node__new(); + /* Plain insert with a read-lock failed, try again now with the write lock. */ + down_write(maps__lock(kmaps)); + if (!maps__maps_by_address_sorted(kmaps)) + __maps__sort_by_address(kmaps); + + first_after_ = first_ending_after(kmaps, new_map); + kmaps_maps_by_address = maps__maps_by_address(kmaps); + kmaps__nr_maps = maps__nr_maps(kmaps); + + if (first_after_ >= kmaps__nr_maps || + map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) { + /* No overlap so regular insert suffices. */ + int ret = __maps__insert(kmaps, new_map); + up_write(maps__lock(kmaps)); + return ret; + } + /* Array to merge into, possibly 1 more for the sake of new_map. */ + merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated; + if (kmaps__nr_maps + 1 == merged_nr_maps_allocated) + merged_nr_maps_allocated++; + + merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address)); + if (!merged_maps_by_address) { + up_write(maps__lock(kmaps)); + return -ENOMEM; + } + maps__set_maps_by_address(kmaps, merged_maps_by_address); + maps__set_maps_by_address_sorted(kmaps, true); + zfree(maps__maps_by_name_addr(kmaps)); + maps__set_maps_by_name_sorted(kmaps, true); + maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated); - if (!m) { - err = -ENOMEM; - goto out; - } + /* Copy entries before the new_map that can't overlap. */ + for (unsigned int i = 0; i < first_after_; i++) + merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]); - m->map = map__clone(new_map); - if (!m->map) { - free(m); - err = -ENOMEM; - goto out; - } + maps__set_nr_maps(kmaps, first_after_); - map__set_end(m->map, map__start(old_map)); - list_add_tail(&m->node, &merged); - map__add_pgoff(new_map, map__end(old_map) - map__start(new_map)); - map__set_start(new_map, map__end(old_map)); - } - } else { - /* - * |new...... - * |old.... - */ - if (map__end(new_map) < map__end(old_map)) { - /* - * |new..| -> x - * |old.........| -> |old.........| - */ - map__put(new_map); - new_map = NULL; - break; - } else { - /* - * |new......| -> |new...| - * |old....| -> |old....| - */ - map__add_pgoff(new_map, map__end(old_map) - map__start(new_map)); - map__set_start(new_map, map__end(old_map)); - } - } - } + /* Add the new map, it will be split when the later overlapping mappings are added. */ + __maps__insert(kmaps, new_map); -out: - while (!list_empty(&merged)) { - struct map_list_node *old_node; + /* Insert mappings after new_map, splitting new_map in the process. 
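+ * Each re-inserted old mapping overlaps new_map, so
+ * __maps__fixup_overlap_and_insert() trims or splits new_map
+ * around it, leaving only the non-overlapping pieces in place.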
*/ + for (unsigned int i = first_after_; i < kmaps__nr_maps; i++) + __maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]); - old_node = list_entry(merged.next, struct map_list_node, node); - list_del_init(&old_node->node); - if (!err) - err = maps__insert(kmaps, old_node->map); - map__put(old_node->map); - free(old_node); - } + /* Copy the maps from merged into kmaps. */ + for (unsigned int i = 0; i < kmaps__nr_maps; i++) + map__zput(kmaps_maps_by_address[i]); - if (new_map) { - if (!err) - err = maps__insert(kmaps, new_map); - map__put(new_map); - } - return err; + free(kmaps_maps_by_address); + up_write(maps__lock(kmaps)); + return 0; } void maps__load_first(struct maps *maps) { - struct map_rb_node *first; - down_read(maps__lock(maps)); - first = maps__first(maps); - if (first) - map__load(first->map); + if (maps__nr_maps(maps) > 0) + map__load(maps__maps_by_address(maps)[0]); up_read(maps__lock(maps)); } diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h index d836d04c94..d9aa62ed96 100644 --- a/tools/perf/util/maps.h +++ b/tools/perf/util/maps.h @@ -3,45 +3,15 @@ #define __PERF_MAPS_H #include <linux/refcount.h> -#include <linux/rbtree.h> #include <stdio.h> #include <stdbool.h> #include <linux/types.h> -#include "rwsem.h" -#include <internal/rc_check.h> struct ref_reloc_sym; struct machine; struct map; struct maps; -struct map_list_node { - struct list_head node; - struct map *map; -}; - -static inline struct map_list_node *map_list_node__new(void) -{ - return malloc(sizeof(struct map_list_node)); -} - -struct map *maps__find(struct maps *maps, u64 addr); - -DECLARE_RC_STRUCT(maps) { - struct rb_root entries; - struct rw_semaphore lock; - struct machine *machine; - struct map *last_search_by_name; - struct map **maps_by_name; - refcount_t refcnt; - unsigned int nr_maps; - unsigned int nr_maps_allocated; -#ifdef HAVE_LIBUNWIND_SUPPORT - void *addr_space; - const struct unwind_libunwind_ops *unwind_libunwind_ops; -#endif -}; - #define KMAP_NAME_LEN 256 struct kmap { @@ -65,36 +35,22 @@ static inline void __maps__zput(struct maps **map) #define maps__zput(map) __maps__zput(&map) +bool maps__equal(struct maps *a, struct maps *b); + /* Iterate over map calling cb for each entry. */ int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data); /* Iterate over map removing an entry if cb returns true. */ void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data); -static inline struct machine *maps__machine(struct maps *maps) -{ - return RC_CHK_ACCESS(maps)->machine; -} - -static inline unsigned int maps__nr_maps(const struct maps *maps) -{ - return RC_CHK_ACCESS(maps)->nr_maps; -} - -static inline refcount_t *maps__refcnt(struct maps *maps) -{ - return &RC_CHK_ACCESS(maps)->refcnt; -} +struct machine *maps__machine(const struct maps *maps); +unsigned int maps__nr_maps(const struct maps *maps); /* Test only. */ +refcount_t *maps__refcnt(struct maps *maps); /* Test only. 
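+ * The struct definition itself moved out of this header, so these
+ * out-of-line accessors are the only way tests can poke at the
+ * map count and refcount.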
*/ #ifdef HAVE_LIBUNWIND_SUPPORT -static inline void *maps__addr_space(struct maps *maps) -{ - return RC_CHK_ACCESS(maps)->addr_space; -} - -static inline const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps) -{ - return RC_CHK_ACCESS(maps)->unwind_libunwind_ops; -} +void *maps__addr_space(const struct maps *maps); +void maps__set_addr_space(struct maps *maps, void *addr_space); +const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps); +void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops); #endif size_t maps__fprintf(struct maps *maps, FILE *fp); @@ -102,6 +58,7 @@ size_t maps__fprintf(struct maps *maps, FILE *fp); int maps__insert(struct maps *maps, struct map *map); void maps__remove(struct maps *maps, struct map *map); +struct map *maps__find(struct maps *maps, u64 addr); struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp); struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp); @@ -117,8 +74,6 @@ struct map *maps__find_next_entry(struct maps *maps, struct map *map); int maps__merge_in(struct maps *kmaps, struct map *new_map); -void __maps__sort_by_name(struct maps *maps); - void maps__fixup_end(struct maps *maps); void maps__load_first(struct maps *maps); diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c index 3a2e368787..637cbd4a7b 100644 --- a/tools/perf/util/mem-events.c +++ b/tools/perf/util/mem-events.c @@ -17,51 +17,126 @@ unsigned int perf_mem_events__loads_ldlat = 30; -#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s } +#define E(t, n, s, l, a) { .tag = t, .name = n, .event_name = s, .ldlat = l, .aux_event = a } -static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = { - E("ldlat-loads", "cpu/mem-loads,ldlat=%u/P", "cpu/events/mem-loads"), - E("ldlat-stores", "cpu/mem-stores/P", "cpu/events/mem-stores"), - E(NULL, NULL, NULL), +struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = { + E("ldlat-loads", "%s/mem-loads,ldlat=%u/P", "mem-loads", true, 0), + E("ldlat-stores", "%s/mem-stores/P", "mem-stores", false, 0), + E(NULL, NULL, NULL, false, 0), }; #undef E static char mem_loads_name[100]; -static bool mem_loads_name__init; +static char mem_stores_name[100]; -struct perf_mem_event * __weak perf_mem_events__ptr(int i) +struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i) { - if (i >= PERF_MEM_EVENTS__MAX) + if (i >= PERF_MEM_EVENTS__MAX || !pmu) return NULL; - return &perf_mem_events[i]; + return &pmu->mem_events[i]; } -const char * __weak perf_mem_events__name(int i, const char *pmu_name __maybe_unused) +static struct perf_pmu *perf_pmus__scan_mem(struct perf_pmu *pmu) { - struct perf_mem_event *e = perf_mem_events__ptr(i); + while ((pmu = perf_pmus__scan(pmu)) != NULL) { + if (pmu->mem_events) + return pmu; + } + return NULL; +} + +struct perf_pmu *perf_mem_events_find_pmu(void) +{ + /* + * The current perf mem doesn't support per-PMU configuration. + * The exact same configuration is applied to all the + * mem_events supported PMUs. + * Return the first mem_events supported PMU. + * + * Notes: The only case which may support multiple mem_events + * supported PMUs is Intel hybrid. The exact same mem_events + * is shared among the PMUs. Only configure the first PMU + * is good enough as well. 
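+ *
+ * A minimal usage sketch with the helpers added here (error
+ * handling elided):
+ *
+ *   struct perf_pmu *pmu = perf_mem_events_find_pmu();
+ *
+ *   if (pmu && perf_pmu__mem_events_init(pmu) == 0)
+ *           perf_pmu__mem_events_list(pmu);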
+ */ + return perf_pmus__scan_mem(NULL); +} + +/** + * perf_pmu__mem_events_num_mem_pmus - Get the number of mem PMUs since the given pmu + * @pmu: Start pmu. If it's NULL, search the entire PMU list. + */ +int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu) +{ + int num = 0; + + while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) + num++; + + return num; +} +static const char *perf_pmu__mem_events_name(int i, struct perf_pmu *pmu) +{ + struct perf_mem_event *e; + + if (i >= PERF_MEM_EVENTS__MAX || !pmu) + return NULL; + + e = &pmu->mem_events[i]; if (!e) return NULL; - if (i == PERF_MEM_EVENTS__LOAD) { - if (!mem_loads_name__init) { - mem_loads_name__init = true; - scnprintf(mem_loads_name, sizeof(mem_loads_name), - e->name, perf_mem_events__loads_ldlat); + if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE) { + if (e->ldlat) { + if (!e->aux_event) { + /* ARM and Most of Intel */ + scnprintf(mem_loads_name, sizeof(mem_loads_name), + e->name, pmu->name, + perf_mem_events__loads_ldlat); + } else { + /* Intel with mem-loads-aux event */ + scnprintf(mem_loads_name, sizeof(mem_loads_name), + e->name, pmu->name, pmu->name, + perf_mem_events__loads_ldlat); + } + } else { + if (!e->aux_event) { + /* AMD and POWER */ + scnprintf(mem_loads_name, sizeof(mem_loads_name), + e->name, pmu->name); + } else + return NULL; } + return mem_loads_name; } - return e->name; + if (i == PERF_MEM_EVENTS__STORE) { + scnprintf(mem_stores_name, sizeof(mem_stores_name), + e->name, pmu->name); + return mem_stores_name; + } + + return NULL; } -__weak bool is_mem_loads_aux_event(struct evsel *leader __maybe_unused) +bool is_mem_loads_aux_event(struct evsel *leader) { - return false; + struct perf_pmu *pmu = leader->pmu; + struct perf_mem_event *e; + + if (!pmu || !pmu->mem_events) + return false; + + e = &pmu->mem_events[PERF_MEM_EVENTS__LOAD]; + if (!e->aux_event) + return false; + + return leader->core.attr.config == e->aux_event; } -int perf_mem_events__parse(const char *str) +int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str) { char *tok, *saveptr = NULL; bool found = false; @@ -79,7 +154,7 @@ int perf_mem_events__parse(const char *str) while (tok) { for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) { - struct perf_mem_event *e = perf_mem_events__ptr(j); + struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j); if (!e->tag) continue; @@ -100,19 +175,21 @@ int perf_mem_events__parse(const char *str) return -1; } -static bool perf_mem_event__supported(const char *mnt, struct perf_pmu *pmu, +static bool perf_pmu__mem_events_supported(const char *mnt, struct perf_pmu *pmu, struct perf_mem_event *e) { - char sysfs_name[100]; char path[PATH_MAX]; struct stat st; - scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name); - scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name); + if (!e->event_name) + return true; + + scnprintf(path, PATH_MAX, "%s/devices/%s/events/%s", mnt, pmu->name, e->event_name); + return !stat(path, &st); } -int perf_mem_events__init(void) +int perf_pmu__mem_events_init(struct perf_pmu *pmu) { const char *mnt = sysfs__mount(); bool found = false; @@ -122,8 +199,7 @@ int perf_mem_events__init(void) return -ENOENT; for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) { - struct perf_mem_event *e = perf_mem_events__ptr(j); - struct perf_pmu *pmu = NULL; + struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j); /* * If the event entry isn't valid, skip initialization @@ -132,103 +208,66 @@ int perf_mem_events__init(void) if (!e->tag) continue; - /* - * 
Scan all PMUs not just core ones, since perf mem/c2c on - * platforms like AMD uses IBS OP PMU which is independent - * of core PMU. - */ - while ((pmu = perf_pmus__scan(pmu)) != NULL) { - e->supported |= perf_mem_event__supported(mnt, pmu, e); - if (e->supported) { - found = true; - break; - } - } + e->supported |= perf_pmu__mem_events_supported(mnt, pmu, e); + if (e->supported) + found = true; } return found ? 0 : -ENOENT; } -void perf_mem_events__list(void) +void perf_pmu__mem_events_list(struct perf_pmu *pmu) { int j; for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) { - struct perf_mem_event *e = perf_mem_events__ptr(j); + struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, j); fprintf(stderr, "%-*s%-*s%s", e->tag ? 13 : 0, e->tag ? : "", e->tag && verbose > 0 ? 25 : 0, - e->tag && verbose > 0 ? perf_mem_events__name(j, NULL) : "", + e->tag && verbose > 0 ? perf_pmu__mem_events_name(j, pmu) : "", e->supported ? ": available\n" : ""); } } -static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e, - int idx) +int perf_mem_events__record_args(const char **rec_argv, int *argv_nr) { const char *mnt = sysfs__mount(); struct perf_pmu *pmu = NULL; - - while ((pmu = perf_pmus__scan(pmu)) != NULL) { - if (!perf_mem_event__supported(mnt, pmu, e)) { - pr_err("failed: event '%s' not supported\n", - perf_mem_events__name(idx, pmu->name)); - } - } -} - -int perf_mem_events__record_args(const char **rec_argv, int *argv_nr, - char **rec_tmp, int *tmp_nr) -{ - const char *mnt = sysfs__mount(); - int i = *argv_nr, k = 0; struct perf_mem_event *e; + int i = *argv_nr; + const char *s; + char *copy; - for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) { - e = perf_mem_events__ptr(j); - if (!e->record) - continue; + while ((pmu = perf_pmus__scan_mem(pmu)) != NULL) { + for (int j = 0; j < PERF_MEM_EVENTS__MAX; j++) { + e = perf_pmu__mem_events_ptr(pmu, j); + + if (!e->record) + continue; - if (perf_pmus__num_mem_pmus() == 1) { if (!e->supported) { pr_err("failed: event '%s' not supported\n", - perf_mem_events__name(j, NULL)); + perf_pmu__mem_events_name(j, pmu)); return -1; } - rec_argv[i++] = "-e"; - rec_argv[i++] = perf_mem_events__name(j, NULL); - } else { - struct perf_pmu *pmu = NULL; + s = perf_pmu__mem_events_name(j, pmu); + if (!s || !perf_pmu__mem_events_supported(mnt, pmu, e)) + continue; - if (!e->supported) { - perf_mem_events__print_unsupport_hybrid(e, j); + copy = strdup(s); + if (!copy) return -1; - } - - while ((pmu = perf_pmus__scan(pmu)) != NULL) { - const char *s = perf_mem_events__name(j, pmu->name); - - if (!perf_mem_event__supported(mnt, pmu, e)) - continue; - rec_argv[i++] = "-e"; - if (s) { - char *copy = strdup(s); - if (!copy) - return -1; - - rec_argv[i++] = copy; - rec_tmp[k++] = copy; - } - } + rec_argv[i++] = "-e"; + rec_argv[i++] = copy; } } *argv_nr = i; - *tmp_nr = k; return 0; } diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h index b40ad6ea93..15d5f0320d 100644 --- a/tools/perf/util/mem-events.h +++ b/tools/perf/util/mem-events.h @@ -14,9 +14,11 @@ struct perf_mem_event { bool record; bool supported; + bool ldlat; + u32 aux_event; const char *tag; const char *name; - const char *sysfs_name; + const char *event_name; }; struct mem_info { @@ -34,17 +36,18 @@ enum { }; extern unsigned int perf_mem_events__loads_ldlat; +extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX]; -int perf_mem_events__parse(const char *str); -int perf_mem_events__init(void); +int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str); +int 
perf_pmu__mem_events_init(struct perf_pmu *pmu); -const char *perf_mem_events__name(int i, const char *pmu_name); -struct perf_mem_event *perf_mem_events__ptr(int i); +struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i); +struct perf_pmu *perf_mem_events_find_pmu(void); +int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu); bool is_mem_loads_aux_event(struct evsel *leader); -void perf_mem_events__list(void); -int perf_mem_events__record_args(const char **rec_argv, int *argv_nr, - char **rec_tmp, int *tmp_nr); +void perf_pmu__mem_events_list(struct perf_pmu *pmu); +int perf_mem_events__record_args(const char **rec_argv, int *argv_nr); int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info); int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info); diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c index 966cca5a3e..79ef6095ab 100644 --- a/tools/perf/util/metricgroup.c +++ b/tools/perf/util/metricgroup.c @@ -44,6 +44,8 @@ struct metric_event *metricgroup__lookup(struct rblist *metric_events, if (!metric_events) return NULL; + if (evsel && evsel->metric_leader) + me.evsel = evsel->metric_leader; nd = rblist__find(metric_events, &me); if (nd) return container_of(nd, struct metric_event, nd); @@ -350,25 +352,23 @@ static int setup_metric_events(const char *pmu, struct hashmap *ids, return 0; } -static bool match_metric(const char *n, const char *list) +static bool match_metric(const char *metric_or_groups, const char *sought) { int len; char *m; - if (!list) + if (!sought) return false; - if (!strcmp(list, "all")) + if (!strcmp(sought, "all")) return true; - if (!n) - return !strcasecmp(list, "No_group"); - len = strlen(list); - m = strcasestr(n, list); - if (!m) - return false; - if ((m == n || m[-1] == ';' || m[-1] == ' ') && - (m[len] == 0 || m[len] == ';')) + if (!metric_or_groups) + return !strcasecmp(sought, "No_group"); + len = strlen(sought); + if (!strncasecmp(metric_or_groups, sought, len) && + (metric_or_groups[len] == 0 || metric_or_groups[len] == ';')) return true; - return false; + m = strchr(metric_or_groups, ';'); + return m && match_metric(m + 1, sought); } static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 66eabcea42..6f8b0fa176 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -2181,50 +2181,53 @@ int parse_event(struct evlist *evlist, const char *str) return ret; } +struct parse_events_error_entry { + /** @list: The list the error is part of. 
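+ * Entries are chained off parse_events_error's list head so every
+ * parse error can be kept and reported, rather than only the first
+ * and the most recent as before.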
*/ + struct list_head list; + /** @idx: index in the parsed string */ + int idx; + /** @str: string to display at the index */ + char *str; + /** @help: optional help string */ + char *help; +}; + void parse_events_error__init(struct parse_events_error *err) { - bzero(err, sizeof(*err)); + INIT_LIST_HEAD(&err->list); } void parse_events_error__exit(struct parse_events_error *err) { - zfree(&err->str); - zfree(&err->help); - zfree(&err->first_str); - zfree(&err->first_help); + struct parse_events_error_entry *pos, *tmp; + + list_for_each_entry_safe(pos, tmp, &err->list, list) { + zfree(&pos->str); + zfree(&pos->help); + list_del_init(&pos->list); + free(pos); + } } void parse_events_error__handle(struct parse_events_error *err, int idx, char *str, char *help) { + struct parse_events_error_entry *entry; + if (WARN(!str || !err, "WARNING: failed to provide error string or struct\n")) goto out_free; - switch (err->num_errors) { - case 0: - err->idx = idx; - err->str = str; - err->help = help; - break; - case 1: - err->first_idx = err->idx; - err->idx = idx; - err->first_str = err->str; - err->str = str; - err->first_help = err->help; - err->help = help; - break; - default: - pr_debug("Multiple errors dropping message: %s (%s)\n", - err->str, err->help ?: "<no help>"); - free(err->str); - err->str = str; - free(err->help); - err->help = help; - break; + + entry = zalloc(sizeof(*entry)); + if (!entry) { + pr_err("Failed to allocate memory for event parsing error: %s (%s)\n", + str, help ?: "<no help>"); + goto out_free; } - err->num_errors++; + entry->idx = idx; + entry->str = str; + entry->help = help; + list_add(&entry->list, &err->list); return; - out_free: free(str); free(help); @@ -2294,19 +2297,34 @@ static void __parse_events_error__print(int err_idx, const char *err_str, } } -void parse_events_error__print(struct parse_events_error *err, +void parse_events_error__print(const struct parse_events_error *err, const char *event) { - if (!err->num_errors) - return; + struct parse_events_error_entry *pos; + bool first = true; + + list_for_each_entry(pos, &err->list, list) { + if (!first) + fputs("\n", stderr); + __parse_events_error__print(pos->idx, pos->str, pos->help, event); + first = false; + } +} - __parse_events_error__print(err->idx, err->str, err->help, event); +/* + * In the list of errors err, do any of the error strings (str) contain the + * given needle string? + */ +bool parse_events_error__contains(const struct parse_events_error *err, + const char *needle) +{ + struct parse_events_error_entry *pos; - if (err->num_errors > 1) { - fputs("\nInitial error:\n", stderr); - __parse_events_error__print(err->first_idx, err->first_str, - err->first_help, event); + list_for_each_entry(pos, &err->list, list) { + if (strstr(pos->str, needle) != NULL) + return true; } + return false; } #undef MAX_WIDTH diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index 63c0a36a4b..809359e854 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h @@ -130,13 +130,8 @@ struct parse_events_term { }; struct parse_events_error { - int num_errors; /* number of errors encountered */ - int idx; /* index in the parsed string */ - char *str; /* string to display at the index */ - char *help; /* optional help string */ - int first_idx;/* as above, but for the first encountered error */ - char *first_str; - char *first_help; + /** @list: The head of a list of errors. 
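+ * Typical lifecycle (a sketch; assumes the usual
+ * parse_events(evlist, str, &err) entry point):
+ *
+ *   struct parse_events_error err;
+ *
+ *   parse_events_error__init(&err);
+ *   if (parse_events(evlist, str, &err))
+ *           parse_events_error__print(&err, str);
+ *   parse_events_error__exit(&err);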
*/ + struct list_head list; }; /* A wrapper around a list of terms for the sake of better type safety. */ @@ -247,9 +242,10 @@ void parse_events_error__init(struct parse_events_error *err); void parse_events_error__exit(struct parse_events_error *err); void parse_events_error__handle(struct parse_events_error *err, int idx, char *str, char *help); -void parse_events_error__print(struct parse_events_error *err, +void parse_events_error__print(const struct parse_events_error *err, const char *event); - +bool parse_events_error__contains(const struct parse_events_error *err, + const char *needle); #ifdef HAVE_LIBELF_SUPPORT /* * If the probe point starts with '%', diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y index de098caf0c..d70f5d84af 100644 --- a/tools/perf/util/parse-events.y +++ b/tools/perf/util/parse-events.y @@ -536,8 +536,6 @@ tracepoint_name opt_event_config list = alloc_list(); if (!list) YYNOMEM; - if (error) - error->idx = @1.first_column; err = parse_events_add_tracepoint(list, &parse_state->idx, $1.sys, $1.event, error, $2, &@1); diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c index a4a100425b..cda1c62096 100644 --- a/tools/perf/util/parse-regs-options.c +++ b/tools/perf/util/parse-regs-options.c @@ -46,22 +46,18 @@ __parse_regs(const struct option *opt, const char *str, int unset, bool intr) if (!strcmp(s, "?")) { fprintf(stderr, "available registers: "); -#ifdef HAVE_PERF_REGS_SUPPORT - for (r = sample_reg_masks; r->name; r++) { + for (r = arch__sample_reg_masks(); r->name; r++) { if (r->mask & mask) fprintf(stderr, "%s ", r->name); } -#endif fputc('\n', stderr); /* just printing available regs */ goto error; } -#ifdef HAVE_PERF_REGS_SUPPORT - for (r = sample_reg_masks; r->name; r++) { + for (r = arch__sample_reg_masks(); r->name; r++) { if ((r->mask & mask) && !strcasecmp(s, r->name)) break; } -#endif if (!r || !r->name) { ui__warning("Unknown register \"%s\", check man page or run \"perf record %s?\"\n", s, intr ? 
"-I" : "--user-regs="); diff --git a/tools/perf/util/perf-regs-arch/perf_regs_aarch64.c b/tools/perf/util/perf-regs-arch/perf_regs_aarch64.c index 696566c547..9dcda80d31 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_aarch64.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_aarch64.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/arm64/include/uapi/asm/perf_regs.h" @@ -92,5 +90,3 @@ uint64_t __perf_reg_sp_arm64(void) { return PERF_REG_ARM64_SP; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_arm.c b/tools/perf/util/perf-regs-arch/perf_regs_arm.c index 700fd07cd2..e29d130a58 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_arm.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_arm.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/arm/include/uapi/asm/perf_regs.h" @@ -56,5 +54,3 @@ uint64_t __perf_reg_sp_arm(void) { return PERF_REG_ARM_SP; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_csky.c b/tools/perf/util/perf-regs-arch/perf_regs_csky.c index a2841094e0..75b461ef2e 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_csky.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_csky.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../arch/csky/include/uapi/asm/perf_regs.h" @@ -96,5 +94,3 @@ uint64_t __perf_reg_sp_csky(void) { return PERF_REG_CSKY_SP; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_loongarch.c b/tools/perf/util/perf-regs-arch/perf_regs_loongarch.c index a9ba0f9341..043f97f4e3 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_loongarch.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_loongarch.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/loongarch/include/uapi/asm/perf_regs.h" @@ -87,5 +85,3 @@ uint64_t __perf_reg_sp_loongarch(void) { return PERF_REG_LOONGARCH_R3; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_mips.c b/tools/perf/util/perf-regs-arch/perf_regs_mips.c index 5a45830cfb..793178fc3c 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_mips.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_mips.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/mips/include/uapi/asm/perf_regs.h" @@ -83,5 +81,3 @@ uint64_t __perf_reg_sp_mips(void) { return PERF_REG_MIPS_R29; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_powerpc.c b/tools/perf/util/perf-regs-arch/perf_regs_powerpc.c index 1f0d682db7..08636bb09a 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_powerpc.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_powerpc.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/powerpc/include/uapi/asm/perf_regs.h" @@ -141,5 +139,3 @@ uint64_t __perf_reg_sp_powerpc(void) { return PERF_REG_POWERPC_R1; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_riscv.c b/tools/perf/util/perf-regs-arch/perf_regs_riscv.c index e432630be4..337b687c65 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_riscv.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_riscv.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" 
#include "../../../arch/riscv/include/uapi/asm/perf_regs.h" @@ -88,5 +86,3 @@ uint64_t __perf_reg_sp_riscv(void) { return PERF_REG_RISCV_SP; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_s390.c b/tools/perf/util/perf-regs-arch/perf_regs_s390.c index 1c7a46db77..d69bba8810 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_s390.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_s390.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/s390/include/uapi/asm/perf_regs.h" @@ -92,5 +90,3 @@ uint64_t __perf_reg_sp_s390(void) { return PERF_REG_S390_R15; } - -#endif diff --git a/tools/perf/util/perf-regs-arch/perf_regs_x86.c b/tools/perf/util/perf-regs-arch/perf_regs_x86.c index 873c620f06..708954a9d3 100644 --- a/tools/perf/util/perf-regs-arch/perf_regs_x86.c +++ b/tools/perf/util/perf-regs-arch/perf_regs_x86.c @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#ifdef HAVE_PERF_REGS_SUPPORT - #include "../perf_regs.h" #include "../../../arch/x86/include/uapi/asm/perf_regs.h" @@ -94,5 +92,3 @@ uint64_t __perf_reg_sp_x86(void) { return PERF_REG_X86_SP; } - -#endif diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c index 8f04d3b7f3..59fbbba796 100644 --- a/tools/perf/util/perf_event_attr_fprintf.c +++ b/tools/perf/util/perf_event_attr_fprintf.c @@ -7,6 +7,8 @@ #include <linux/types.h> #include <linux/perf_event.h> #include "util/evsel_fprintf.h" +#include "util/pmu.h" +#include "util/pmus.h" #include "trace-event.h" struct bit_names { @@ -75,9 +77,12 @@ static void __p_read_format(char *buf, size_t size, u64 value) } #define ENUM_ID_TO_STR_CASE(x) case x: return (#x); -static const char *stringify_perf_type_id(u64 value) +static const char *stringify_perf_type_id(struct perf_pmu *pmu, u32 type) { - switch (value) { + if (pmu) + return pmu->name; + + switch (type) { ENUM_ID_TO_STR_CASE(PERF_TYPE_HARDWARE) ENUM_ID_TO_STR_CASE(PERF_TYPE_SOFTWARE) ENUM_ID_TO_STR_CASE(PERF_TYPE_TRACEPOINT) @@ -175,9 +180,9 @@ do { \ #define print_id_unsigned(_s) PRINT_ID(_s, "%"PRIu64) #define print_id_hex(_s) PRINT_ID(_s, "%#"PRIx64) -static void __p_type_id(char *buf, size_t size, u64 value) +static void __p_type_id(struct perf_pmu *pmu, char *buf, size_t size, u64 value) { - print_id_unsigned(stringify_perf_type_id(value)); + print_id_unsigned(stringify_perf_type_id(pmu, value)); } static void __p_config_hw_id(char *buf, size_t size, u64 value) @@ -217,8 +222,14 @@ static void __p_config_tracepoint_id(char *buf, size_t size, u64 value) } #endif -static void __p_config_id(char *buf, size_t size, u32 type, u64 value) +static void __p_config_id(struct perf_pmu *pmu, char *buf, size_t size, u32 type, u64 value) { + const char *name = perf_pmu__name_from_config(pmu, value); + + if (name) { + print_id_hex(name); + return; + } switch (type) { case PERF_TYPE_HARDWARE: return __p_config_hw_id(buf, size, value); @@ -246,8 +257,8 @@ static void __p_config_id(char *buf, size_t size, u32 type, u64 value) #define p_sample_type(val) __p_sample_type(buf, BUF_SIZE, val) #define p_branch_sample_type(val) __p_branch_sample_type(buf, BUF_SIZE, val) #define p_read_format(val) __p_read_format(buf, BUF_SIZE, val) -#define p_type_id(val) __p_type_id(buf, BUF_SIZE, val) -#define p_config_id(val) __p_config_id(buf, BUF_SIZE, attr->type, val) +#define p_type_id(val) __p_type_id(pmu, buf, BUF_SIZE, val) +#define p_config_id(val) __p_config_id(pmu, buf, BUF_SIZE, attr->type, val) #define 
PRINT_ATTRn(_n, _f, _p, _a) \ do { \ @@ -262,6 +273,7 @@ do { \ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr, attr__fprintf_f attr__fprintf, void *priv) { + struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type); char buf[BUF_SIZE]; int ret = 0; diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c index e2275856b5..44b90bbf2d 100644 --- a/tools/perf/util/perf_regs.c +++ b/tools/perf/util/perf_regs.c @@ -21,7 +21,14 @@ uint64_t __weak arch__user_reg_mask(void) return 0; } -#ifdef HAVE_PERF_REGS_SUPPORT +static const struct sample_reg sample_reg_masks[] = { + SMPL_REG_END +}; + +const struct sample_reg * __weak arch__sample_reg_masks(void) +{ + return sample_reg_masks; +} const char *perf_reg_name(int id, const char *arch) { @@ -125,5 +132,3 @@ uint64_t perf_arch_reg_sp(const char *arch) pr_err("Fail to find SP register for arch %s, returns 0\n", arch); return 0; } - -#endif diff --git a/tools/perf/util/perf_regs.h b/tools/perf/util/perf_regs.h index ecd2a53620..f2d0736d65 100644 --- a/tools/perf/util/perf_regs.h +++ b/tools/perf/util/perf_regs.h @@ -26,9 +26,7 @@ enum { int arch_sdt_arg_parse_op(char *old_op, char **new_op); uint64_t arch__intr_reg_mask(void); uint64_t arch__user_reg_mask(void); - -#ifdef HAVE_PERF_REGS_SUPPORT -extern const struct sample_reg sample_reg_masks[]; +const struct sample_reg *arch__sample_reg_masks(void); const char *perf_reg_name(int id, const char *arch); int perf_reg_value(u64 *valp, struct regs_dump *regs, int id); @@ -67,34 +65,4 @@ static inline uint64_t DWARF_MINIMAL_REGS(const char *arch) return (1ULL << perf_arch_reg_ip(arch)) | (1ULL << perf_arch_reg_sp(arch)); } -#else - -static inline uint64_t DWARF_MINIMAL_REGS(const char *arch __maybe_unused) -{ - return 0; -} - -static inline const char *perf_reg_name(int id __maybe_unused, const char *arch __maybe_unused) -{ - return "unknown"; -} - -static inline int perf_reg_value(u64 *valp __maybe_unused, - struct regs_dump *regs __maybe_unused, - int id __maybe_unused) -{ - return 0; -} - -static inline uint64_t perf_arch_reg_ip(const char *arch __maybe_unused) -{ - return 0; -} - -static inline uint64_t perf_arch_reg_sp(const char *arch __maybe_unused) -{ - return 0; -} - -#endif /* HAVE_PERF_REGS_SUPPORT */ #endif /* __PERF_REGS_H */ diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 6b82f4759c..cc349d9cb0 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -36,6 +36,18 @@ struct perf_pmu perf_pmu__fake = { #define UNIT_MAX_LEN 31 /* max length for event unit name */ +enum event_source { + /* An event loaded from /sys/devices/<pmu>/events. */ + EVENT_SRC_SYSFS, + /* An event loaded from a CPUID matched json file. */ + EVENT_SRC_CPU_JSON, + /* + * An event loaded from a /sys/devices/<pmu>/identifier matched json + * file. + */ + EVENT_SRC_SYS_JSON, +}; + /** * struct perf_pmu_alias - An event either read from sysfs or builtin in * pmu-events.c, created by parsing the pmu-events json files. @@ -425,9 +437,30 @@ static struct perf_pmu_alias *perf_pmu__find_alias(struct perf_pmu *pmu, { struct perf_pmu_alias *alias; - if (load && !pmu->sysfs_aliases_loaded) - pmu_aliases_parse(pmu); + if (load && !pmu->sysfs_aliases_loaded) { + bool has_sysfs_event; + char event_file_name[FILENAME_MAX + 8]; + + /* + * Test if alias/event 'name' exists in the PMU's sysfs/events + * directory. If not skip parsing the sysfs aliases. Sysfs event + * name must be all lower or all upper case. 
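+ * E.g. looking up "inst_retired.any" first probes
+ * "events/inst_retired.any", then "events/INST_RETIRED.ANY", and
+ * only a hit pays for the full pmu_aliases_parse().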
+ */ + scnprintf(event_file_name, sizeof(event_file_name), "events/%s", name); + for (size_t i = 7, n = 7 + strlen(name); i < n; i++) + event_file_name[i] = tolower(event_file_name[i]); + + has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name); + if (!has_sysfs_event) { + for (size_t i = 7, n = 7 + strlen(name); i < n; i++) + event_file_name[i] = toupper(event_file_name[i]); + + has_sysfs_event = perf_pmu__file_exists(pmu, event_file_name); + } + if (has_sysfs_event) + pmu_aliases_parse(pmu); + } list_for_each_entry(alias, &pmu->aliases, list) { if (!strcasecmp(alias->name, name)) return alias; @@ -500,7 +533,7 @@ static int update_alias(const struct pmu_event *pe, static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name, const char *desc, const char *val, FILE *val_fd, - const struct pmu_event *pe) + const struct pmu_event *pe, enum event_source src) { struct perf_pmu_alias *alias; int ret; @@ -552,25 +585,30 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name, } snprintf(alias->unit, sizeof(alias->unit), "%s", unit); } - if (!pe) { - /* Update an event from sysfs with json data. */ - struct update_alias_data data = { - .pmu = pmu, - .alias = alias, - }; - + switch (src) { + default: + case EVENT_SRC_SYSFS: alias->from_sysfs = true; if (pmu->events_table) { + /* Update an event from sysfs with json data. */ + struct update_alias_data data = { + .pmu = pmu, + .alias = alias, + }; if (pmu_events_table__find_event(pmu->events_table, pmu, name, update_alias, &data) == 0) - pmu->loaded_json_aliases++; + pmu->cpu_json_aliases++; } - } - - if (!pe) pmu->sysfs_aliases++; - else - pmu->loaded_json_aliases++; + break; + case EVENT_SRC_CPU_JSON: + pmu->cpu_json_aliases++; + break; + case EVENT_SRC_SYS_JSON: + pmu->sys_json_aliases++; + break; + + } list_add_tail(&alias->list, &pmu->aliases); return 0; } @@ -646,7 +684,8 @@ static int pmu_aliases_parse(struct perf_pmu *pmu) } if (perf_pmu__new_alias(pmu, name, /*desc=*/ NULL, - /*val=*/ NULL, file, /*pe=*/ NULL) < 0) + /*val=*/ NULL, file, /*pe=*/ NULL, + EVENT_SRC_SYSFS) < 0) pr_debug("Cannot set up %s\n", name); fclose(file); } @@ -657,7 +696,7 @@ static int pmu_aliases_parse(struct perf_pmu *pmu) return 0; } -static int pmu_alias_terms(struct perf_pmu_alias *alias, struct list_head *terms) +static int pmu_alias_terms(struct perf_pmu_alias *alias, int err_loc, struct list_head *terms) { struct parse_events_term *term, *cloned; struct parse_events_terms clone_terms; @@ -675,6 +714,7 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias, struct list_head *terms * which we don't want for implicit terms in aliases. 
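* Propagating err_loc into the clones means any later error on an
* expanded term still points back at the user's original token in
* the event string.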
*/ cloned->weak = true; + cloned->err_term = cloned->err_val = err_loc; list_add_tail(&cloned->list, &clone_terms.terms); } list_splice_init(&clone_terms.terms, terms); @@ -899,7 +939,8 @@ static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe, { struct perf_pmu *pmu = vdata; - perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL, pe); + perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL, + pe, EVENT_SRC_CPU_JSON); return 0; } @@ -934,13 +975,14 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe, return 0; if (pmu_uncore_alias_match(pe->pmu, pmu->name) && - pmu_uncore_identifier_match(pe->compat, pmu->id)) { + pmu_uncore_identifier_match(pe->compat, pmu->id)) { perf_pmu__new_alias(pmu, pe->name, pe->desc, pe->event, /*val_fd=*/ NULL, - pe); + pe, + EVENT_SRC_SYS_JSON); } return 0; @@ -986,8 +1028,10 @@ static int pmu_max_precise(int dirfd, struct perf_pmu *pmu) } void __weak -perf_pmu__arch_init(struct perf_pmu *pmu __maybe_unused) +perf_pmu__arch_init(struct perf_pmu *pmu) { + if (pmu->is_core) + pmu->mem_events = perf_mem_events; } struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char *name) @@ -1032,6 +1076,12 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char pmu->max_precise = pmu_max_precise(dirfd, pmu); pmu->alias_name = pmu_find_alias_name(pmu, dirfd); pmu->events_table = perf_pmu__find_events_table(pmu); + /* + * Load the sys json events/aliases when loading the PMU as each event + * may have a different compat regular expression. We therefore can't + * know the number of sys json events/aliases without computing the + * regular expressions for them all. + */ pmu_add_sys_aliases(pmu); list_add_tail(&pmu->list, pmus); @@ -1360,8 +1410,8 @@ static int pmu_config_term(const struct perf_pmu *pmu, parse_events_error__handle(err, term->err_val, asprintf(&err_str, - "value too big for format, maximum is %llu", - (unsigned long long)max_val) < 0 + "value too big for format (%s), maximum is %llu", + format->name, (unsigned long long)max_val) < 0 ? strdup("value too big for format") : err_str, NULL); @@ -1515,7 +1565,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_ alias = pmu_find_alias(pmu, term); if (!alias) continue; - ret = pmu_alias_terms(alias, &term->list); + ret = pmu_alias_terms(alias, term->err_term, &term->list); if (ret) { parse_events_error__handle(err, term->err_term, strdup("Failure to duplicate terms"), @@ -1629,15 +1679,15 @@ size_t perf_pmu__num_events(struct perf_pmu *pmu) { size_t nr; - if (!pmu->sysfs_aliases_loaded) - pmu_aliases_parse(pmu); - - nr = pmu->sysfs_aliases; + pmu_aliases_parse(pmu); + nr = pmu->sysfs_aliases + pmu->sys_json_aliases;; if (pmu->cpu_aliases_added) - nr += pmu->loaded_json_aliases; + nr += pmu->cpu_json_aliases; else if (pmu->events_table) - nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->loaded_json_aliases; + nr += pmu_events_table__num_events(pmu->events_table, pmu) - pmu->cpu_json_aliases; + else + assert(pmu->cpu_json_aliases == 0); return pmu->selectable ? 
nr + 1 : nr; } @@ -1690,6 +1740,7 @@ int perf_pmu__for_each_event(struct perf_pmu *pmu, bool skip_duplicate_pmus, struct strbuf sb; strbuf_init(&sb, /*hint=*/ 0); + pmu_aliases_parse(pmu); pmu_add_cpu_aliases(pmu); list_for_each_entry(event, &pmu->aliases, list) { size_t buf_used; @@ -2082,3 +2133,22 @@ void perf_pmu__delete(struct perf_pmu *pmu) zfree(&pmu->id); free(pmu); } + +const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config) +{ + struct perf_pmu_alias *event; + + if (!pmu) + return NULL; + + pmu_aliases_parse(pmu); + pmu_add_cpu_aliases(pmu); + list_for_each_entry(event, &pmu->aliases, list) { + struct perf_event_attr attr = {.config = 0,}; + int ret = perf_pmu__config(pmu, &attr, &event->terms, NULL); + + if (ret == 0 && config == attr.config) + return event->name; + } + return NULL; +} diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 424c3fee09..77c59ebc05 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -10,6 +10,8 @@ #include <stdio.h> #include "parse-events.h" #include "pmu-events/pmu-events.h" +#include "map_symbol.h" +#include "mem-events.h" struct evsel_config_term; struct perf_cpu_map; @@ -121,8 +123,10 @@ struct perf_pmu { const struct pmu_events_table *events_table; /** @sysfs_aliases: Number of sysfs aliases loaded. */ uint32_t sysfs_aliases; - /** @sysfs_aliases: Number of json event aliases loaded. */ - uint32_t loaded_json_aliases; + /** @cpu_json_aliases: Number of json event aliases loaded specific to the CPUID. */ + uint32_t cpu_json_aliases; + /** @sys_json_aliases: Number of json event aliases loaded matching the PMU's identifier. */ + uint32_t sys_json_aliases; /** @sysfs_aliases_loaded: Are sysfs aliases loaded from disk? */ bool sysfs_aliases_loaded; /** @@ -162,6 +166,11 @@ struct perf_pmu { */ bool exclude_guest; } missing_features; + + /** + * @mem_events: List of the supported mem events + */ + struct perf_mem_event *mem_events; }; /** @perf_pmu__fake: A special global PMU used for testing. */ @@ -266,5 +275,6 @@ struct perf_pmu *perf_pmu__lookup(struct list_head *pmus, int dirfd, const char struct perf_pmu *perf_pmu__create_placeholder_core_pmu(struct list_head *core_pmus); void perf_pmu__delete(struct perf_pmu *pmu); struct perf_pmu *perf_pmus__find_core_pmu(void); +const char *perf_pmu__name_from_config(struct perf_pmu *pmu, u64 config); #endif /* __PMU_H */ diff --git a/tools/perf/util/pmus.c b/tools/perf/util/pmus.c index ce49314617..16505071d3 100644 --- a/tools/perf/util/pmus.c +++ b/tools/perf/util/pmus.c @@ -345,12 +345,6 @@ const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str) return NULL; } -int __weak perf_pmus__num_mem_pmus(void) -{ - /* All core PMUs are for mem events. */ - return perf_pmus__num_core_pmus(); -} - /** Struct for ordering events as output in perf list. */ struct sevent { /** PMU for event. 
*/ diff --git a/tools/perf/util/pmus.h b/tools/perf/util/pmus.h index 4c67153ac2..94d2a08d89 100644 --- a/tools/perf/util/pmus.h +++ b/tools/perf/util/pmus.h @@ -17,7 +17,6 @@ struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu); const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str); -int perf_pmus__num_mem_pmus(void); void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state); bool perf_pmus__have_event(const char *pname, const char *name); int perf_pmus__num_core_pmus(void); diff --git a/tools/perf/util/print-events.c b/tools/perf/util/print-events.c index b14d1a894a..7b54e93854 100644 --- a/tools/perf/util/print-events.c +++ b/tools/perf/util/print-events.c @@ -28,6 +28,7 @@ #include "tracepoint.h" #include "pfm.h" #include "thread_map.h" +#include "util.h" #define MAX_NAME_LEN 100 @@ -63,6 +64,8 @@ void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unus { char *events_path = get_tracing_file("events"); int events_fd = open(events_path, O_PATH); + struct dirent **sys_namelist = NULL; + int sys_items; put_tracing_file(events_path); if (events_fd < 0) { @@ -70,10 +73,7 @@ void print_tracepoint_events(const struct print_callbacks *print_cb __maybe_unus return; } -#ifdef HAVE_SCANDIRAT_SUPPORT -{ - struct dirent **sys_namelist = NULL; - int sys_items = tracing_events__scandir_alphasort(&sys_namelist); + sys_items = tracing_events__scandir_alphasort(&sys_namelist); for (int i = 0; i < sys_items; i++) { struct dirent *sys_dirent = sys_namelist[i]; @@ -130,11 +130,6 @@ next_sys: } free(sys_namelist); -} -#else - printf("\nWARNING: Your libc doesn't have the scandirat function, please ask its maintainers to implement it.\n" - " As a rough fallback, please do 'ls %s' to see the available tracepoint events.\n", events_path); -#endif close(events_fd); } diff --git a/tools/perf/util/print_insn.c b/tools/perf/util/print_insn.c new file mode 100644 index 0000000000..459e0e93d7 --- /dev/null +++ b/tools/perf/util/print_insn.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Instruction binary disassembler based on capstone. 
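+ *
+ * If capstone cannot be initialised for the sample's machine,
+ * sample__fprintf_insn_asm() falls back to the hex byte dump of
+ * sample__fprintf_insn_raw(); without HAVE_LIBCAPSTONE_SUPPORT it
+ * compiles to a stub that prints nothing.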
+ * + * Author(s): Changbin Du <changbin.du@huawei.com> + */ +#include <string.h> +#include <stdbool.h> +#include "debug.h" +#include "sample.h" +#include "symbol.h" +#include "machine.h" +#include "thread.h" +#include "print_insn.h" + +size_t sample__fprintf_insn_raw(struct perf_sample *sample, FILE *fp) +{ + int printed = 0; + + for (int i = 0; i < sample->insn_len; i++) { + printed += fprintf(fp, "%02x", (unsigned char)sample->insn[i]); + if (sample->insn_len - i > 1) + printed += fprintf(fp, " "); + } + return printed; +} + +#ifdef HAVE_LIBCAPSTONE_SUPPORT +#include <capstone/capstone.h> + +static int capstone_init(struct machine *machine, csh *cs_handle) +{ + cs_arch arch; + cs_mode mode; + + if (machine__is(machine, "x86_64")) { + arch = CS_ARCH_X86; + mode = CS_MODE_64; + } else if (machine__normalized_is(machine, "x86")) { + arch = CS_ARCH_X86; + mode = CS_MODE_32; + } else if (machine__normalized_is(machine, "arm64")) { + arch = CS_ARCH_ARM64; + mode = CS_MODE_ARM; + } else if (machine__normalized_is(machine, "arm")) { + arch = CS_ARCH_ARM; + mode = CS_MODE_ARM + CS_MODE_V8; + } else if (machine__normalized_is(machine, "s390")) { + arch = CS_ARCH_SYSZ; + mode = CS_MODE_BIG_ENDIAN; + } else { + return -1; + } + + if (cs_open(arch, mode, cs_handle) != CS_ERR_OK) { + pr_warning_once("cs_open failed\n"); + return -1; + } + + if (machine__normalized_is(machine, "x86")) { + cs_option(*cs_handle, CS_OPT_SYNTAX, CS_OPT_SYNTAX_ATT); + /* + * Resolving address operands to symbols is implemented + * on x86 by investigating instruction details. + */ + cs_option(*cs_handle, CS_OPT_DETAIL, CS_OPT_ON); + } + + return 0; +} + +static size_t print_insn_x86(struct perf_sample *sample, struct thread *thread, + cs_insn *insn, FILE *fp) +{ + struct addr_location al; + size_t printed = 0; + + if (insn->detail && insn->detail->x86.op_count == 1) { + cs_x86_op *op = &insn->detail->x86.operands[0]; + + addr_location__init(&al); + if (op->type == X86_OP_IMM && + thread__find_symbol(thread, sample->cpumode, op->imm, &al)) { + printed += fprintf(fp, "%s ", insn[0].mnemonic); + printed += symbol__fprintf_symname_offs(al.sym, &al, fp); + addr_location__exit(&al); + return printed; + } + addr_location__exit(&al); + } + + printed += fprintf(fp, "%s %s", insn[0].mnemonic, insn[0].op_str); + return printed; +} + +size_t sample__fprintf_insn_asm(struct perf_sample *sample, struct thread *thread, + struct machine *machine, FILE *fp) +{ + csh cs_handle; + cs_insn *insn; + size_t count; + size_t printed = 0; + int ret; + + /* TODO: Try to initiate capstone only once but need a proper place. 
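+ * One possible shape, purely a sketch and not part of this change:
+ * cache the csh handle per struct machine and cs_close() it when
+ * the machine is torn down, instead of reopening it per sample.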
*/ + ret = capstone_init(machine, &cs_handle); + if (ret < 0) { + /* fallback */ + return sample__fprintf_insn_raw(sample, fp); + } + + count = cs_disasm(cs_handle, (uint8_t *)sample->insn, sample->insn_len, + sample->ip, 1, &insn); + if (count > 0) { + if (machine__normalized_is(machine, "x86")) + printed += print_insn_x86(sample, thread, &insn[0], fp); + else + printed += fprintf(fp, "%s %s", insn[0].mnemonic, insn[0].op_str); + cs_free(insn, count); + } else { + printed += fprintf(fp, "illegal instruction"); + } + + cs_close(&cs_handle); + return printed; +} +#else +size_t sample__fprintf_insn_asm(struct perf_sample *sample __maybe_unused, + struct thread *thread __maybe_unused, + struct machine *machine __maybe_unused, + FILE *fp __maybe_unused) +{ + return 0; +} +#endif diff --git a/tools/perf/util/print_insn.h b/tools/perf/util/print_insn.h new file mode 100644 index 0000000000..465bdcfcc2 --- /dev/null +++ b/tools/perf/util/print_insn.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef PERF_PRINT_INSN_H +#define PERF_PRINT_INSN_H + +#include <stddef.h> +#include <stdio.h> + +struct perf_sample; +struct thread; +struct machine; + +size_t sample__fprintf_insn_asm(struct perf_sample *sample, struct thread *thread, + struct machine *machine, FILE *fp); +size_t sample__fprintf_insn_raw(struct perf_sample *sample, FILE *fp); + +#endif /* PERF_PRINT_INSN_H */ diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index a1a7960436..5c12459e97 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -11,6 +11,7 @@ #include <sys/stat.h> #include <fcntl.h> #include <errno.h> +#include <libgen.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> @@ -358,6 +359,7 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso) map = maps__find_by_name(machine__kernel_maps(host_machine), module_name); if (map) { dso = map__dso(map); + map__put(map); goto found; } pr_debug("Failed to find module %s.\n", module); @@ -2273,9 +2275,7 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp, ret = pp->function ? 0 : -ENOMEM; out: - if (map && !is_kprobe) { - map__put(map); - } + map__put(map); return ret; } diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources index 593b660ec7..1bec945f48 100644 --- a/tools/perf/util/python-ext-sources +++ b/tools/perf/util/python-ext-sources @@ -31,6 +31,7 @@ util/counts.c util/print_binary.c util/strlist.c util/trace-event.c +util/trace-event-parse.c ../lib/rbtree.c util/string.c util/symbol_fprintf.c diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 8761f51b5c..0aeb97c11c 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -103,6 +103,16 @@ int perf_pmu__scan_file(const struct perf_pmu *pmu, const char *name, const char return EOF; } +const char *perf_pmu__name_from_config(struct perf_pmu *pmu __maybe_unused, u64 config __maybe_unused) +{ + return NULL; +} + +struct perf_pmu *perf_pmus__find_by_type(unsigned int type __maybe_unused) +{ + return NULL; +} + int perf_pmus__num_core_pmus(void) { return 1; @@ -181,6 +191,7 @@ int perf_bpf_filter__destroy(struct evsel *evsel __maybe_unused) * implementing 'verbose' and 'eprintf'. 
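* Any global referenced from objects linked into the python binding
* needs a stub definition here too (hence debug_kmaps below),
* otherwise the extension fails to import with an undefined symbol.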
*/ int verbose; +int debug_kmaps; int debug_peo_args; int eprintf(int level, int var, const char *fmt, ...); diff --git a/tools/perf/util/rb_resort.h b/tools/perf/util/rb_resort.h index 376e86cb4c..d927a0d250 100644 --- a/tools/perf/util/rb_resort.h +++ b/tools/perf/util/rb_resort.h @@ -143,9 +143,4 @@ struct __name##_sorted *__name = __name##_sorted__new DECLARE_RESORT_RB(__name)(&__ilist->rblist.entries.rb_root, \ __ilist->rblist.nr_entries) -/* For 'struct machine->threads' */ -#define DECLARE_RESORT_RB_MACHINE_THREADS(__name, __machine, hash_bucket) \ - DECLARE_RESORT_RB(__name)(&__machine->threads[hash_bucket].entries.rb_root, \ - __machine->threads[hash_bucket].nr) - #endif /* _PERF_RESORT_RB_H_ */ diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 860e1837ba..b4f0f60e60 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -858,6 +858,10 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample, pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(evsel__name(evsel))); pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->core.attr, sizeof(evsel->core.attr))); + pydict_set_item_string_decref(dict_sample, "id", + PyLong_FromUnsignedLongLong(sample->id)); + pydict_set_item_string_decref(dict_sample, "stream_id", + PyLong_FromUnsignedLongLong(sample->stream_id)); pydict_set_item_string_decref(dict_sample, "pid", _PyLong_FromLong(sample->pid)); pydict_set_item_string_decref(dict_sample, "tid", @@ -1306,7 +1310,7 @@ static void python_export_sample_table(struct db_export *dbe, struct tables *tables = container_of(dbe, struct tables, dbe); PyObject *t; - t = tuple_new(25); + t = tuple_new(27); tuple_set_d64(t, 0, es->db_id); tuple_set_d64(t, 1, es->evsel->db_id); @@ -1333,6 +1337,8 @@ static void python_export_sample_table(struct db_export *dbe, tuple_set_d64(t, 22, es->sample->insn_cnt); tuple_set_d64(t, 23, es->sample->cyc_cnt); tuple_set_s32(t, 24, es->sample->flags); + tuple_set_d64(t, 25, es->sample->id); + tuple_set_d64(t, 26, es->sample->stream_id); call_object(tables->sample_handler, t, "sample_table"); diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 199d3e8df3..06d0bd7fb4 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -2720,6 +2720,17 @@ size_t perf_session__fprintf(struct perf_session *session, FILE *fp) return machine__fprintf(&session->machines.host, fp); } +void perf_session__dump_kmaps(struct perf_session *session) +{ + int save_verbose = verbose; + + fflush(stdout); + fprintf(stderr, "Kernel and module maps:\n"); + verbose = 0; /* Suppress verbose to print a summary only */ + maps__fprintf(machine__kernel_maps(&session->machines.host), stderr); + verbose = save_verbose; +} + struct evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type) { diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index ee3715e856..5064c6ec11 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h @@ -133,6 +133,8 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp, bool skip_empty); +void perf_session__dump_kmaps(struct perf_session *session); + struct evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type); diff --git 
a/tools/perf/util/setup.py b/tools/perf/util/setup.py index 79d5e2955f..3107f5aa8c 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -85,6 +85,7 @@ if '-DHAVE_LIBTRACEEVENT' in cflags: extra_libraries += [ 'traceevent' ] else: ext_sources.remove('util/trace-event.c') + ext_sources.remove('util/trace-event-parse.c') # use full paths with source files ext_sources = list(map(lambda x: '%s/%s' % (src_perf, x) , ext_sources)) diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 30254eb637..92a1bd695e 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -3372,7 +3372,7 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok, sort_dimension_add_dynamic_header(sd); } - if (sd->entry == &sort_parent) { + if (sd->entry == &sort_parent && parent_pattern) { int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED); if (ret) { char err[BUFSIZ]; diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index b7d00a538d..91d2f7f65d 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -201,6 +201,9 @@ static void print_aggr_id_std(struct perf_stat_config *config, snprintf(buf, sizeof(buf), "S%d-D%d-L%d-ID%d", id.socket, id.die, id.cache_lvl, id.cache); break; + case AGGR_CLUSTER: + snprintf(buf, sizeof(buf), "S%d-D%d-CLS%d", id.socket, id.die, id.cluster); + break; case AGGR_DIE: snprintf(buf, sizeof(buf), "S%d-D%d", id.socket, id.die); break; @@ -251,6 +254,10 @@ static void print_aggr_id_csv(struct perf_stat_config *config, fprintf(config->output, "S%d-D%d-L%d-ID%d%s%d%s", id.socket, id.die, id.cache_lvl, id.cache, sep, aggr_nr, sep); break; + case AGGR_CLUSTER: + fprintf(config->output, "S%d-D%d-CLS%d%s%d%s", + id.socket, id.die, id.cluster, sep, aggr_nr, sep); + break; case AGGR_DIE: fprintf(output, "S%d-D%d%s%d%s", id.socket, id.die, sep, aggr_nr, sep); @@ -300,6 +307,10 @@ static void print_aggr_id_json(struct perf_stat_config *config, fprintf(output, "\"cache\" : \"S%d-D%d-L%d-ID%d\", \"aggregate-number\" : %d, ", id.socket, id.die, id.cache_lvl, id.cache, aggr_nr); break; + case AGGR_CLUSTER: + fprintf(output, "\"cluster\" : \"S%d-D%d-CLS%d\", \"aggregate-number\" : %d, ", + id.socket, id.die, id.cluster, aggr_nr); + break; case AGGR_DIE: fprintf(output, "\"die\" : \"S%d-D%d\", \"aggregate-number\" : %d, ", id.socket, id.die, aggr_nr); @@ -1126,11 +1137,16 @@ static void print_no_aggr_metric(struct perf_stat_config *config, u64 ena, run, val; double uval; struct perf_stat_evsel *ps = counter->stats; - int aggr_idx = perf_cpu_map__idx(evsel__cpus(counter), cpu); + int aggr_idx = 0; - if (aggr_idx < 0) + if (!perf_cpu_map__has(evsel__cpus(counter), cpu)) continue; + cpu_aggr_map__for_each_idx(aggr_idx, config->aggr_map) { + if (config->aggr_map->map[aggr_idx].cpu.cpu == cpu.cpu) + break; + } + os->evsel = counter; os->id = aggr_cpu_id__cpu(cpu, /*data=*/NULL); if (first) { @@ -1207,6 +1223,9 @@ static void print_metric_headers(struct perf_stat_config *config, /* Print metrics headers only */ evlist__for_each_entry(evlist, counter) { + if (config->aggr_mode != AGGR_NONE && counter->metric_leader != counter) + continue; + os.evsel = counter; perf_stat__print_shadow_stats(config, counter, 0, @@ -1248,6 +1267,7 @@ static void print_header_interval_std(struct perf_stat_config *config, case AGGR_NODE: case AGGR_SOCKET: case AGGR_DIE: + case AGGR_CLUSTER: case AGGR_CACHE: case AGGR_CORE: fprintf(output, "#%*s %-*s cpus", @@ -1550,6 +1570,7 @@ void evlist__print_counters(struct evlist 
*evlist, struct perf_stat_config *conf switch (config->aggr_mode) { case AGGR_CORE: case AGGR_CACHE: + case AGGR_CLUSTER: case AGGR_DIE: case AGGR_SOCKET: case AGGR_NODE: diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index cf573ff3fa..3466aa9524 100644 --- a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -355,11 +355,13 @@ static void print_nsecs(struct perf_stat_config *config, print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0); } -static int prepare_metric(struct evsel **metric_events, - struct metric_ref *metric_refs, +static int prepare_metric(const struct metric_expr *mexp, + const struct evsel *evsel, struct expr_parse_ctx *pctx, int aggr_idx) { + struct evsel * const *metric_events = mexp->metric_events; + struct metric_ref *metric_refs = mexp->metric_refs; int i; for (i = 0; metric_events[i]; i++) { @@ -398,12 +400,33 @@ static int prepare_metric(struct evsel **metric_events, source_count = 1; } else { struct perf_stat_evsel *ps = metric_events[i]->stats; - struct perf_stat_aggr *aggr = &ps->aggr[aggr_idx]; + struct perf_stat_aggr *aggr; + /* + * If there are multiple uncore PMUs and we're not + * reading the leader's stats, determine the stats for + * the appropriate uncore PMU. + */ + if (evsel && evsel->metric_leader && + evsel->pmu != evsel->metric_leader->pmu && + mexp->metric_events[i]->pmu == evsel->metric_leader->pmu) { + struct evsel *pos; + + evlist__for_each_entry(evsel->evlist, pos) { + if (pos->pmu != evsel->pmu) + continue; + if (pos->metric_leader != mexp->metric_events[i]) + continue; + ps = pos->stats; + source_count = 1; + break; + } + } + aggr = &ps->aggr[aggr_idx]; if (!aggr) break; - if (!metric_events[i]->supported) { + if (!metric_events[i]->supported) { /* * Not supported events will have a count of 0, * which can be confusing in a @@ -415,7 +438,8 @@ static int prepare_metric(struct evsel **metric_events, source_count = 0; } else { val = aggr->counts.val; - source_count = evsel__source_count(metric_events[i]); + if (!source_count) + source_count = evsel__source_count(metric_events[i]); } } n = strdup(evsel__metric_id(metric_events[i])); @@ -436,18 +460,18 @@ static int prepare_metric(struct evsel **metric_events, } static void generic_metric(struct perf_stat_config *config, - const char *metric_expr, - const char *metric_threshold, - struct evsel **metric_events, - struct metric_ref *metric_refs, - char *name, - const char *metric_name, - const char *metric_unit, - int runtime, + struct metric_expr *mexp, + struct evsel *evsel, int aggr_idx, struct perf_stat_output_ctx *out) { print_metric_t print_metric = out->print_metric; + const char *metric_name = mexp->metric_name; + const char *metric_expr = mexp->metric_expr; + const char *metric_threshold = mexp->metric_threshold; + const char *metric_unit = mexp->metric_unit; + struct evsel * const *metric_events = mexp->metric_events; + int runtime = mexp->runtime; struct expr_parse_ctx *pctx; double ratio, scale, threshold; int i; @@ -462,7 +486,7 @@ static void generic_metric(struct perf_stat_config *config, pctx->sctx.user_requested_cpu_list = strdup(config->user_requested_cpu_list); pctx->sctx.runtime = runtime; pctx->sctx.system_wide = config->system_wide; - i = prepare_metric(metric_events, metric_refs, pctx, aggr_idx); + i = prepare_metric(mexp, evsel, pctx, aggr_idx); if (i < 0) { expr__ctx_free(pctx); return; @@ -497,18 +521,18 @@ static void generic_metric(struct perf_stat_config *config, print_metric(config, ctxp, color, "%8.2f", metric_name ? 
metric_name : - out->force_header ? name : "", + out->force_header ? evsel->name : "", ratio); } } else { print_metric(config, ctxp, color, /*unit=*/NULL, out->force_header ? - (metric_name ? metric_name : name) : "", 0); + (metric_name ?: evsel->name) : "", 0); } } else { print_metric(config, ctxp, color, /*unit=*/NULL, out->force_header ? - (metric_name ? metric_name : name) : "", 0); + (metric_name ?: evsel->name) : "", 0); } expr__ctx_free(pctx); @@ -523,7 +547,7 @@ double test_generic_metric(struct metric_expr *mexp, int aggr_idx) if (!pctx) return NAN; - if (prepare_metric(mexp->metric_events, mexp->metric_refs, pctx, aggr_idx) < 0) + if (prepare_metric(mexp, /*evsel=*/NULL, pctx, aggr_idx) < 0) goto out; if (expr__parse(&ratio, pctx, mexp->metric_expr)) @@ -625,10 +649,7 @@ void *perf_stat__print_shadow_stats_metricgroup(struct perf_stat_config *config, if ((*num)++ > 0) out->new_line(config, ctxp); - generic_metric(config, mexp->metric_expr, mexp->metric_threshold, - mexp->metric_events, mexp->metric_refs, evsel->name, - mexp->metric_name, mexp->metric_unit, mexp->runtime, - aggr_idx, out); + generic_metric(config, mexp, evsel, aggr_idx, out); } return NULL; diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 4357ba1148..d6e5c8787b 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -48,6 +48,7 @@ enum aggr_mode { AGGR_GLOBAL, AGGR_SOCKET, AGGR_DIE, + AGGR_CLUSTER, AGGR_CACHE, AGGR_CORE, AGGR_THREAD, diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 4b934ed3bf..0b91f813c4 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -23,6 +23,7 @@ #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/zalloc.h> +#include <linux/string.h> #include <symbol/kallsyms.h> #include <internal/lib.h> @@ -1329,6 +1330,58 @@ out_close: return -1; } +static bool is_exe_text(int flags) +{ + return (flags & (SHF_ALLOC | SHF_EXECINSTR)) == (SHF_ALLOC | SHF_EXECINSTR); +} + +/* + * Some executable module sections like .noinstr.text might be laid out with + * .text so they can use the same mapping (memory address to file offset). + * Check if that is the case. Refer to kernel layout_sections(). Return the + * maximum offset. + */ +static u64 max_text_section(Elf *elf, GElf_Ehdr *ehdr) +{ + Elf_Scn *sec = NULL; + GElf_Shdr shdr; + u64 offs = 0; + + /* Doesn't work for some arch */ + if (ehdr->e_machine == EM_PARISC || + ehdr->e_machine == EM_ALPHA) + return 0; + + /* ELF is corrupted/truncated, avoid calling elf_strptr. */ + if (!elf_rawdata(elf_getscn(elf, ehdr->e_shstrndx), NULL)) + return 0; + + while ((sec = elf_nextscn(elf, sec)) != NULL) { + char *sec_name; + + if (!gelf_getshdr(sec, &shdr)) + break; + + if (!is_exe_text(shdr.sh_flags)) + continue; + + /* .init and .exit sections are not placed with .text */ + sec_name = elf_strptr(elf, ehdr->e_shstrndx, shdr.sh_name); + if (!sec_name || + strstarts(sec_name, ".init") || + strstarts(sec_name, ".exit")) + break; + + /* Must be next to previous, assumes .text is first */ + if (offs && PERF_ALIGN(offs, shdr.sh_addralign ?: 1) != shdr.sh_offset) + break; + + offs = shdr.sh_offset + shdr.sh_size; + } + + return offs; +} + /** * ref_reloc_sym_not_found - has kernel relocation symbol been found. 
* @kmap: kernel maps and relocation reference symbol @@ -1368,7 +1421,8 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, struct maps *kmaps, struct kmap *kmap, struct dso **curr_dsop, struct map **curr_mapp, const char *section_name, - bool adjust_kernel_syms, bool kmodule, bool *remap_kernel) + bool adjust_kernel_syms, bool kmodule, bool *remap_kernel, + u64 max_text_sh_offset) { struct dso *curr_dso = *curr_dsop; struct map *curr_map; @@ -1424,6 +1478,17 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, if (!kmap) return 0; + /* + * perf does not record module section addresses except for .text, but + * some sections can use the same mapping as .text. + */ + if (kmodule && adjust_kernel_syms && is_exe_text(shdr->sh_flags) && + shdr->sh_offset <= max_text_sh_offset) { + *curr_mapp = map; + *curr_dsop = dso; + return 0; + } + snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name); curr_map = maps__find_by_name(kmaps, dso_name); @@ -1470,8 +1535,10 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, dso__set_loaded(curr_dso); *curr_mapp = curr_map; *curr_dsop = curr_dso; - } else + } else { *curr_dsop = map__dso(curr_map); + map__put(curr_map); + } return 0; } @@ -1497,6 +1564,7 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, Elf *elf; int nr = 0; bool remap_kernel = false, adjust_kernel_syms = false; + u64 max_text_sh_offset = 0; if (kmap && !kmaps) return -1; @@ -1584,6 +1652,10 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, remap_kernel = true; adjust_kernel_syms = dso->adjust_symbols; } + + if (kmodule && adjust_kernel_syms) + max_text_sh_offset = max_text_section(runtime_ss->elf, &runtime_ss->ehdr); + elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { struct symbol *f; const char *elf_name = elf_sym__name(&sym, symstrs); @@ -1673,7 +1745,8 @@ dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss, if (dso->kernel) { if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map, - section_name, adjust_kernel_syms, kmodule, &remap_kernel)) + section_name, adjust_kernel_syms, kmodule, + &remap_kernel, max_text_sh_offset)) goto out_elf_end; } else if ((used_opd && runtime_ss->adjust_symbols) || (!used_opd && syms_ss->adjust_symbols)) { diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index be212ba157..68dbeae8d2 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -63,6 +63,16 @@ struct symbol_conf symbol_conf = { .res_sample = 0, }; +struct map_list_node { + struct list_head node; + struct map *map; +}; + +static struct map_list_node *map_list_node__new(void) +{ + return malloc(sizeof(struct map_list_node)); +} + static enum dso_binary_type binary_type_symtab[] = { DSO_BINARY_TYPE__KALLSYMS, DSO_BINARY_TYPE__GUEST_KALLSYMS, @@ -238,14 +248,31 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms) * segment is very big. Therefore do not fill this gap and do * not assign it to the kernel dso map (kallsyms). * + * Also BPF code can be allocated separately from text segments + * and modules. So the last entry in a module should not fill + * the gap too. 
+ * * In kallsyms, it determines module symbols using '[' character * like in: * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi] */ if (prev->end == prev->start) { + const char *prev_mod; + const char *curr_mod; + + if (!is_kallsyms) { + prev->end = curr->start; + continue; + } + + prev_mod = strchr(prev->name, '['); + curr_mod = strchr(curr->name, '['); + /* Last kernel/module symbol mapped to end of page */ - if (is_kallsyms && (!strchr(prev->name, '[') != - !strchr(curr->name, '['))) + if (!prev_mod != !curr_mod) + prev->end = roundup(prev->end + 4096, 4096); + /* Last symbol in the previous module */ + else if (prev_mod && strcmp(prev_mod, curr_mod)) prev->end = roundup(prev->end + 4096, 4096); else prev->end = curr->start; @@ -757,7 +784,6 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename) static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso) { - struct map *curr_map; struct symbol *pos; int count = 0; struct rb_root_cached old_root = dso->symbols; @@ -770,6 +796,7 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso) *root = RB_ROOT_CACHED; while (next) { + struct map *curr_map; struct dso *curr_map_dso; char *module; @@ -796,6 +823,7 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso) pos->end -= map__start(curr_map) - map__pgoff(curr_map); symbols__insert(&curr_map_dso->symbols, pos); ++count; + map__put(curr_map); } /* Symbols have been adjusted */ @@ -813,7 +841,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, struct map *initial_map) { struct machine *machine; - struct map *curr_map = initial_map; + struct map *curr_map = map__get(initial_map); struct symbol *pos; int count = 0, moved = 0; struct rb_root_cached *root = &dso->symbols; @@ -857,13 +885,14 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, dso__set_loaded(curr_map_dso); } + map__zput(curr_map); curr_map = maps__find_by_name(kmaps, module); if (curr_map == NULL) { pr_debug("%s/proc/{kallsyms,modules} " "inconsistency while looking " "for \"%s\" module!\n", machine->root_dir, module); - curr_map = initial_map; + curr_map = map__get(initial_map); goto discard_symbol; } curr_map_dso = map__dso(curr_map); @@ -887,7 +916,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, * symbols at this point. 
*/ goto discard_symbol; - } else if (curr_map != initial_map) { + } else if (!RC_CHK_EQUAL(curr_map, initial_map)) { char dso_name[PATH_MAX]; struct dso *ndso; @@ -898,7 +927,8 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, } if (count == 0) { - curr_map = initial_map; + map__zput(curr_map); + curr_map = map__get(initial_map); goto add_symbol; } @@ -912,6 +942,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, kernel_range++); ndso = dso__new(dso_name); + map__zput(curr_map); if (ndso == NULL) return -1; @@ -925,6 +956,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY); if (maps__insert(kmaps, curr_map)) { + map__zput(curr_map); dso__put(ndso); return -1; } @@ -935,7 +967,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, pos->end -= delta; } add_symbol: - if (curr_map != initial_map) { + if (!RC_CHK_EQUAL(curr_map, initial_map)) { struct dso *curr_map_dso = map__dso(curr_map); rb_erase_cached(&pos->rb_node, root); @@ -950,12 +982,12 @@ discard_symbol: symbol__delete(pos); } - if (curr_map != initial_map && + if (!RC_CHK_EQUAL(curr_map, initial_map) && dso->kernel == DSO_SPACE__KERNEL_GUEST && machine__is_default_guest(maps__machine(kmaps))) { dso__set_loaded(map__dso(curr_map)); } - + map__put(curr_map); return count + moved; } @@ -1255,7 +1287,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, { struct maps *kmaps = map__kmaps(map); struct kcore_mapfn_data md; - struct map *replacement_map = NULL; + struct map *map_ref, *replacement_map = NULL; struct machine *machine; bool is_64_bit; int err, fd; @@ -1333,6 +1365,24 @@ static int dso__load_kcore(struct dso *dso, struct map *map, if (!replacement_map) replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map; + /* + * Update addresses of vmlinux map. Re-insert it to ensure maps are + * correctly ordered. Do this before using maps__merge_in() for the + * remaining maps so vmlinux gets split if necessary. 
+ */ + map_ref = map__get(map); + maps__remove(kmaps, map_ref); + + map__set_start(map_ref, map__start(replacement_map)); + map__set_end(map_ref, map__end(replacement_map)); + map__set_pgoff(map_ref, map__pgoff(replacement_map)); + map__set_mapping_type(map_ref, map__mapping_type(replacement_map)); + + err = maps__insert(kmaps, map_ref); + map__put(map_ref); + if (err) + goto out_err; + /* Add new maps */ while (!list_empty(&md.maps)) { struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node); @@ -1340,22 +1390,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map, list_del_init(&new_node->node); - if (RC_CHK_EQUAL(new_map, replacement_map)) { - struct map *map_ref; - - map__set_start(map, map__start(new_map)); - map__set_end(map, map__end(new_map)); - map__set_pgoff(map, map__pgoff(new_map)); - map__set_mapping_type(map, map__mapping_type(new_map)); - /* Ensure maps are correctly ordered */ - map_ref = map__get(map); - maps__remove(kmaps, map_ref); - err = maps__insert(kmaps, map_ref); - map__put(map_ref); - map__put(new_map); - if (err) - goto out_err; - } else { + /* skip if replacement_map, already inserted above */ + if (!RC_CHK_EQUAL(new_map, replacement_map)) { /* * Merge kcore map into existing maps, * and ensure that current maps (eBPF) @@ -1937,6 +1973,10 @@ out: return ret; } +/* + * Always takes ownership of vmlinux when vmlinux_allocated == true, even if + * it returns an error. + */ int dso__load_vmlinux(struct dso *dso, struct map *map, const char *vmlinux, bool vmlinux_allocated) { @@ -1955,8 +1995,11 @@ int dso__load_vmlinux(struct dso *dso, struct map *map, else symtab_type = DSO_BINARY_TYPE__VMLINUX; - if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) + if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) { + if (vmlinux_allocated) + free((char *) vmlinux); return -1; + } /* * dso__load_sym() may copy 'dso' which will result in the copies having @@ -1999,7 +2042,6 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map) err = dso__load_vmlinux(dso, map, filename, true); if (err > 0) goto out; - free(filename); } out: return err; @@ -2151,7 +2193,6 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map) err = dso__load_vmlinux(dso, map, filename, true); if (err > 0) return err; - free(filename); } if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) { diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 89c47a5098..515726489e 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -26,7 +26,7 @@ int thread__init_maps(struct thread *thread, struct machine *machine) if (pid == thread__tid(thread) || pid == -1) { thread__set_maps(thread, maps__new(machine)); } else { - struct thread *leader = __machine__findnew_thread(machine, pid, pid); + struct thread *leader = machine__findnew_thread(machine, pid, pid); if (leader) { thread__set_maps(thread, maps__get(thread__maps(leader))); @@ -39,12 +39,13 @@ int thread__init_maps(struct thread *thread, struct machine *machine) struct thread *thread__new(pid_t pid, pid_t tid) { - char *comm_str; - struct comm *comm; RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread)); struct thread *thread; if (ADD_RC_CHK(thread, _thread) != NULL) { + struct comm *comm; + char comm_str[32]; + thread__set_pid(thread, pid); thread__set_tid(thread, tid); thread__set_ppid(thread, -1); @@ -56,13 +57,8 @@ struct thread *thread__new(pid_t pid, pid_t tid) init_rwsem(thread__namespaces_lock(thread)); init_rwsem(thread__comm_lock(thread)); - comm_str = 
malloc(32); - if (!comm_str) - goto err_thread; - - snprintf(comm_str, 32, ":%d", tid); + snprintf(comm_str, sizeof(comm_str), ":%d", tid); comm = comm__new(comm_str, 0, false); - free(comm_str); if (!comm) goto err_thread; @@ -76,7 +72,7 @@ struct thread *thread__new(pid_t pid, pid_t tid) return thread; err_thread: - free(thread); + thread__delete(thread); return NULL; } @@ -383,7 +379,7 @@ static int thread__clone_maps(struct thread *thread, struct thread *parent, bool if (thread__pid(thread) == thread__pid(parent)) return thread__prepare_access(thread); - if (RC_CHK_EQUAL(thread__maps(thread), thread__maps(parent))) { + if (maps__equal(thread__maps(thread), thread__maps(parent))) { pr_debug("broken map groups on thread %d/%d parent %d/%d\n", thread__pid(thread), thread__tid(thread), thread__pid(parent), thread__tid(parent)); diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 0df775b5c1..8b4a3c69ba 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -3,7 +3,6 @@ #define __PERF_THREAD_H #include <linux/refcount.h> -#include <linux/rbtree.h> #include <linux/list.h> #include <stdio.h> #include <unistd.h> @@ -13,7 +12,6 @@ #include <strlist.h> #include <intlist.h> #include "rwsem.h" -#include "event.h" #include "callchain.h" #include <internal/rc_check.h> @@ -30,11 +28,6 @@ struct lbr_stitch { struct callchain_cursor_node *prev_lbr_cursor; }; -struct thread_rb_node { - struct rb_node rb_node; - struct thread *thread; -}; - DECLARE_RC_STRUCT(thread) { /** @maps: mmaps associated with this thread. */ struct maps *maps; diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index ea3b431b97..b5f12390c3 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -109,9 +109,10 @@ static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid) snprintf(path, sizeof(path), "/proc/%d/task", pid); items = scandir(path, &namelist, filter, NULL); - if (items <= 0) - goto out_free_closedir; - + if (items <= 0) { + pr_debug("scandir for %d returned empty, skipping\n", pid); + continue; + } while (threads->nr + items >= max_threads) { max_threads *= 2; grow = true; @@ -152,8 +153,6 @@ out_free_namelist: for (i = 0; i < items; i++) zfree(&namelist[i]); free(namelist); - -out_free_closedir: zfree(&threads); goto out_closedir; } diff --git a/tools/perf/util/threads.c b/tools/perf/util/threads.c new file mode 100644 index 0000000000..ff2b169e00 --- /dev/null +++ b/tools/perf/util/threads.c @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "threads.h" +#include "machine.h" +#include "thread.h" + +static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid) +{ + /* Cast it to handle tid == -1 */ + return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE]; +} + +static size_t key_hash(long key, void *ctx __maybe_unused) +{ + /* The table lookup removes low bit entropy, but this is just ignored here. 
*/ + return key; +} + +static bool key_equal(long key1, long key2, void *ctx __maybe_unused) +{ + return key1 == key2; +} + +void threads__init(struct threads *threads) +{ + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { + struct threads_table_entry *table = &threads->table[i]; + + hashmap__init(&table->shard, key_hash, key_equal, NULL); + init_rwsem(&table->lock); + table->last_match = NULL; + } +} + +void threads__exit(struct threads *threads) +{ + threads__remove_all_threads(threads); + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { + struct threads_table_entry *table = &threads->table[i]; + + hashmap__clear(&table->shard); + exit_rwsem(&table->lock); + } +} + +size_t threads__nr(struct threads *threads) +{ + size_t nr = 0; + + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { + struct threads_table_entry *table = &threads->table[i]; + + down_read(&table->lock); + nr += hashmap__size(&table->shard); + up_read(&table->lock); + } + return nr; +} + +/* + * Front-end cache - TID lookups come in blocks, + * so most of the time we dont have to look up + * the full rbtree: + */ +static struct thread *__threads_table_entry__get_last_match(struct threads_table_entry *table, + pid_t tid) +{ + struct thread *th, *res = NULL; + + th = table->last_match; + if (th != NULL) { + if (thread__tid(th) == tid) + res = thread__get(th); + } + return res; +} + +static void __threads_table_entry__set_last_match(struct threads_table_entry *table, + struct thread *th) +{ + thread__put(table->last_match); + table->last_match = thread__get(th); +} + +static void threads_table_entry__set_last_match(struct threads_table_entry *table, + struct thread *th) +{ + down_write(&table->lock); + __threads_table_entry__set_last_match(table, th); + up_write(&table->lock); +} + +struct thread *threads__find(struct threads *threads, pid_t tid) +{ + struct threads_table_entry *table = threads__table(threads, tid); + struct thread *res; + + down_read(&table->lock); + res = __threads_table_entry__get_last_match(table, tid); + if (!res) { + if (hashmap__find(&table->shard, tid, &res)) + res = thread__get(res); + } + up_read(&table->lock); + if (res) + threads_table_entry__set_last_match(table, res); + return res; +} + +struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created) +{ + struct threads_table_entry *table = threads__table(threads, tid); + struct thread *res = NULL; + + *created = false; + down_write(&table->lock); + res = thread__new(pid, tid); + if (res) { + if (hashmap__add(&table->shard, tid, res)) { + /* Add failed. Assume a race so find other entry. 
*/ + thread__put(res); + res = NULL; + if (hashmap__find(&table->shard, tid, &res)) + res = thread__get(res); + } else { + res = thread__get(res); + *created = true; + } + if (res) + __threads_table_entry__set_last_match(table, res); + } + up_write(&table->lock); + return res; +} + +void threads__remove_all_threads(struct threads *threads) +{ + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { + struct threads_table_entry *table = &threads->table[i]; + struct hashmap_entry *cur, *tmp; + size_t bkt; + + down_write(&table->lock); + __threads_table_entry__set_last_match(table, NULL); + hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) { + struct thread *old_value; + + hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value); + thread__put(old_value); + } + up_write(&table->lock); + } +} + +void threads__remove(struct threads *threads, struct thread *thread) +{ + struct threads_table_entry *table = threads__table(threads, thread__tid(thread)); + struct thread *old_value; + + down_write(&table->lock); + if (table->last_match && RC_CHK_EQUAL(table->last_match, thread)) + __threads_table_entry__set_last_match(table, NULL); + + hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value); + thread__put(old_value); + up_write(&table->lock); +} + +int threads__for_each_thread(struct threads *threads, + int (*fn)(struct thread *thread, void *data), + void *data) +{ + for (int i = 0; i < THREADS__TABLE_SIZE; i++) { + struct threads_table_entry *table = &threads->table[i]; + struct hashmap_entry *cur; + size_t bkt; + + down_read(&table->lock); + hashmap__for_each_entry((&table->shard), cur, bkt) { + int rc = fn((struct thread *)cur->pvalue, data); + + if (rc != 0) { + up_read(&table->lock); + return rc; + } + } + up_read(&table->lock); + } + return 0; + +} diff --git a/tools/perf/util/threads.h b/tools/perf/util/threads.h new file mode 100644 index 0000000000..da68d2223f --- /dev/null +++ b/tools/perf/util/threads.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PERF_THREADS_H +#define __PERF_THREADS_H + +#include "hashmap.h" +#include "rwsem.h" + +struct thread; + +#define THREADS__TABLE_BITS 3 +#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS) + +struct threads_table_entry { + /* Key is tid, value is struct thread. */ + struct hashmap shard; + struct rw_semaphore lock; + struct thread *last_match; +}; + +struct threads { + struct threads_table_entry table[THREADS__TABLE_SIZE]; +}; + +void threads__init(struct threads *threads); +void threads__exit(struct threads *threads); +size_t threads__nr(struct threads *threads); +struct thread *threads__find(struct threads *threads, pid_t tid); +struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created); +void threads__remove_all_threads(struct threads *threads); +void threads__remove(struct threads *threads, struct thread *thread); +int threads__for_each_thread(struct threads *threads, + int (*fn)(struct thread *thread, void *data), + void *data); + +#endif /* __PERF_THREADS_H */ diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 2d3c2576ba..f0332bd3a5 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -122,6 +122,119 @@ void event_format__print(struct tep_event *event, return event_format__fprintf(event, cpu, data, size, stdout); } +/* + * prev_state is of size long, which is 32 bits on 32 bit architectures. 
+ * As it needs to have the same bits for both 32 bit and 64 bit architectures + * we can just assume that the flags we care about will all be within + * the 32 bits. + */ +#define MAX_STATE_BITS 32 + +static const char *convert_sym(struct tep_print_flag_sym *sym) +{ + static char save_states[MAX_STATE_BITS + 1]; + + memset(save_states, 0, sizeof(save_states)); + + /* This is the flags for the prev_state_field, now make them into a string */ + for (; sym; sym = sym->next) { + long bitmask = strtoul(sym->value, NULL, 0); + int i; + + for (i = 0; !(bitmask & 1); i++) + bitmask >>= 1; + + if (i >= MAX_STATE_BITS) + continue; + + save_states[i] = sym->str[0]; + } + + return save_states; +} + +static struct tep_print_arg_field * +find_arg_field(struct tep_format_field *prev_state_field, struct tep_print_arg *arg) +{ + struct tep_print_arg_field *field; + + if (!arg) + return NULL; + + if (arg->type == TEP_PRINT_FIELD) + return &arg->field; + + if (arg->type == TEP_PRINT_OP) { + field = find_arg_field(prev_state_field, arg->op.left); + if (field && field->field == prev_state_field) + return field; + field = find_arg_field(prev_state_field, arg->op.right); + if (field && field->field == prev_state_field) + return field; + } + return NULL; +} + +static struct tep_print_flag_sym * +test_flags(struct tep_format_field *prev_state_field, struct tep_print_arg *arg) +{ + struct tep_print_arg_field *field; + + field = find_arg_field(prev_state_field, arg->flags.field); + if (!field) + return NULL; + + return arg->flags.flags; +} + +static struct tep_print_flag_sym * +search_op(struct tep_format_field *prev_state_field, struct tep_print_arg *arg) +{ + struct tep_print_flag_sym *sym = NULL; + + if (!arg) + return NULL; + + if (arg->type == TEP_PRINT_OP) { + sym = search_op(prev_state_field, arg->op.left); + if (sym) + return sym; + + sym = search_op(prev_state_field, arg->op.right); + if (sym) + return sym; + } else if (arg->type == TEP_PRINT_FLAGS) { + sym = test_flags(prev_state_field, arg); + } + + return sym; +} + +const char *parse_task_states(struct tep_format_field *state_field) +{ + struct tep_print_flag_sym *sym; + struct tep_print_arg *arg; + struct tep_event *event; + + event = state_field->event; + + /* + * Look at the event format fields, and search for where + * the prev_state is parsed via the format flags. + */ + for (arg = event->print_fmt.args; arg; arg = arg->next) { + /* + * Currently, the __print_flags() for the prev_state + * is embedded in operations, so they too must be + * searched. 
+ */ + sym = search_op(state_field, arg); + if (sym) + return convert_sym(sym); + } + return NULL; +} + void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size __maybe_unused) { diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index a69ee29419..bbf8b26bc8 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -15,6 +15,7 @@ struct perf_tool; struct thread; struct tep_plugin_list; struct evsel; +struct tep_format_field; struct trace_event { struct tep_handle *pevent; @@ -51,6 +52,8 @@ int parse_event_file(struct tep_handle *pevent, unsigned long long raw_field_value(struct tep_event *event, const char *name, void *data); +const char *parse_task_states(struct tep_format_field *state_field); + void parse_proc_kallsyms(struct tep_handle *pevent, char *file, unsigned int size); void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size); void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int size); diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index 6013335a8d..b38d322734 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -263,7 +263,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg, struct unwind_info *ui, ui_buf = { .sample = data, .thread = thread, - .machine = RC_CHK_ACCESS(thread__maps(thread))->machine, + .machine = maps__machine((thread__maps(thread))), .cb = cb, .arg = arg, .max_stack = max_stack, diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c index dac536e283..6a5ac0faa6 100644 --- a/tools/perf/util/unwind-libunwind-local.c +++ b/tools/perf/util/unwind-libunwind-local.c @@ -706,7 +706,7 @@ static int _unwind__prepare_access(struct maps *maps) { void *addr_space = unw_create_addr_space(&accessors, 0); - RC_CHK_ACCESS(maps)->addr_space = addr_space; + maps__set_addr_space(maps, addr_space); if (!addr_space) { pr_err("unwind: Can't create unwind address space.\n"); return -ENOMEM; diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c index 76cd63de80..2728eb4f13 100644 --- a/tools/perf/util/unwind-libunwind.c +++ b/tools/perf/util/unwind-libunwind.c @@ -12,11 +12,6 @@ struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops; struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops; -static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops) -{ - RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops; -} - int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized) { const char *arch; @@ -60,7 +55,7 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized return 0; } out_register: - unwind__register_ops(maps, ops); + maps__set_unwind_libunwind_ops(maps, ops); err = maps__unwind_libunwind_ops(maps)->prepare_access(maps); if (initialized) diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c index c1fd9ba6d6..4f561e5e41 100644 --- a/tools/perf/util/util.c +++ b/tools/perf/util/util.c @@ -552,3 +552,22 @@ int sched_getcpu(void) return -1; } #endif + +#ifndef HAVE_SCANDIRAT_SUPPORT +int scandirat(int dirfd, const char *dirp, + struct dirent ***namelist, + int (*filter)(const struct dirent *), + int (*compar)(const struct dirent **, const struct dirent **)) +{ + char path[PATH_MAX]; + int err, fd = openat(dirfd, dirp, O_PATH); + + if (fd < 0) + return fd; + + 
snprintf(path, sizeof(path), "/proc/%d/fd/%d", getpid(), fd); + err = scandir(path, namelist, filter, compar); + close(fd); + return err; +} +#endif diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 7c8915d92d..9966c21aaf 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -6,6 +6,7 @@ /* glibc 2.20 deprecates _BSD_SOURCE in favour of _DEFAULT_SOURCE */ #define _DEFAULT_SOURCE 1 +#include <dirent.h> #include <fcntl.h> #include <stdbool.h> #include <stddef.h> @@ -56,6 +57,13 @@ int perf_tip(char **strp, const char *dirpath); int sched_getcpu(void); #endif +#ifndef HAVE_SCANDIRAT_SUPPORT +int scandirat(int dirfd, const char *dirp, + struct dirent ***namelist, + int (*filter)(const struct dirent *), + int (*compar)(const struct dirent **, const struct dirent **)); +#endif + extern bool perf_singlethreaded; void perf_set_singlethreaded(void);
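
The new print_insn.c above ends with the capstone-based path: sample__fprintf_insn_asm() disassembles exactly one instruction from sample->insn, falls back to sample__fprintf_insn_raw() when the engine cannot be initialized, and compiles to a stub returning 0 when perf is built without capstone. A minimal standalone sketch of the same open/disasm/free/close sequence, assuming x86-64 and a hand-encoded "mov eax, 1"; the cs_* calls are capstone's public API, everything else is illustrative:

#include <stdio.h>
#include <stdint.h>
#include <capstone/capstone.h>

int main(void)
{
	/* "mov eax, 1" encoded for x86-64, standing in for sample->insn */
	uint8_t code[] = { 0xb8, 0x01, 0x00, 0x00, 0x00 };
	csh handle;
	cs_insn *insn;
	size_t count;

	if (cs_open(CS_ARCH_X86, CS_MODE_64, &handle) != CS_ERR_OK)
		return 1;	/* perf falls back to raw byte printing here */

	count = cs_disasm(handle, code, sizeof(code), /*address=*/0x1000, /*count=*/1, &insn);
	if (count > 0) {
		printf("%s %s\n", insn[0].mnemonic, insn[0].op_str);
		cs_free(insn, count);
	} else {
		printf("illegal instruction\n");
	}
	cs_close(&handle);
	return 0;
}

Build with -lcapstone; the perf code additionally routes x86 through print_insn_x86() to annotate branch samples.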
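
The stat-display.c and stat.h hunks thread the new AGGR_CLUSTER aggregation level (slotted between AGGR_DIE and AGGR_CACHE in enum aggr_mode) through the std, CSV and JSON id printers, labelling each aggregate by socket, die and cluster. A tiny sketch of the two label formats, with made-up topology numbers:

#include <stdio.h>

int main(void)
{
	int socket = 0, die = 1, cluster = 2, aggr_nr = 4;
	char buf[64];

	/* std output, as in print_aggr_id_std() */
	snprintf(buf, sizeof(buf), "S%d-D%d-CLS%d", socket, die, cluster);
	printf("%-12s %d\n", buf, aggr_nr);

	/* JSON output, as in print_aggr_id_json() */
	printf("\"cluster\" : \"S%d-D%d-CLS%d\", \"aggregate-number\" : %d\n",
	       socket, die, cluster, aggr_nr);
	return 0;
}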
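
A recurring theme above (probe-event.c, symbol-elf.c, symbol.c) is plugging map reference-count leaks: every maps__find_by_name()/maps__find() result is now dropped with map__put() on all paths, maps__split_kallsyms() takes its own reference on initial_map, and pointer comparisons go through RC_CHK_EQUAL() because refcount-checked builds wrap the raw pointer. A generic, runnable sketch of that get/put discipline, using hypothetical obj_get()/obj_put() names rather than perf's RC_CHK machinery:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcnt;
	int data;
};

static struct obj *obj_get(struct obj *o)
{
	if (o)
		o->refcnt++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && --o->refcnt == 0)
		free(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcnt = 1;
	/* a lookup hands out its own reference... */
	struct obj *found = obj_get(o);

	/* ...which the caller must drop on every path, success or error */
	obj_put(found);
	obj_put(o);
	return 0;
}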
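
Relatedly, the comment added to dso__load_vmlinux() documents an ownership transfer: when vmlinux_allocated is true the callee frees the string even on failure, which is why both callers drop their free(filename) calls in this patch. A schematic of that contract, all names hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Takes ownership of @name when @allocated is true, even on error. */
static int load(char *name, int allocated)
{
	int err = -1;	/* pretend the load failed */

	printf("loading %s\n", name);
	if (allocated)
		free(name);
	return err;
}

int main(void)
{
	char *name = strdup("vmlinux");

	load(name, /*allocated=*/1);
	/* no free(name) here: ownership moved into load() */
	return 0;
}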
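
The new threads.c/threads.h replace the old per-machine rbtree buckets (note the DECLARE_RESORT_RB_MACHINE_THREADS helper deleted from rb_resort.h) with THREADS__TABLE_SIZE hashmap shards, each guarded by its own rw_semaphore and fronted by a one-entry last_match cache. Shard selection is a simple modulo on the tid, with a cast to unsigned so tid == -1 still lands in a valid bucket. A runnable sketch of just the shard math, using only the constants from the new header:

#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>

#define THREADS__TABLE_BITS 3
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)

static unsigned int shard(pid_t tid)
{
	/* The cast handles tid == -1, as in threads__table(). */
	return (unsigned int)tid % THREADS__TABLE_SIZE;
}

int main(void)
{
	pid_t tids[] = { 0, 1, 8, 4096, -1 };

	for (size_t i = 0; i < sizeof(tids) / sizeof(tids[0]); i++)
		printf("tid %6d -> shard %u\n", tids[i], shard(tids[i]));
	return 0;
}

Note how threads__findnew() optimistically inserts a fresh thread and, if hashmap__add() fails, assumes it raced with another inserter and re-reads the winner's entry instead of erroring out.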
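
parse_task_states() in trace-event-parse.c walks a tracepoint's print-format tree for the __print_flags() that decodes prev_state, and convert_sym() then maps each flag to the first character of its symbol by locating the mask's lowest set bit. A self-contained sketch of that mask-to-letter step with made-up flag values; the original relies on every mask having a set bit, so the sketch adds an explicit bound on the bit scan:

#include <stdio.h>
#include <string.h>

#define MAX_STATE_BITS 32

struct flag_sym {
	unsigned long value;
	const char *str;
};

static const char *convert_syms(const struct flag_sym *syms, int nr)
{
	static char states[MAX_STATE_BITS + 1];

	memset(states, 0, sizeof(states));
	for (int s = 0; s < nr; s++) {
		unsigned long mask = syms[s].value;
		int bit = 0;

		/* find the lowest set bit of the flag's mask */
		while (!(mask & 1) && bit < MAX_STATE_BITS) {
			mask >>= 1;
			bit++;
		}
		if (bit < MAX_STATE_BITS)
			states[bit] = syms[s].str[0];
	}
	return states;
}

int main(void)
{
	/* made-up subset of task state flags, for illustration only */
	const struct flag_sym syms[] = {
		{ 0x01, "S" }, { 0x02, "D" }, { 0x04, "T" },
	};

	printf("%s\n", convert_syms(syms, 3));	/* prints "SDT" */
	return 0;
}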
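
Finally, the scandirat() fallback in util.c emulates the missing libc call by materializing the dirfd-relative directory through procfs: openat() with O_PATH yields a descriptor, and plain scandir() is then pointed at /proc/<pid>/fd/<fd>. The same trick as a runnable example listing the current directory; my_scandirat is a local stand-in name, the logic mirrors the hunk above and assumes Linux /proc semantics:

#define _GNU_SOURCE
#include <dirent.h>
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int my_scandirat(int dirfd, const char *dirp, struct dirent ***namelist,
			int (*filter)(const struct dirent *),
			int (*compar)(const struct dirent **, const struct dirent **))
{
	char path[PATH_MAX];
	int err, fd = openat(dirfd, dirp, O_PATH);

	if (fd < 0)
		return fd;

	/* /proc/<pid>/fd/<fd> resolves to the directory behind dirfd/dirp */
	snprintf(path, sizeof(path), "/proc/%d/fd/%d", getpid(), fd);
	err = scandir(path, namelist, filter, compar);
	close(fd);
	return err;
}

int main(void)
{
	struct dirent **namelist;
	int n = my_scandirat(AT_FDCWD, ".", &namelist, NULL, alphasort);

	if (n < 0)
		return 1;
	while (n--) {
		puts(namelist[n]->d_name);
		free(namelist[n]);
	}
	free(namelist);
	return 0;
}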