Diffstat (limited to 'tools/perf/util/symbol.c')
-rw-r--r--	tools/perf/util/symbol.c	101
1 file changed, 71 insertions(+), 30 deletions(-)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index be212ba157..68dbeae8d2 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -63,6 +63,16 @@ struct symbol_conf symbol_conf = {
.res_sample = 0,
};
+struct map_list_node {
+ struct list_head node;
+ struct map *map;
+};
+
+static struct map_list_node *map_list_node__new(void)
+{
+ return malloc(sizeof(struct map_list_node));
+}
+
static enum dso_binary_type binary_type_symtab[] = {
DSO_BINARY_TYPE__KALLSYMS,
DSO_BINARY_TYPE__GUEST_KALLSYMS,
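The map_list_node added above wraps a struct map pointer in an intrusive list node so that dso__load_kcore() can queue maps before inserting them into kmaps; note that map_list_node__new() is a bare malloc(), so callers must check for NULL. A minimal standalone sketch of the same pattern, with hand-rolled list helpers and a stub struct map standing in for the real types from tools/include/linux/list.h and perf's map.h:

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for perf's list helpers and struct map. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

struct map { unsigned long start; };	/* stub for perf's struct map */

struct map_list_node {
	struct list_head node;	/* must stay first for the cast below */
	struct map *map;
};

int main(void)
{
	struct list_head maps;
	struct map m = { 0x1000 };
	struct map_list_node *n;

	list_init(&maps);

	n = malloc(sizeof(*n));		/* map_list_node__new() is just malloc() */
	if (n == NULL)
		return 1;		/* callers must check for failure */
	n->map = &m;
	list_add_tail(&n->node, &maps);

	/* Drain the list the way dso__load_kcore() does. */
	while (maps.next != &maps) {
		struct map_list_node *cur = (struct map_list_node *)maps.next;

		list_del(&cur->node);
		printf("queued map starts at %#lx\n", cur->map->start);
		free(cur);
	}
	return 0;
}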
@@ -238,14 +248,31 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
* segment is very big. Therefore do not fill this gap and do
* not assign it to the kernel dso map (kallsyms).
*
+ * Also, BPF code can be allocated separately from text segments
+ * and modules, so the last entry in a module should not fill
+ * the gap either.
+ *
* In kallsyms, it determines module symbols using '[' character
* like in:
* ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
*/
if (prev->end == prev->start) {
+ const char *prev_mod;
+ const char *curr_mod;
+
+ if (!is_kallsyms) {
+ prev->end = curr->start;
+ continue;
+ }
+
+ prev_mod = strchr(prev->name, '[');
+ curr_mod = strchr(curr->name, '[');
+
/* Last kernel/module symbol mapped to end of page */
- if (is_kallsyms && (!strchr(prev->name, '[') !=
- !strchr(curr->name, '[')))
+ if (!prev_mod != !curr_mod)
+ prev->end = roundup(prev->end + 4096, 4096);
+ /* Last symbol in the previous module */
+ else if (prev_mod && strcmp(prev_mod, curr_mod))
prev->end = roundup(prev->end + 4096, 4096);
else
prev->end = curr->start;
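The rewritten condition distinguishes two page-rounding cases: a kernel/module boundary, where exactly one of the neighbouring symbol names carries a '[module]' suffix, and a boundary between two different modules, where both names carry suffixes that differ. A standalone sketch of the decision (the function name is illustrative; 4096 and roundup() mirror what symbol.c uses):

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096UL
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

/* Returns the end address to assign to 'prev' given the next symbol. */
static unsigned long fixup_end(const char *prev_name, unsigned long prev_end,
			       const char *curr_name, unsigned long curr_start)
{
	const char *prev_mod = strchr(prev_name, '[');
	const char *curr_mod = strchr(curr_name, '[');

	/* One side is a module, the other is not: kernel/module boundary. */
	if (!prev_mod != !curr_mod)
		return roundup(prev_end + PAGE_SZ, PAGE_SZ);
	/* Both are modules, but different ones: module/module boundary. */
	if (prev_mod && strcmp(prev_mod, curr_mod))
		return roundup(prev_end + PAGE_SZ, PAGE_SZ);
	/* Same module, or both in kernel text: fill the gap. */
	return curr_start;
}

int main(void)
{
	/* Last symbol of one module followed by a symbol of another. */
	printf("%#lx\n",
	       fixup_end("hdmi_driver_exit [snd_hda_codec_hdmi]", 0xc1937f00,
			 "foo_init [snd_usb_audio]", 0xc1940000));
	return 0;
}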
@@ -757,7 +784,6 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
{
- struct map *curr_map;
struct symbol *pos;
int count = 0;
struct rb_root_cached old_root = dso->symbols;
@@ -770,6 +796,7 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
*root = RB_ROOT_CACHED;
while (next) {
+ struct map *curr_map;
struct dso *curr_map_dso;
char *module;
@@ -796,6 +823,7 @@ static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
pos->end -= map__start(curr_map) - map__pgoff(curr_map);
symbols__insert(&curr_map_dso->symbols, pos);
++count;
+ map__put(curr_map);
}
/* Symbols have been adjusted */
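The hunk above narrows curr_map to loop scope and drops it with map__put() at the end of each iteration, which implies the lookup that produces it returns a new reference. A toy sketch of that per-iteration get/put discipline, assuming the lookup hands back a reference (the obj type is a stand-in for perf's reference-counted struct map):

#include <stdio.h>
#include <stdlib.h>

/* Toy reference-counted object standing in for perf's struct map. */
struct obj {
	int refcnt;
	const char *name;
};

static struct obj *obj_get(struct obj *o)
{
	if (o)
		o->refcnt++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && --o->refcnt == 0) {
		printf("freeing %s\n", o->name);
		free(o);
	}
}

/* The lookup returns a new reference, as the added map__put() implies. */
static struct obj *lookup(struct obj *table)
{
	return obj_get(table);
}

int main(void)
{
	struct obj *table = malloc(sizeof(*table));

	if (!table)
		return 1;
	table->refcnt = 1;
	table->name = "vmlinux";

	for (int i = 0; i < 3; i++) {
		struct obj *cur = lookup(table);	/* reference acquired... */

		printf("using %s (refcnt=%d)\n", cur->name, cur->refcnt);
		obj_put(cur);				/* ...and dropped each pass */
	}
	obj_put(table);
	return 0;
}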
@@ -813,7 +841,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
struct map *initial_map)
{
struct machine *machine;
- struct map *curr_map = initial_map;
+ struct map *curr_map = map__get(initial_map);
struct symbol *pos;
int count = 0, moved = 0;
struct rb_root_cached *root = &dso->symbols;
@@ -857,13 +885,14 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
dso__set_loaded(curr_map_dso);
}
+ map__zput(curr_map);
curr_map = maps__find_by_name(kmaps, module);
if (curr_map == NULL) {
pr_debug("%s/proc/{kallsyms,modules} "
"inconsistency while looking "
"for \"%s\" module!\n",
machine->root_dir, module);
- curr_map = initial_map;
+ curr_map = map__get(initial_map);
goto discard_symbol;
}
curr_map_dso = map__dso(curr_map);
@@ -887,7 +916,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
* symbols at this point.
*/
goto discard_symbol;
- } else if (curr_map != initial_map) {
+ } else if (!RC_CHK_EQUAL(curr_map, initial_map)) {
char dso_name[PATH_MAX];
struct dso *ndso;
@@ -898,7 +927,8 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
}
if (count == 0) {
- curr_map = initial_map;
+ map__zput(curr_map);
+ curr_map = map__get(initial_map);
goto add_symbol;
}
@@ -912,6 +942,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
kernel_range++);
ndso = dso__new(dso_name);
+ map__zput(curr_map);
if (ndso == NULL)
return -1;
@@ -925,6 +956,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
if (maps__insert(kmaps, curr_map)) {
+ map__zput(curr_map);
dso__put(ndso);
return -1;
}
@@ -935,7 +967,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
pos->end -= delta;
}
add_symbol:
- if (curr_map != initial_map) {
+ if (!RC_CHK_EQUAL(curr_map, initial_map)) {
struct dso *curr_map_dso = map__dso(curr_map);
rb_erase_cached(&pos->rb_node, root);
@@ -950,12 +982,12 @@ discard_symbol:
symbol__delete(pos);
}
- if (curr_map != initial_map &&
+ if (!RC_CHK_EQUAL(curr_map, initial_map) &&
dso->kernel == DSO_SPACE__KERNEL_GUEST &&
machine__is_default_guest(maps__machine(kmaps))) {
dso__set_loaded(map__dso(curr_map));
}
-
+ map__put(curr_map);
return count + moved;
}
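In maps__split_kallsyms(), curr_map is now an owning pointer: it starts life as map__get(initial_map), every reassignment is preceded by map__zput() so the old reference is released and the variable is cleared, and a final map__put() runs before returning. The RC_CHK_EQUAL() checks compare the underlying objects, so the initial_map tests keep working under perf's reference-count checking build. A minimal sketch of the zput idiom with a toy refcounted type (the obj_* names are stand-ins for map__get()/map__put()/map__zput()):

#include <stdlib.h>

struct obj { int refcnt; };

static struct obj *obj_get(struct obj *o) { if (o) o->refcnt++; return o; }
static void obj_put(struct obj *o) { if (o && --o->refcnt == 0) free(o); }

/* zput: drop the reference AND clear the variable, like map__zput(). */
#define obj_zput(o) do { obj_put(o); (o) = NULL; } while (0)

int main(void)
{
	struct obj *initial = calloc(1, sizeof(*initial));
	struct obj *other = calloc(1, sizeof(*other));

	if (!initial || !other)
		return 1;
	initial->refcnt = other->refcnt = 1;

	struct obj *curr = obj_get(initial);	/* own a reference from the start */

	obj_zput(curr);				/* drop before reassigning... */
	curr = obj_get(other);			/* ...so nothing leaks */

	obj_zput(curr);				/* final drop on the exit path */
	obj_put(initial);
	obj_put(other);
	return 0;
}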
@@ -1255,7 +1287,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
{
struct maps *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
- struct map *replacement_map = NULL;
+ struct map *map_ref, *replacement_map = NULL;
struct machine *machine;
bool is_64_bit;
int err, fd;
@@ -1333,6 +1365,24 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
if (!replacement_map)
replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
+ /*
+ * Update addresses of vmlinux map. Re-insert it to ensure maps are
+ * correctly ordered. Do this before using maps__merge_in() for the
+ * remaining maps so vmlinux gets split if necessary.
+ */
+ map_ref = map__get(map);
+ maps__remove(kmaps, map_ref);
+
+ map__set_start(map_ref, map__start(replacement_map));
+ map__set_end(map_ref, map__end(replacement_map));
+ map__set_pgoff(map_ref, map__pgoff(replacement_map));
+ map__set_mapping_type(map_ref, map__mapping_type(replacement_map));
+
+ err = maps__insert(kmaps, map_ref);
+ map__put(map_ref);
+ if (err)
+ goto out_err;
+
/* Add new maps */
while (!list_empty(&md.maps)) {
struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
@@ -1340,22 +1390,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
list_del_init(&new_node->node);
- if (RC_CHK_EQUAL(new_map, replacement_map)) {
- struct map *map_ref;
-
- map__set_start(map, map__start(new_map));
- map__set_end(map, map__end(new_map));
- map__set_pgoff(map, map__pgoff(new_map));
- map__set_mapping_type(map, map__mapping_type(new_map));
- /* Ensure maps are correctly ordered */
- map_ref = map__get(map);
- maps__remove(kmaps, map_ref);
- err = maps__insert(kmaps, map_ref);
- map__put(map_ref);
- map__put(new_map);
- if (err)
- goto out_err;
- } else {
+ /* Skip replacement_map, which was already inserted above */
+ if (!RC_CHK_EQUAL(new_map, replacement_map)) {
/*
* Merge kcore map into existing maps,
* and ensure that current maps (eBPF)
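Because kmaps keeps its maps ordered by address, the vmlinux map cannot simply be mutated in place once its range changes; the hunks above follow a remove, update, re-insert pattern instead. The same discipline against a toy sorted list (struct node and the helpers are stand-ins for perf's maps API):

#include <stdio.h>

/* Toy address-ordered list standing in for perf's kmaps. */
struct node {
	unsigned long start;
	struct node *next;
};

static void insert_sorted(struct node **head, struct node *n)
{
	struct node **p = head;

	while (*p && (*p)->start < n->start)
		p = &(*p)->next;
	n->next = *p;
	*p = n;
}

static void remove_node(struct node **head, struct node *n)
{
	struct node **p = head;

	while (*p && *p != n)
		p = &(*p)->next;
	if (*p)
		*p = n->next;
}

int main(void)
{
	struct node a = { 0x1000, NULL }, b = { 0x2000, NULL }, c = { 0x3000, NULL };
	struct node *head = NULL;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);
	insert_sorted(&head, &c);

	/*
	 * Changing the key in place would break the ordering invariant,
	 * so follow the patch's pattern: remove, update, re-insert.
	 */
	remove_node(&head, &a);
	a.start = 0x5000;		/* the map gets its new range */
	insert_sorted(&head, &a);

	for (struct node *n = head; n; n = n->next)
		printf("%#lx\n", n->start);
	return 0;
}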
@@ -1937,6 +1973,10 @@ out:
return ret;
}
+/*
+ * Always takes ownership of vmlinux when vmlinux_allocated == true, even if
+ * it returns an error.
+ */
int dso__load_vmlinux(struct dso *dso, struct map *map,
const char *vmlinux, bool vmlinux_allocated)
{
@@ -1955,8 +1995,11 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
else
symtab_type = DSO_BINARY_TYPE__VMLINUX;
- if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
+ if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) {
+ if (vmlinux_allocated)
+ free((char *) vmlinux);
return -1;
+ }
/*
* dso__load_sym() may copy 'dso' which will result in the copies having
@@ -1999,7 +2042,6 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map)
err = dso__load_vmlinux(dso, map, filename, true);
if (err > 0)
goto out;
- free(filename);
}
out:
return err;
@@ -2151,7 +2193,6 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map)
err = dso__load_vmlinux(dso, map, filename, true);
if (err > 0)
return err;
- free(filename);
}
if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
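The last two hunks rely on the ownership contract documented above dso__load_vmlinux(): once a heap-allocated filename is passed with vmlinux_allocated == true, the callee frees it on every path, including failure, so the callers' free(filename) calls would now be double frees and are removed. A reduced sketch of the convention (consume_path() is illustrative, not a perf function):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * The callee takes ownership of 'path' when 'allocated' is true, even on
 * failure, mirroring dso__load_vmlinux()'s contract after this patch.
 */
static int consume_path(char *path, int allocated)
{
	int err = -1;		/* pretend loading failed */

	printf("trying %s\n", path);
	if (allocated)
		free(path);	/* freed on success AND on error */
	return err;
}

int main(void)
{
	char *filename = strdup("/boot/vmlinux");
	int err;

	if (!filename)
		return 1;

	err = consume_path(filename, 1);
	/* 'filename' is gone now, whatever 'err' says; do not free it here. */
	printf("load %s\n", err > 0 ? "succeeded" : "failed");
	return 0;
}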