author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit    01a69402cf9d38ff180345d55c2ee51c7e89fbc7
tree      b406c5242a088c4f59c6e4b719b783f43aca6ae9  /tools/perf/util/symbol.c
parent    Adding upstream version 6.7.12.
Adding upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/perf/util/symbol.c')
-rw-r--r--  tools/perf/util/symbol.c | 306
1 file changed, 29 insertions(+), 277 deletions(-)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 82cc74b935..be212ba157 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -48,11 +48,6 @@ static bool symbol__is_idle(const char *name);
int vmlinux_path__nr_entries;
char **vmlinux_path;
-struct map_list_node {
- struct list_head node;
- struct map *map;
-};
-
struct symbol_conf symbol_conf = {
.nanosecs = false,
.use_modules = true,
@@ -90,11 +85,6 @@ static enum dso_binary_type binary_type_symtab[] = {
#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
-static struct map_list_node *map_list_node__new(void)
-{
- return malloc(sizeof(struct map_list_node));
-}
-
static bool symbol_type__filter(char symbol_type)
{
symbol_type = toupper(symbol_type);
@@ -270,29 +260,6 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
curr->end = roundup(curr->start, 4096) + 4096;
}
-void maps__fixup_end(struct maps *maps)
-{
- struct map_rb_node *prev = NULL, *curr;
-
- down_write(maps__lock(maps));
-
- maps__for_each_entry(maps, curr) {
- if (prev != NULL && !map__end(prev->map))
- map__set_end(prev->map, map__start(curr->map));
-
- prev = curr;
- }
-
- /*
- * We still haven't the actual symbols, so guess the
- * last map final address.
- */
- if (curr && !map__end(curr->map))
- map__set_end(curr->map, ~0ULL);
-
- up_write(maps__lock(maps));
-}
-
struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
size_t namelen = strlen(name) + 1;
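The removed maps__fixup_end() is not lost; in the 6.8 tree the maps helpers, this one included, live in util/maps.c. Its job is to close the gaps left by kallsyms-style inputs that record only start addresses: each end-less map ends where its successor starts, and the last one is extended to the top of the address space. A self-contained sketch of that shape over a plain sorted array (simplified types, not perf's locked container):

#include <stddef.h>
#include <stdint.h>

typedef uint64_t u64;

struct map {
        u64 start, end;         /* end == 0 means "unknown" */
};

/* Give each end-less map the start of its successor as its end;
 * extend the last one to the top of the address space. */
static void fixup_ends(struct map *maps, size_t nr)
{
        for (size_t i = 0; i + 1 < nr; i++) {
                if (!maps[i].end)
                        maps[i].end = maps[i + 1].start;
        }
        if (nr && !maps[nr - 1].end)
                maps[nr - 1].end = ~0ULL;
}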
@@ -956,8 +923,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
return -1;
}
- map__set_map_ip(curr_map, identity__map_ip);
- map__set_unmap_ip(curr_map, identity__map_ip);
+ map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY);
if (maps__insert(kmaps, curr_map)) {
dso__put(ndso);
return -1;
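The two-setter replacement above is part of a wider 6.8 rework: rather than carrying map_ip/unmap_ip function pointers on every map, perf records a mapping type and branches on it when translating addresses. A minimal sketch of the idea with an assumed field layout (the real accessors go through reference-count checking macros):

#include <stdint.h>

typedef uint64_t u64;

/* Assumed shape of the enum the patch switches to. */
enum mapping_type { MAPPING_TYPE__DSO, MAPPING_TYPE__IDENTITY };

struct map {
        u64 start, end, pgoff;
        enum mapping_type mapping_type;
};

/* Memory address -> dso-relative address. */
static inline u64 map__map_ip(const struct map *map, u64 ip)
{
        if (map->mapping_type == MAPPING_TYPE__IDENTITY)
                return ip;                           /* identity: unchanged */
        return ip - map->start + map->pgoff;         /* rebase into the dso */
}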
@@ -1148,33 +1114,35 @@ out_delete_from:
return ret;
}
+static int do_validate_kcore_modules_cb(struct map *old_map, void *data)
+{
+ struct rb_root *modules = data;
+ struct module_info *mi;
+ struct dso *dso;
+
+ if (!__map__is_kmodule(old_map))
+ return 0;
+
+ dso = map__dso(old_map);
+ /* Module must be in memory at the same address */
+ mi = find_module(dso->short_name, modules);
+ if (!mi || mi->start != map__start(old_map))
+ return -EINVAL;
+
+ return 0;
+}
+
static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
{
struct rb_root modules = RB_ROOT;
- struct map_rb_node *old_node;
int err;
err = read_proc_modules(filename, &modules);
if (err)
return err;
- maps__for_each_entry(kmaps, old_node) {
- struct map *old_map = old_node->map;
- struct module_info *mi;
- struct dso *dso;
+ err = maps__for_each_map(kmaps, do_validate_kcore_modules_cb, &modules);
- if (!__map__is_kmodule(old_map)) {
- continue;
- }
- dso = map__dso(old_map);
- /* Module must be in memory at the same address */
- mi = find_module(dso->short_name, &modules);
- if (!mi || mi->start != map__start(old_map)) {
- err = -EINVAL;
- goto out;
- }
- }
-out:
delete_modules(&modules);
return err;
}
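This hunk shows the conversion pattern used repeatedly in this diff: an open-coded maps__for_each_entry() walk over rbtree nodes becomes a maps__for_each_map() call taking a callback, where a nonzero return (here -EINVAL) stops the walk and propagates as the error. A self-contained sketch of that iteration contract, assuming a flat array rather than perf's real, locked container:

#include <stddef.h>

struct map;

struct maps {
        struct map **maps;      /* assumed flat storage */
        size_t nr_maps;
};

/* Visit every map; a nonzero return from the callback ends the walk
 * and becomes the return value of the iteration itself. */
static int maps__for_each_map(struct maps *maps,
                              int (*cb)(struct map *map, void *data),
                              void *data)
{
        int err = 0;

        for (size_t i = 0; !err && i < maps->nr_maps; i++)
                err = cb(maps->maps[i], data);
        return err;
}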
@@ -1271,101 +1239,15 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
return 0;
}
-/*
- * Merges map into maps by splitting the new map within the existing map
- * regions.
- */
-int maps__merge_in(struct maps *kmaps, struct map *new_map)
+static bool remove_old_maps(struct map *map, void *data)
{
- struct map_rb_node *rb_node;
- LIST_HEAD(merged);
- int err = 0;
-
- maps__for_each_entry(kmaps, rb_node) {
- struct map *old_map = rb_node->map;
-
- /* no overload with this one */
- if (map__end(new_map) < map__start(old_map) ||
- map__start(new_map) >= map__end(old_map))
- continue;
-
- if (map__start(new_map) < map__start(old_map)) {
- /*
- * |new......
- * |old....
- */
- if (map__end(new_map) < map__end(old_map)) {
- /*
- * |new......| -> |new..|
- * |old....| -> |old....|
- */
- map__set_end(new_map, map__start(old_map));
- } else {
- /*
- * |new.............| -> |new..| |new..|
- * |old....| -> |old....|
- */
- struct map_list_node *m = map_list_node__new();
-
- if (!m) {
- err = -ENOMEM;
- goto out;
- }
-
- m->map = map__clone(new_map);
- if (!m->map) {
- free(m);
- err = -ENOMEM;
- goto out;
- }
-
- map__set_end(m->map, map__start(old_map));
- list_add_tail(&m->node, &merged);
- map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
- map__set_start(new_map, map__end(old_map));
- }
- } else {
- /*
- * |new......
- * |old....
- */
- if (map__end(new_map) < map__end(old_map)) {
- /*
- * |new..| -> x
- * |old.........| -> |old.........|
- */
- map__put(new_map);
- new_map = NULL;
- break;
- } else {
- /*
- * |new......| -> |new...|
- * |old....| -> |old....|
- */
- map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
- map__set_start(new_map, map__end(old_map));
- }
- }
- }
-
-out:
- while (!list_empty(&merged)) {
- struct map_list_node *old_node;
-
- old_node = list_entry(merged.next, struct map_list_node, node);
- list_del_init(&old_node->node);
- if (!err)
- err = maps__insert(kmaps, old_node->map);
- map__put(old_node->map);
- free(old_node);
- }
+ const struct map *map_to_save = data;
- if (new_map) {
- if (!err)
- err = maps__insert(kmaps, new_map);
- map__put(new_map);
- }
- return err;
+ /*
+ * We need to preserve eBPF maps even if they are covered by kcore,
+ * because we need to access eBPF dso for source data.
+ */
+ return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map);
}
static int dso__load_kcore(struct dso *dso, struct map *map,
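maps__merge_in() likewise moves to util/maps.c rather than disappearing. Its diagrams enumerate how a new kcore map interacts with each overlapping module map: trimmed at the end, split around it, dropped entirely, or trimmed at the start. That case analysis condenses to the sketch below (assumed u64 bounds, overlap already established):

#include <stdint.h>

typedef uint64_t u64;

/* The four outcomes drawn in the removed diagrams. */
enum merge_action {
        TRIM_END,       /* cut new off at old's start */
        SPLIT,          /* emit a head clone, continue with the tail */
        DROP,           /* new lies entirely inside old: discard it */
        TRIM_START,     /* advance new's start past old's end */
};

static enum merge_action classify(u64 new_start, u64 new_end,
                                  u64 old_start, u64 old_end)
{
        if (new_start < old_start)
                return new_end < old_end ? TRIM_END : SPLIT;
        return new_end < old_end ? DROP : TRIM_START;
}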
@@ -1374,7 +1256,6 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
struct maps *kmaps = map__kmaps(map);
struct kcore_mapfn_data md;
struct map *replacement_map = NULL;
- struct map_rb_node *old_node, *next;
struct machine *machine;
bool is_64_bit;
int err, fd;
@@ -1421,17 +1302,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
}
/* Remove old maps */
- maps__for_each_entry_safe(kmaps, old_node, next) {
- struct map *old_map = old_node->map;
-
- /*
- * We need to preserve eBPF maps even if they are
- * covered by kcore, because we need to access
- * eBPF dso for source data.
- */
- if (old_map != map && !__map__is_bpf_prog(old_map))
- maps__remove(kmaps, old_map);
- }
+ maps__remove_maps(kmaps, remove_old_maps, map);
machine->trampolines_mapped = false;
/* Find the kernel map using the '_stext' symbol */
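maps__remove_maps() inverts the control flow of the loop it replaces: the caller passes a predicate returning true for each map to remove, and remove_old_maps() above keeps only the map being loaded plus eBPF program maps, whose dsos are still needed for source data. A rough sketch of that contract, over the same assumed array-backed container as the earlier sketches:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct map;

struct maps {
        struct map **maps;
        size_t nr_maps;
};

/* Remove every map the predicate selects; keep the rest in order. */
static void maps__remove_maps(struct maps *maps,
                              bool (*cb)(struct map *map, void *data),
                              void *data)
{
        size_t i = 0;

        while (i < maps->nr_maps) {
                if (cb(maps->maps[i], data))
                        /* drop entry i: shift the tail down one slot */
                        memmove(&maps->maps[i], &maps->maps[i + 1],
                                (--maps->nr_maps - i) * sizeof(*maps->maps));
                else
                        i++;
        }
}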
@@ -1475,8 +1346,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
map__set_start(map, map__start(new_map));
map__set_end(map, map__end(new_map));
map__set_pgoff(map, map__pgoff(new_map));
- map__set_map_ip(map, map__map_ip_ptr(new_map));
- map__set_unmap_ip(map, map__unmap_ip_ptr(new_map));
+ map__set_mapping_type(map, map__mapping_type(new_map));
/* Ensure maps are correctly ordered */
map_ref = map__get(map);
maps__remove(kmaps, map_ref);
@@ -2067,124 +1937,6 @@ out:
return ret;
}
-static int map__strcmp(const void *a, const void *b)
-{
- const struct map *map_a = *(const struct map **)a;
- const struct map *map_b = *(const struct map **)b;
- const struct dso *dso_a = map__dso(map_a);
- const struct dso *dso_b = map__dso(map_b);
- int ret = strcmp(dso_a->short_name, dso_b->short_name);
-
- if (ret == 0 && map_a != map_b) {
- /*
- * Ensure distinct but name equal maps have an order in part to
- * aid reference counting.
- */
- ret = (int)map__start(map_a) - (int)map__start(map_b);
- if (ret == 0)
- ret = (int)((intptr_t)map_a - (intptr_t)map_b);
- }
-
- return ret;
-}
-
-static int map__strcmp_name(const void *name, const void *b)
-{
- const struct dso *dso = map__dso(*(const struct map **)b);
-
- return strcmp(name, dso->short_name);
-}
-
-void __maps__sort_by_name(struct maps *maps)
-{
- qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
-}
-
-static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
-{
- struct map_rb_node *rb_node;
- struct map **maps_by_name = realloc(maps__maps_by_name(maps),
- maps__nr_maps(maps) * sizeof(struct map *));
- int i = 0;
-
- if (maps_by_name == NULL)
- return -1;
-
- up_read(maps__lock(maps));
- down_write(maps__lock(maps));
-
- RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
- RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);
-
- maps__for_each_entry(maps, rb_node)
- maps_by_name[i++] = map__get(rb_node->map);
-
- __maps__sort_by_name(maps);
-
- up_write(maps__lock(maps));
- down_read(maps__lock(maps));
-
- return 0;
-}
-
-static struct map *__maps__find_by_name(struct maps *maps, const char *name)
-{
- struct map **mapp;
-
- if (maps__maps_by_name(maps) == NULL &&
- map__groups__sort_by_name_from_rbtree(maps))
- return NULL;
-
- mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
- sizeof(*mapp), map__strcmp_name);
- if (mapp)
- return *mapp;
- return NULL;
-}
-
-struct map *maps__find_by_name(struct maps *maps, const char *name)
-{
- struct map_rb_node *rb_node;
- struct map *map;
-
- down_read(maps__lock(maps));
-
-
- if (RC_CHK_ACCESS(maps)->last_search_by_name) {
- const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);
-
- if (strcmp(dso->short_name, name) == 0) {
- map = RC_CHK_ACCESS(maps)->last_search_by_name;
- goto out_unlock;
- }
- }
- /*
- * If we have maps->maps_by_name, then the name isn't in the rbtree,
- * as maps->maps_by_name mirrors the rbtree when lookups by name are
- * made.
- */
- map = __maps__find_by_name(maps, name);
- if (map || maps__maps_by_name(maps) != NULL)
- goto out_unlock;
-
- /* Fallback to traversing the rbtree... */
- maps__for_each_entry(maps, rb_node) {
- struct dso *dso;
-
- map = rb_node->map;
- dso = map__dso(map);
- if (strcmp(dso->short_name, name) == 0) {
- RC_CHK_ACCESS(maps)->last_search_by_name = map;
- goto out_unlock;
- }
- }
- map = NULL;
-
-out_unlock:
- up_read(maps__lock(maps));
- return map;
-}
-
int dso__load_vmlinux(struct dso *dso, struct map *map,
const char *vmlinux, bool vmlinux_allocated)
{
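Finally, the removed name-lookup machinery (again relocated to util/maps.c) maintains a second array sorted by dso short name so lookups are a bsearch instead of an rbtree scan, fronted by a one-entry cache for repeated queries. The core lookup, sketched with flattened types (the real code reaches the name through map__dso()):

#include <stdlib.h>
#include <string.h>

/* Flattened stand-in: perf stores the name on the dso, not the map. */
struct map {
        const char *short_name;
};

static int map__strcmp_name(const void *name, const void *entry)
{
        const struct map *const *mapp = entry;

        return strcmp(name, (*mapp)->short_name);
}

/* O(log n) lookup over an array kept sorted by short name. */
static struct map *maps__find_by_name(struct map **by_name, size_t nr,
                                      const char *name)
{
        struct map **mapp = bsearch(name, by_name, nr, sizeof(*by_name),
                                    map__strcmp_name);

        return mapp ? *mapp : NULL;
}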