author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
commit     638a9e433ecd61e64761352dbec1fa4f5874c941 (patch)
tree       fdbff74a238d7a5a7d1cef071b7230bc064b9f25 /tools/perf/util/maps.c
parent     Releasing progress-linux version 6.9.12-1~progress7.99u1. (diff)
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/perf/util/maps.c')
-rw-r--r--  tools/perf/util/maps.c  53
1 file changed, 30 insertions(+), 23 deletions(-)
diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
index ce13145a9f..eaada3e0f5 100644
--- a/tools/perf/util/maps.c
+++ b/tools/perf/util/maps.c
@@ -76,7 +76,7 @@ static void check_invariants(const struct maps *maps __maybe_unused)
/* Expect at least 1 reference count. */
assert(refcount_read(map__refcnt(map)) > 0);
- if (map__dso(map) && map__dso(map)->kernel)
+ if (map__dso(map) && dso__kernel(map__dso(map)))
assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));
if (i > 0) {
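
Note on this hunk: the map__dso(map)->kernel to dso__kernel(map__dso(map)) change is part of the wider conversion that hides struct dso fields behind accessor functions, so reference-count checking or locking can later be centralised in one place. A minimal sketch of the pattern, using hypothetical names rather than perf's real dso.h:

#include <stdbool.h>
#include <string.h>

/*
 * Hypothetical stand-in for a structure whose fields should only be
 * reached through accessors, never dereferenced directly by callers.
 */
struct sample_dso {
	bool kernel;
	const char *short_name;
};

static inline bool sample_dso__kernel(const struct sample_dso *dso)
{
	return dso->kernel;
}

static inline const char *sample_dso__short_name(const struct sample_dso *dso)
{
	return dso->short_name;
}

/* Callers branch and compare through the getters, not the raw fields. */
static bool sample_dso__is_kernel_named(const struct sample_dso *dso, const char *name)
{
	return sample_dso__kernel(dso) &&
	       strcmp(sample_dso__short_name(dso), name) == 0;
}
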
@@ -124,11 +124,6 @@ static void maps__set_maps_by_address(struct maps *maps, struct map **new)
}
-static struct map ***maps__maps_by_name_addr(struct maps *maps)
-{
- return &RC_CHK_ACCESS(maps)->maps_by_name;
-}
-
static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
{
RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
@@ -211,11 +206,6 @@ void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libun
static struct rw_semaphore *maps__lock(struct maps *maps)
{
- /*
- * When the lock is acquired or released the maps invariants should
- * hold.
- */
- check_invariants(maps);
return &RC_CHK_ACCESS(maps)->lock;
}
@@ -289,6 +279,9 @@ void maps__put(struct maps *maps)
static void __maps__free_maps_by_name(struct maps *maps)
{
+ if (!maps__maps_by_name(maps))
+ return;
+
/*
* Free everything to try to do it from the rbtree in the next search
*/
@@ -296,6 +289,9 @@ static void __maps__free_maps_by_name(struct maps *maps)
map__put(maps__maps_by_name(maps)[i]);
zfree(&RC_CHK_ACCESS(maps)->maps_by_name);
+
+ /* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false */
+ maps__set_maps_by_name_sorted(maps, false);
}
static int map__start_cmp(const void *a, const void *b)
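
Note on the __maps__free_maps_by_name() hunks above: the early return and the maps__set_maps_by_name_sorted(maps, false) call keep two pieces of state consistent, so that once the by-name array has been freed nothing can still report it as sorted. A self-contained sketch of the same idea, with a hypothetical struct name_index rather than perf's maps:

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical container with an optional, lazily built by-name index. */
struct name_index {
	char **by_name;        /* NULL until the index is built */
	unsigned int nr;
	bool by_name_sorted;   /* only meaningful while by_name != NULL */
};

/*
 * Free the index and clear the flag together, so no caller can observe
 * by_name == NULL while by_name_sorted is still true. Safe to call twice.
 */
static void name_index__free_by_name(struct name_index *idx)
{
	if (!idx->by_name)
		return;

	for (unsigned int i = 0; i < idx->nr; i++)
		free(idx->by_name[i]);
	free(idx->by_name);
	idx->by_name = NULL;
	idx->by_name_sorted = false;
}
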
@@ -346,7 +342,7 @@ static int map__strcmp(const void *a, const void *b)
const struct map *map_b = *(const struct map * const *)b;
const struct dso *dso_a = map__dso(map_a);
const struct dso *dso_b = map__dso(map_b);
- int ret = strcmp(dso_a->short_name, dso_b->short_name);
+ int ret = strcmp(dso__short_name(dso_a), dso__short_name(dso_b));
if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) {
/* Ensure distinct but name equal maps have an order. */
@@ -358,6 +354,7 @@ static int map__strcmp(const void *a, const void *b)
static int maps__sort_by_name(struct maps *maps)
{
int err = 0;
+
down_write(maps__lock(maps));
if (!maps__maps_by_name_sorted(maps)) {
struct map **maps_by_name = maps__maps_by_name(maps);
@@ -384,6 +381,7 @@ static int maps__sort_by_name(struct maps *maps)
maps__set_maps_by_name_sorted(maps, true);
}
}
+ check_invariants(maps);
up_write(maps__lock(maps));
return err;
}
@@ -485,7 +483,7 @@ static int __maps__insert(struct maps *maps, struct map *new)
}
if (map__end(new) < map__start(new))
RC_CHK_ACCESS(maps)->ends_broken = true;
- if (dso && dso->kernel) {
+ if (dso && dso__kernel(dso)) {
struct kmap *kmap = map__kmap(new);
if (kmap)
@@ -502,6 +500,7 @@ int maps__insert(struct maps *maps, struct map *map)
down_write(maps__lock(maps));
ret = __maps__insert(maps, map);
+ check_invariants(maps);
up_write(maps__lock(maps));
return ret;
}
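
Note on this hunk: together with the earlier removal of check_invariants() from maps__lock(), the checks now run once per mutation, just before up_write(), instead of on every lock access including reads. A minimal sketch of that discipline using POSIX rwlocks and a hypothetical sorted_set type:

#include <assert.h>
#include <pthread.h>

struct sorted_set {
	pthread_rwlock_t lock;
	int *vals;
	unsigned int nr, cap;
};

/* Debug check of the ordering invariant; call with the lock held. */
static void sorted_set__check_invariants(const struct sorted_set *s)
{
	for (unsigned int i = 1; i < s->nr; i++)
		assert(s->vals[i - 1] <= s->vals[i]);
}

/* Writers validate once, after the change, while still holding the lock. */
static void sorted_set__append_in_order(struct sorted_set *s, int v)
{
	pthread_rwlock_wrlock(&s->lock);
	if (s->nr < s->cap)	/* capacity management omitted for brevity */
		s->vals[s->nr++] = v;
	sorted_set__check_invariants(s);
	pthread_rwlock_unlock(&s->lock);
}

/* Readers just take the lock; no per-acquisition invariant cost. */
static int sorted_set__first(struct sorted_set *s, int *out)
{
	int ret = -1;

	pthread_rwlock_rdlock(&s->lock);
	if (s->nr) {
		*out = s->vals[0];
		ret = 0;
	}
	pthread_rwlock_unlock(&s->lock);
	return ret;
}
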
@@ -536,6 +535,7 @@ void maps__remove(struct maps *maps, struct map *map)
{
down_write(maps__lock(maps));
__maps__remove(maps, map);
+ check_invariants(maps);
up_write(maps__lock(maps));
}
@@ -602,6 +602,7 @@ void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data
else
i++;
}
+ check_invariants(maps);
up_write(maps__lock(maps));
}
@@ -740,7 +741,6 @@ static unsigned int first_ending_after(struct maps *maps, const struct map *map)
*/
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
- struct map **maps_by_address;
int err = 0;
FILE *fp = debug_file();
@@ -748,12 +748,12 @@ sort_again:
if (!maps__maps_by_address_sorted(maps))
__maps__sort_by_address(maps);
- maps_by_address = maps__maps_by_address(maps);
/*
* Iterate through entries where the end of the existing entry is
* greater-than the new map's start.
*/
for (unsigned int i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
+ struct map **maps_by_address = maps__maps_by_address(maps);
struct map *pos = maps_by_address[i];
struct map *before = NULL, *after = NULL;
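
Note on this hunk: maps_by_address is now re-read from maps__maps_by_address() on every iteration, presumably because the insertions performed further down this loop can reallocate, and therefore move, the backing array; a pointer hoisted out of the loop could be left dangling after such a realloc. The same hazard in miniature, with a hypothetical growable int_vec:

#include <stdlib.h>

struct int_vec {
	int *data;
	unsigned int nr, cap;
};

/* May realloc and therefore move vec->data. */
static int int_vec__push(struct int_vec *vec, int v)
{
	if (vec->nr == vec->cap) {
		unsigned int cap = vec->cap ? vec->cap * 2 : 8;
		int *data = realloc(vec->data, cap * sizeof(*data));

		if (!data)
			return -1;
		vec->data = data;
		vec->cap = cap;
	}
	vec->data[vec->nr++] = v;
	return 0;
}

static void duplicate_negatives(struct int_vec *vec)
{
	unsigned int orig_nr = vec->nr;

	for (unsigned int i = 0; i < orig_nr; i++) {
		/* Re-read the pointer each pass: the push below may move it. */
		int *data = vec->data;

		if (data[i] < 0 && int_vec__push(vec, data[i]) < 0)
			return;
	}
}
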
@@ -766,7 +766,7 @@ sort_again:
if (use_browser) {
pr_debug("overlapping maps in %s (disable tui for more info)\n",
- map__dso(new)->name);
+ dso__name(map__dso(new)));
} else if (verbose >= 2) {
pr_debug("overlapping maps:\n");
map__fprintf(new, fp);
@@ -820,8 +820,10 @@ sort_again:
/* Maps are still ordered, go to next one. */
i++;
if (after) {
- __maps__insert(maps, after);
+ err = __maps__insert(maps, after);
map__put(after);
+ if (err)
+ goto out_err;
if (!maps__maps_by_address_sorted(maps)) {
/*
* Sorting broken so invariants don't
@@ -850,7 +852,7 @@ sort_again:
check_invariants(maps);
}
/* Add the map. */
- __maps__insert(maps, new);
+ err = __maps__insert(maps, new);
out_err:
return err;
}
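
Note on the two hunks above: both __maps__insert() call sites in this function now feed the err value that out_err returns instead of discarding it, while the reference on the split-off map is still dropped on the failure path. A condensed sketch of that shape, with hypothetical set/entry helpers standing in for the perf ones:

struct entry { int refcnt; };

/* Stand-ins for __maps__insert() and map__put(); the insert "fails"
 * only for a missing entry, just to exercise the error path. */
static int set__insert(struct entry *e)
{
	return e ? 0 : -1;
}

static void entry__put(struct entry *e)
{
	if (e)
		e->refcnt--;
}

/*
 * Every insert feeds err, the reference on the split-off entry is
 * dropped whether or not the insert succeeded, and the first failure
 * short-circuits to the single exit label.
 */
static int insert_split_then_new(struct entry *split, struct entry *new_entry)
{
	int err = 0;

	if (split) {
		err = set__insert(split);
		entry__put(split);
		if (err)
			goto out_err;
	}
	err = set__insert(new_entry);
out_err:
	return err;
}
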
@@ -942,6 +944,8 @@ int maps__copy_from(struct maps *dest, struct maps *parent)
map__put(new);
}
}
+ check_invariants(dest);
+
up_read(maps__lock(parent));
up_write(maps__lock(dest));
return err;
@@ -987,7 +991,7 @@ static int map__strcmp_name(const void *name, const void *b)
{
const struct dso *dso = map__dso(*(const struct map **)b);
- return strcmp(name, dso->short_name);
+ return strcmp(name, dso__short_name(dso));
}
struct map *maps__find_by_name(struct maps *maps, const char *name)
@@ -1006,7 +1010,7 @@ struct map *maps__find_by_name(struct maps *maps, const char *name)
if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);
- if (dso && strcmp(dso->short_name, name) == 0) {
+ if (dso && strcmp(dso__short_name(dso), name) == 0) {
result = map__get(maps__maps_by_name(maps)[i]);
done = true;
}
@@ -1043,7 +1047,7 @@ struct map *maps__find_by_name(struct maps *maps, const char *name)
struct map *pos = maps_by_address[i];
struct dso *dso = map__dso(pos);
- if (dso && strcmp(dso->short_name, name) == 0) {
+ if (dso && strcmp(dso__short_name(dso), name) == 0) {
result = map__get(pos);
break;
}
@@ -1097,6 +1101,7 @@ void maps__fixup_end(struct maps *maps)
map__set_end(maps_by_address[n - 1], ~0ULL);
RC_CHK_ACCESS(maps)->ends_broken = false;
+ check_invariants(maps);
up_write(maps__lock(maps));
}
@@ -1147,6 +1152,8 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
/* No overlap so regular insert suffices. */
int ret = __maps__insert(kmaps, new_map);
+
+ check_invariants(kmaps);
up_write(maps__lock(kmaps));
return ret;
}
@@ -1162,8 +1169,7 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
}
maps__set_maps_by_address(kmaps, merged_maps_by_address);
maps__set_maps_by_address_sorted(kmaps, true);
- zfree(maps__maps_by_name_addr(kmaps));
- maps__set_maps_by_name_sorted(kmaps, true);
+ __maps__free_maps_by_name(kmaps);
maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated);
/* Copy entries before the new_map that can't overlap. */
@@ -1184,6 +1190,7 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map)
map__zput(kmaps_maps_by_address[i]);
free(kmaps_maps_by_address);
+ check_invariants(kmaps);
up_write(maps__lock(kmaps));
return 0;
}