Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/api/fs/fs.c                              |  80
-rw-r--r--  tools/lib/api/io.h                                 |  11
-rw-r--r--  tools/lib/bpf/bpf_core_read.h                      |  32
-rw-r--r--  tools/lib/bpf/bpf_helpers.h                        |   3
-rw-r--r--  tools/lib/bpf/elf.c                                |   5
-rw-r--r--  tools/lib/bpf/libbpf.c                             | 726
-rw-r--r--  tools/lib/bpf/libbpf.map                           |   3
-rw-r--r--  tools/lib/bpf/libbpf_internal.h                    |  17
-rw-r--r--  tools/lib/bpf/libbpf_version.h                     |   2
-rw-r--r--  tools/lib/bpf/linker.c                             |  27
-rw-r--r--  tools/lib/perf/Documentation/examples/sampling.c   |   2
-rw-r--r--  tools/lib/perf/Documentation/libperf-sampling.txt  |   2
-rw-r--r--  tools/lib/perf/Documentation/libperf.txt           |   4
-rw-r--r--  tools/lib/perf/cpumap.c                            |  65
-rw-r--r--  tools/lib/perf/evlist.c                            |  24
-rw-r--r--  tools/lib/perf/evsel.c                             |   2
-rw-r--r--  tools/lib/perf/include/internal/evlist.h           |   4
-rw-r--r--  tools/lib/perf/include/internal/mmap.h             |   3
-rw-r--r--  tools/lib/perf/include/perf/cpumap.h               |  40
-rw-r--r--  tools/lib/perf/libperf.map                         |   6
-rw-r--r--  tools/lib/perf/mmap.c                              |  20
-rw-r--r--  tools/lib/perf/tests/test-cpumap.c                 |   4
-rw-r--r--  tools/lib/perf/tests/test-evlist.c                 |   6
-rw-r--r--  tools/lib/perf/tests/test-evsel.c                  |   2
24 files changed, 826 insertions, 264 deletions
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 5cb0eeec2c..337fde770e 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -16,6 +16,7 @@
#include <sys/mount.h>
#include "fs.h"
+#include "../io.h"
#include "debug-internal.h"
#define _STR(x) #x
@@ -344,53 +345,24 @@ int filename__read_ull(const char *filename, unsigned long long *value)
return filename__read_ull_base(filename, value, 0);
}
-#define STRERR_BUFSIZE 128 /* For the buffer size of strerror_r */
-
int filename__read_str(const char *filename, char **buf, size_t *sizep)
{
- size_t size = 0, alloc_size = 0;
- void *bf = NULL, *nbf;
- int fd, n, err = 0;
- char sbuf[STRERR_BUFSIZE];
+ struct io io;
+ char bf[128];
+ int err;
- fd = open(filename, O_RDONLY);
- if (fd < 0)
+ io.fd = open(filename, O_RDONLY);
+ if (io.fd < 0)
return -errno;
-
- do {
- if (size == alloc_size) {
- alloc_size += BUFSIZ;
- nbf = realloc(bf, alloc_size);
- if (!nbf) {
- err = -ENOMEM;
- break;
- }
-
- bf = nbf;
- }
-
- n = read(fd, bf + size, alloc_size - size);
- if (n < 0) {
- if (size) {
- pr_warn("read failed %d: %s\n", errno,
- strerror_r(errno, sbuf, sizeof(sbuf)));
- err = 0;
- } else
- err = -errno;
-
- break;
- }
-
- size += n;
- } while (n > 0);
-
- if (!err) {
- *sizep = size;
- *buf = bf;
+ io__init(&io, io.fd, bf, sizeof(bf));
+ *buf = NULL;
+ err = io__getdelim(&io, buf, sizep, /*delim=*/-1);
+ if (err < 0) {
+ free(*buf);
+ *buf = NULL;
} else
- free(bf);
-
- close(fd);
+ err = 0;
+ close(io.fd);
return err;
}
@@ -475,15 +447,22 @@ int sysfs__read_str(const char *entry, char **buf, size_t *sizep)
int sysfs__read_bool(const char *entry, bool *value)
{
- char *buf;
- size_t size;
- int ret;
+ struct io io;
+ char bf[16];
+ int ret = 0;
+ char path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+
+ if (!sysfs)
+ return -1;
- ret = sysfs__read_str(entry, &buf, &size);
- if (ret < 0)
- return ret;
+ snprintf(path, sizeof(path), "%s/%s", sysfs, entry);
+ io.fd = open(path, O_RDONLY);
+ if (io.fd < 0)
+ return -errno;
- switch (buf[0]) {
+ io__init(&io, io.fd, bf, sizeof(bf));
+ switch (io__get_char(&io)) {
case '1':
case 'y':
case 'Y':
@@ -497,8 +476,7 @@ int sysfs__read_bool(const char *entry, bool *value)
default:
ret = -1;
}
-
- free(buf);
+ close(io.fd);
return ret;
}
diff --git a/tools/lib/api/io.h b/tools/lib/api/io.h
index 2a7fe97588..84adf81020 100644
--- a/tools/lib/api/io.h
+++ b/tools/lib/api/io.h
@@ -141,8 +141,8 @@ static inline int io__get_dec(struct io *io, __u64 *dec)
}
}
-/* Read up to and including the first newline following the pattern of getline. */
-static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_len_out)
+/* Read up to and including the first delim. */
+static inline ssize_t io__getdelim(struct io *io, char **line_out, size_t *line_len_out, int delim)
{
char buf[128];
int buf_pos = 0;
@@ -152,7 +152,7 @@ static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_l
/* TODO: reuse previously allocated memory. */
free(*line_out);
- while (ch != '\n') {
+ while (ch != delim) {
ch = io__get_char(io);
if (ch < 0)
@@ -185,4 +185,9 @@ err_out:
return -ENOMEM;
}
+static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_len_out)
+{
+ return io__getdelim(io, line_out, line_len_out, /*delim=*/'\n');
+}
+
#endif /* __API_IO__ */
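
For reference, a minimal usage sketch of the generalized helper: since no byte value ever equals -1, passing delim=-1 makes io__getdelim() scan until EOF, which is exactly how filename__read_str() above slurps a whole file. The helper name read_whole_file is hypothetical, and this assumes tools/lib/api is on the include path:

```c
/* Hypothetical sketch: read a whole file with io__getdelim(),
 * mirroring the filename__read_str() rewrite above.
 */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include <api/io.h>

static int read_whole_file(const char *path, char **out, size_t *sizep)
{
	struct io io;
	char bf[128];
	ssize_t ret;

	io.fd = open(path, O_RDONLY);
	if (io.fd < 0)
		return -1;
	io__init(&io, io.fd, bf, sizeof(bf));
	*out = NULL;
	/* no byte equals -1, so the scan only stops at EOF */
	ret = io__getdelim(&io, out, sizep, /*delim=*/-1);
	close(io.fd);
	if (ret < 0) {
		free(*out);
		*out = NULL;
		return -1;
	}
	return 0;
}
```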
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index 1ac57bb7ac..7325a12692 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -111,6 +111,38 @@ enum bpf_enum_value_kind {
val; \
})
+/*
+ * Write to a bitfield, identified by s->field.
+ * This is the inverse of BPF_CORE_READ_BITFIELD().
+ */
+#define BPF_CORE_WRITE_BITFIELD(s, field, new_val) ({ \
+ void *p = (void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
+ unsigned int byte_size = __CORE_RELO(s, field, BYTE_SIZE); \
+ unsigned int lshift = __CORE_RELO(s, field, LSHIFT_U64); \
+ unsigned int rshift = __CORE_RELO(s, field, RSHIFT_U64); \
+ unsigned long long mask, val, nval = new_val; \
+ unsigned int rpad = rshift - lshift; \
+ \
+ asm volatile("" : "+r"(p)); \
+ \
+ switch (byte_size) { \
+ case 1: val = *(unsigned char *)p; break; \
+ case 2: val = *(unsigned short *)p; break; \
+ case 4: val = *(unsigned int *)p; break; \
+ case 8: val = *(unsigned long long *)p; break; \
+ } \
+ \
+ mask = (~0ULL << rshift) >> lshift; \
+ val = (val & ~mask) | ((nval << rpad) & mask); \
+ \
+ switch (byte_size) { \
+ case 1: *(unsigned char *)p = val; break; \
+ case 2: *(unsigned short *)p = val; break; \
+ case 4: *(unsigned int *)p = val; break; \
+ case 8: *(unsigned long long *)p = val; break; \
+ } \
+})
+
#define ___bpf_field_ref1(field) (field)
#define ___bpf_field_ref2(type, field) (((typeof(type) *)0)->field)
#define ___bpf_field_ref(args...) \
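
The mask arithmetic in BPF_CORE_WRITE_BITFIELD() is easiest to check outside CO-RE. Below is a standalone sketch with hand-picked values standing in for the BYTE_SIZE/LSHIFT_U64/RSHIFT_U64 relocation outputs (a hypothetical 5-bit field at bit offset 3 of an 8-byte load):

```c
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int lshift = 64 - 3 - 5;    /* 64 - bit_off - bit_sz */
	unsigned int rshift = 64 - 5;        /* 64 - bit_sz */
	unsigned int rpad = rshift - lshift; /* == bit_off == 3 */
	unsigned long long val = ~0ULL, nval = 0x15, mask;

	/* w ones at the top, shifted down to bits [3, 8) */
	mask = (~0ULL << rshift) >> lshift;
	assert(mask == 0xf8);

	/* splice the new field value into the surrounding word */
	val = (val & ~mask) | ((nval << rpad) & mask);

	/* read it back the BPF_CORE_READ_BITFIELD() way */
	assert(((val << lshift) >> rshift) == 0x15);
	printf("field=0x%llx word=0x%llx\n", nval, val);
	return 0;
}
```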
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 77ceea575d..2324cc42b0 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -188,6 +188,9 @@ enum libbpf_tristate {
!!sym; \
})
+#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))
+#define __arg_nonnull __attribute__((btf_decl_tag("arg:nonnull")))
+
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
#endif
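
A hedged sketch of how these tags are meant to be used: a global (non-static) subprog whose void * parameter is marked with __arg_ctx, so the verifier (or the libbpf fallback added to libbpf.c below) treats it as PTR_TO_CTX. Section and function names here are illustrative only:

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

/* global subprog; __arg_ctx marks 'ctx' as the program context */
__noinline int record_ip(void *ctx __arg_ctx)
{
	/* bpf_get_func_ip() requires a PTR_TO_CTX argument */
	bpf_printk("ip=%lx", bpf_get_func_ip(ctx));
	return 0;
}

SEC("kprobe/do_sys_openat2")
int BPF_KPROBE(handle_open)
{
	return record_ip(ctx);
}
```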
diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c
index 2a62bf411b..b02faec748 100644
--- a/tools/lib/bpf/elf.c
+++ b/tools/lib/bpf/elf.c
@@ -407,7 +407,8 @@ static int symbol_cmp(const void *a, const void *b)
* size, that needs to be released by the caller.
*/
int elf_resolve_syms_offsets(const char *binary_path, int cnt,
- const char **syms, unsigned long **poffsets)
+ const char **syms, unsigned long **poffsets,
+ int st_type)
{
int sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
int err = 0, i, cnt_done = 0;
@@ -438,7 +439,7 @@ int elf_resolve_syms_offsets(const char *binary_path, int cnt,
struct elf_sym_iter iter;
struct elf_sym *sym;
- err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], STT_FUNC);
+ err = elf_sym_iter_new(&iter, elf_fd.elf, binary_path, sh_types[i], st_type);
if (err == -ENOENT)
continue;
if (err)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index ca83af916c..92bca96587 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -33,6 +33,7 @@
#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
+#include <linux/bpf_perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
@@ -1504,6 +1505,16 @@ static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *nam
return ERR_PTR(-ENOENT);
}
+static int create_placeholder_fd(void)
+{
+ int fd;
+
+ fd = ensure_good_fd(memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
+ if (fd < 0)
+ return -errno;
+ return fd;
+}
+
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
struct bpf_map *map;
@@ -1516,7 +1527,21 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
map = &obj->maps[obj->nr_maps++];
map->obj = obj;
- map->fd = -1;
+ /* Preallocate map FD without actually creating BPF map just yet.
+ * These map FD "placeholders" will be reused later without changing
+ * FD value when map is actually created in the kernel.
+ *
+ * This is useful to be able to perform BPF program relocations
+ * without having to create BPF maps before that step. This allows us
+ * to finalize and load BTF very late in BPF object's loading phase,
+ * right before BPF maps have to be created and BPF programs have to
+ * be loaded. By having these map FD placeholders we can perform all
+ * the sanitizations, relocations, and any other adjustments before we
+ * start creating actual BPF kernel objects (BTF, maps, progs).
+ */
+ map->fd = create_placeholder_fd();
+ if (map->fd < 0)
+ return ERR_PTR(map->fd);
map->inner_map_fd = -1;
map->autocreate = true;
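
The placeholder trick described in the comment above is plain POSIX FD plumbing. A standalone userspace sketch of the same idea, with open("/dev/null") standing in (hypothetically) for the later bpf_map_create() and dup2() doing what reuse_fd() does below:

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* reserve a stable FD number before the real object exists */
	int placeholder = memfd_create("placeholder", MFD_CLOEXEC);
	/* stands in for the eventual bpf_map_create() result */
	int real = open("/dev/null", O_RDONLY);

	if (placeholder < 0 || real < 0)
		return 1;

	/* repoint the recorded FD number at the real object, in place */
	if (dup2(real, placeholder) < 0)
		return 1;
	close(real);

	printf("fd %d now refers to the real object\n", placeholder);
	close(placeholder);
	return 0;
}
```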
@@ -2608,7 +2633,9 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
map->inner_map = calloc(1, sizeof(*map->inner_map));
if (!map->inner_map)
return -ENOMEM;
- map->inner_map->fd = -1;
+ map->inner_map->fd = create_placeholder_fd();
+ if (map->inner_map->fd < 0)
+ return map->inner_map->fd;
map->inner_map->sec_idx = sec_idx;
map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
if (!map->inner_map->name)
@@ -3055,9 +3082,15 @@ static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
return false;
}
+static bool map_needs_vmlinux_btf(struct bpf_map *map)
+{
+ return bpf_map__is_struct_ops(map);
+}
+
static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
{
struct bpf_program *prog;
+ struct bpf_map *map;
int i;
/* CO-RE relocations need kernel BTF, only when btf_custom_path
@@ -3082,6 +3115,11 @@ static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
return true;
}
+ bpf_object__for_each_map(map, obj) {
+ if (map_needs_vmlinux_btf(map))
+ return true;
+ }
+
return false;
}
@@ -3156,86 +3194,6 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
}
}
- if (!kernel_supports(obj, FEAT_BTF_DECL_TAG))
- goto skip_exception_cb;
- for (i = 0; i < obj->nr_programs; i++) {
- struct bpf_program *prog = &obj->programs[i];
- int j, k, n;
-
- if (prog_is_subprog(obj, prog))
- continue;
- n = btf__type_cnt(obj->btf);
- for (j = 1; j < n; j++) {
- const char *str = "exception_callback:", *name;
- size_t len = strlen(str);
- struct btf_type *t;
-
- t = btf_type_by_id(obj->btf, j);
- if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
- continue;
-
- name = btf__str_by_offset(obj->btf, t->name_off);
- if (strncmp(name, str, len))
- continue;
-
- t = btf_type_by_id(obj->btf, t->type);
- if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
- pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
- prog->name);
- return -EINVAL;
- }
- if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)))
- continue;
- /* Multiple callbacks are specified for the same prog,
- * the verifier will eventually return an error for this
- * case, hence simply skip appending a subprog.
- */
- if (prog->exception_cb_idx >= 0) {
- prog->exception_cb_idx = -1;
- break;
- }
-
- name += len;
- if (str_is_empty(name)) {
- pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
- prog->name);
- return -EINVAL;
- }
-
- for (k = 0; k < obj->nr_programs; k++) {
- struct bpf_program *subprog = &obj->programs[k];
-
- if (!prog_is_subprog(obj, subprog))
- continue;
- if (strcmp(name, subprog->name))
- continue;
- /* Enforce non-hidden, as from verifier point of
- * view it expects global functions, whereas the
- * mark_btf_static fixes up linkage as static.
- */
- if (!subprog->sym_global || subprog->mark_btf_static) {
- pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
- prog->name, subprog->name);
- return -EINVAL;
- }
- /* Let's see if we already saw a static exception callback with the same name */
- if (prog->exception_cb_idx >= 0) {
- pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
- prog->name, subprog->name);
- return -EINVAL;
- }
- prog->exception_cb_idx = k;
- break;
- }
-
- if (prog->exception_cb_idx >= 0)
- continue;
- pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
- return -ENOENT;
- }
- }
-skip_exception_cb:
-
sanitize = btf_needs_sanitization(obj);
if (sanitize) {
const void *raw_data;
@@ -4539,14 +4497,12 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
goto err_free_new_name;
}
- err = zclose(map->fd);
- if (err) {
- err = -errno;
- goto err_close_new_fd;
- }
+ err = reuse_fd(map->fd, new_fd);
+ if (err)
+ goto err_free_new_name;
+
free(map->name);
- map->fd = new_fd;
map->name = new_name;
map->def.type = info.type;
map->def.key_size = info.key_size;
@@ -4560,8 +4516,6 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
return 0;
-err_close_new_fd:
- close(new_fd);
err_free_new_name:
free(new_name);
return libbpf_err(err);
@@ -5190,12 +5144,17 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
static void bpf_map__destroy(struct bpf_map *map);
+static bool map_is_created(const struct bpf_map *map)
+{
+ return map->obj->loaded || map->reused;
+}
+
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
LIBBPF_OPTS(bpf_map_create_opts, create_attr);
struct bpf_map_def *def = &map->def;
const char *map_name = NULL;
- int err = 0;
+ int err = 0, map_fd;
if (kernel_supports(obj, FEAT_PROG_NAME))
map_name = map->name;
@@ -5224,7 +5183,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
map->name, err);
return err;
}
- map->inner_map_fd = bpf_map__fd(map->inner_map);
+ map->inner_map_fd = map->inner_map->fd;
}
if (map->inner_map_fd >= 0)
create_attr.inner_map_fd = map->inner_map_fd;
@@ -5257,17 +5216,19 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
bpf_gen__map_create(obj->gen_loader, def->type, map_name,
def->key_size, def->value_size, def->max_entries,
&create_attr, is_inner ? -1 : map - obj->maps);
- /* Pretend to have valid FD to pass various fd >= 0 checks.
- * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
+ /* We keep pretending we have a valid FD to pass various fd >= 0
+ * checks by just keeping original placeholder FDs in place.
+ * See bpf_object__add_map() comment.
+ * This placeholder fd will not be used with any syscall and
+ * will be reset to -1 eventually.
*/
- map->fd = 0;
+ map_fd = map->fd;
} else {
- map->fd = bpf_map_create(def->type, map_name,
- def->key_size, def->value_size,
- def->max_entries, &create_attr);
+ map_fd = bpf_map_create(def->type, map_name,
+ def->key_size, def->value_size,
+ def->max_entries, &create_attr);
}
- if (map->fd < 0 && (create_attr.btf_key_type_id ||
- create_attr.btf_value_type_id)) {
+ if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
char *cp, errmsg[STRERR_BUFSIZE];
err = -errno;
@@ -5279,13 +5240,11 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.btf_value_type_id = 0;
map->btf_key_type_id = 0;
map->btf_value_type_id = 0;
- map->fd = bpf_map_create(def->type, map_name,
- def->key_size, def->value_size,
- def->max_entries, &create_attr);
+ map_fd = bpf_map_create(def->type, map_name,
+ def->key_size, def->value_size,
+ def->max_entries, &create_attr);
}
- err = map->fd < 0 ? -errno : 0;
-
if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
if (obj->gen_loader)
map->inner_map->fd = -1;
@@ -5293,7 +5252,19 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
zfree(&map->inner_map);
}
- return err;
+ if (map_fd < 0)
+ return map_fd;
+
+ /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
+ if (map->fd == map_fd)
+ return 0;
+
+ /* Keep placeholder FD value but now point it to the BPF map object.
+ * This way everything that relied on this map's FD (e.g., relocated
+ * ldimm64 instructions) will stay valid and won't need adjustments.
+ * map->fd stays valid but now points to what map_fd points to.
+ */
+ return reuse_fd(map->fd, map_fd);
}
static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
@@ -5307,7 +5278,7 @@ static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
continue;
targ_map = map->init_slots[i];
- fd = bpf_map__fd(targ_map);
+ fd = targ_map->fd;
if (obj->gen_loader) {
bpf_gen__populate_outer_map(obj->gen_loader,
@@ -5377,10 +5348,8 @@ static int bpf_object_init_prog_arrays(struct bpf_object *obj)
continue;
err = init_prog_array_slots(obj, map);
- if (err < 0) {
- zclose(map->fd);
+ if (err < 0)
return err;
- }
}
return 0;
}
@@ -5458,7 +5427,7 @@ retry:
}
}
- if (map->fd >= 0) {
+ if (map->reused) {
pr_debug("map '%s': skipping creation (preset fd=%d)\n",
map->name, map->fd);
} else {
@@ -5471,25 +5440,20 @@ retry:
if (bpf_map__is_internal(map)) {
err = bpf_object__populate_internal_map(obj, map);
- if (err < 0) {
- zclose(map->fd);
+ if (err < 0)
goto err_out;
- }
}
if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
err = init_map_in_map_slots(obj, map);
- if (err < 0) {
- zclose(map->fd);
+ if (err < 0)
goto err_out;
- }
}
}
if (map->pin_path && !map->pinned) {
err = bpf_map__pin(map, NULL);
if (err) {
- zclose(map->fd);
if (!retried && err == -EEXIST) {
retried = true;
goto retry;
@@ -6222,7 +6186,7 @@ reloc_prog_func_and_line_info(const struct bpf_object *obj,
int err;
/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
- * supprot func/line info
+ * support func/line info
*/
if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
return 0;
@@ -6622,8 +6586,471 @@ static void bpf_object__sort_relos(struct bpf_object *obj)
}
}
-static int
-bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
+static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog)
+{
+ const char *str = "exception_callback:";
+ size_t pfx_len = strlen(str);
+ int i, j, n;
+
+ if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG))
+ return 0;
+
+ n = btf__type_cnt(obj->btf);
+ for (i = 1; i < n; i++) {
+ const char *name;
+ struct btf_type *t;
+
+ t = btf_type_by_id(obj->btf, i);
+ if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
+ continue;
+
+ name = btf__str_by_offset(obj->btf, t->name_off);
+ if (strncmp(name, str, pfx_len) != 0)
+ continue;
+
+ t = btf_type_by_id(obj->btf, t->type);
+ if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
+ pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
+ prog->name);
+ return -EINVAL;
+ }
+ if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0)
+ continue;
+ /* Multiple callbacks are specified for the same prog,
+ * the verifier will eventually return an error for this
+ * case, hence simply skip appending a subprog.
+ */
+ if (prog->exception_cb_idx >= 0) {
+ prog->exception_cb_idx = -1;
+ break;
+ }
+
+ name += pfx_len;
+ if (str_is_empty(name)) {
+ pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
+ prog->name);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < obj->nr_programs; j++) {
+ struct bpf_program *subprog = &obj->programs[j];
+
+ if (!prog_is_subprog(obj, subprog))
+ continue;
+ if (strcmp(name, subprog->name) != 0)
+ continue;
+ /* Enforce non-hidden, as from verifier point of
+ * view it expects global functions, whereas the
+ * mark_btf_static fixes up linkage as static.
+ */
+ if (!subprog->sym_global || subprog->mark_btf_static) {
+ pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
+ prog->name, subprog->name);
+ return -EINVAL;
+ }
+ /* Let's see if we already saw a static exception callback with the same name */
+ if (prog->exception_cb_idx >= 0) {
+ pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
+ prog->name, subprog->name);
+ return -EINVAL;
+ }
+ prog->exception_cb_idx = j;
+ break;
+ }
+
+ if (prog->exception_cb_idx >= 0)
+ continue;
+
+ pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static struct {
+ enum bpf_prog_type prog_type;
+ const char *ctx_name;
+} global_ctx_map[] = {
+ { BPF_PROG_TYPE_CGROUP_DEVICE, "bpf_cgroup_dev_ctx" },
+ { BPF_PROG_TYPE_CGROUP_SKB, "__sk_buff" },
+ { BPF_PROG_TYPE_CGROUP_SOCK, "bpf_sock" },
+ { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, "bpf_sock_addr" },
+ { BPF_PROG_TYPE_CGROUP_SOCKOPT, "bpf_sockopt" },
+ { BPF_PROG_TYPE_CGROUP_SYSCTL, "bpf_sysctl" },
+ { BPF_PROG_TYPE_FLOW_DISSECTOR, "__sk_buff" },
+ { BPF_PROG_TYPE_KPROBE, "bpf_user_pt_regs_t" },
+ { BPF_PROG_TYPE_LWT_IN, "__sk_buff" },
+ { BPF_PROG_TYPE_LWT_OUT, "__sk_buff" },
+ { BPF_PROG_TYPE_LWT_SEG6LOCAL, "__sk_buff" },
+ { BPF_PROG_TYPE_LWT_XMIT, "__sk_buff" },
+ { BPF_PROG_TYPE_NETFILTER, "bpf_nf_ctx" },
+ { BPF_PROG_TYPE_PERF_EVENT, "bpf_perf_event_data" },
+ { BPF_PROG_TYPE_RAW_TRACEPOINT, "bpf_raw_tracepoint_args" },
+ { BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" },
+ { BPF_PROG_TYPE_SCHED_ACT, "__sk_buff" },
+ { BPF_PROG_TYPE_SCHED_CLS, "__sk_buff" },
+ { BPF_PROG_TYPE_SK_LOOKUP, "bpf_sk_lookup" },
+ { BPF_PROG_TYPE_SK_MSG, "sk_msg_md" },
+ { BPF_PROG_TYPE_SK_REUSEPORT, "sk_reuseport_md" },
+ { BPF_PROG_TYPE_SK_SKB, "__sk_buff" },
+ { BPF_PROG_TYPE_SOCK_OPS, "bpf_sock_ops" },
+ { BPF_PROG_TYPE_SOCKET_FILTER, "__sk_buff" },
+ { BPF_PROG_TYPE_XDP, "xdp_md" },
+ /* all other program types don't have "named" context structs */
+};
+
+/* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef,
+ * for below __builtin_types_compatible_p() checks;
+ * with this approach we don't need any extra arch-specific #ifdef guards
+ */
+struct pt_regs;
+struct user_pt_regs;
+struct user_regs_struct;
+
+static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog,
+ const char *subprog_name, int arg_idx,
+ int arg_type_id, const char *ctx_name)
+{
+ const struct btf_type *t;
+ const char *tname;
+
+ /* check if existing parameter already matches verifier expectations */
+ t = skip_mods_and_typedefs(btf, arg_type_id, NULL);
+ if (!btf_is_ptr(t))
+ goto out_warn;
+
+ /* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe
+ * and perf_event programs, so check this case early on and forget
+ * about it for subsequent checks
+ */
+ while (btf_is_mod(t))
+ t = btf__type_by_id(btf, t->type);
+ if (btf_is_typedef(t) &&
+ (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) {
+ tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
+ if (strcmp(tname, "bpf_user_pt_regs_t") == 0)
+ return false; /* canonical type for kprobe/perf_event */
+ }
+
+ /* now we can ignore typedefs moving forward */
+ t = skip_mods_and_typedefs(btf, t->type, NULL);
+
+ /* if it's `void *`, definitely fix up BTF info */
+ if (btf_is_void(t))
+ return true;
+
+ /* if it's already proper canonical type, no need to fix up */
+ tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
+ if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0)
+ return false;
+
+ /* special cases */
+ switch (prog->type) {
+ case BPF_PROG_TYPE_KPROBE:
+ /* `struct pt_regs *` is expected, but we need to fix up */
+ if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
+ return true;
+ break;
+ case BPF_PROG_TYPE_PERF_EVENT:
+ if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
+ btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
+ return true;
+ if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
+ btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
+ return true;
+ if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
+ btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
+ return true;
+ break;
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
+ /* allow u64* as ctx */
+ if (btf_is_int(t) && t->size == 8)
+ return true;
+ break;
+ default:
+ break;
+ }
+
+out_warn:
+ pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n",
+ prog->name, subprog_name, arg_idx, ctx_name);
+ return false;
+}
+
+static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog)
+{
+ int fn_id, fn_proto_id, ret_type_id, orig_proto_id;
+ int i, err, arg_cnt, fn_name_off, linkage;
+ struct btf_type *fn_t, *fn_proto_t, *t;
+ struct btf_param *p;
+
+ /* caller already validated FUNC -> FUNC_PROTO validity */
+ fn_t = btf_type_by_id(btf, orig_fn_id);
+ fn_proto_t = btf_type_by_id(btf, fn_t->type);
+
+ /* Note that each btf__add_xxx() operation invalidates
+ * all btf_type and string pointers, so we need to be
+ * very careful when cloning BTF types. BTF type
+ * pointers have to be always refetched. And to avoid
+ * problems with invalidated string pointers, we
+ * add empty strings initially, then just fix up
+ * name_off offsets in place. Offsets are stable for
+ * existing strings, so that works out.
+ */
+ fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */
+ linkage = btf_func_linkage(fn_t);
+ orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */
+ ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */
+ arg_cnt = btf_vlen(fn_proto_t);
+
+ /* clone FUNC_PROTO and its params */
+ fn_proto_id = btf__add_func_proto(btf, ret_type_id);
+ if (fn_proto_id < 0)
+ return -EINVAL;
+
+ for (i = 0; i < arg_cnt; i++) {
+ int name_off;
+
+ /* copy original parameter data */
+ t = btf_type_by_id(btf, orig_proto_id);
+ p = &btf_params(t)[i];
+ name_off = p->name_off;
+
+ err = btf__add_func_param(btf, "", p->type);
+ if (err)
+ return err;
+
+ fn_proto_t = btf_type_by_id(btf, fn_proto_id);
+ p = &btf_params(fn_proto_t)[i];
+ p->name_off = name_off; /* use remembered str offset */
+ }
+
+ /* clone FUNC now, btf__add_func() enforces non-empty name, so use
+ * entry program's name as a placeholder, which we replace immediately
+ * with original name_off
+ */
+ fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id);
+ if (fn_id < 0)
+ return -EINVAL;
+
+ fn_t = btf_type_by_id(btf, fn_id);
+ fn_t->name_off = fn_name_off; /* reuse original string */
+
+ return fn_id;
+}
+
+static int probe_kern_arg_ctx_tag(void)
+{
+ /* To minimize merge conflicts with BPF token series that refactors
+ * feature detection code a lot, we don't integrate
+ * probe_kern_arg_ctx_tag() into kernel_supports() feature-detection
+ * framework yet, doing our own caching internally.
+ * This will be cleaned up a bit later when bpf/bpf-next trees settle.
+ */
+ static int cached_result = -1;
+ static const char strs[] = "\0a\0b\0arg:ctx\0";
+ const __u32 types[] = {
+ /* [1] INT */
+ BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
+ /* [2] PTR -> VOID */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
+ /* [3] FUNC_PROTO `int(void *a)` */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
+ BTF_PARAM_ENC(1 /* "a" */, 2),
+ /* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
+ BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
+ /* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
+ BTF_PARAM_ENC(3 /* "b" */, 2),
+ /* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
+ BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
+ /* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
+ BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
+ };
+ const struct bpf_insn insns[] = {
+ /* main prog */
+ BPF_CALL_REL(+1),
+ BPF_EXIT_INSN(),
+ /* global subprog */
+ BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
+ BPF_EXIT_INSN(),
+ };
+ const struct bpf_func_info_min func_infos[] = {
+ { 0, 4 }, /* main prog -> FUNC 'a' */
+ { 2, 6 }, /* subprog -> FUNC 'b' */
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);
+
+ if (cached_result >= 0)
+ return cached_result;
+
+ btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
+ if (btf_fd < 0)
+ return 0;
+
+ opts.prog_btf_fd = btf_fd;
+ opts.func_info = &func_infos;
+ opts.func_info_cnt = ARRAY_SIZE(func_infos);
+ opts.func_info_rec_size = sizeof(func_infos[0]);
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
+ "GPL", insns, insn_cnt, &opts);
+ close(btf_fd);
+
+ cached_result = probe_fd(prog_fd);
+ return cached_result;
+}
+
+/* Check if main program or global subprog's function prototype has `arg:ctx`
+ * argument tags, and, if necessary, substitute correct type to match what BPF
+ * verifier would expect, taking into account specific program type. This
+ * allows supporting the __arg_ctx tag transparently on old kernels that
+ * don't yet have native support for it in the verifier, making the
+ * user's life much easier.
+ */
+static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog)
+{
+ const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name;
+ struct bpf_func_info_min *func_rec;
+ struct btf_type *fn_t, *fn_proto_t;
+ struct btf *btf = obj->btf;
+ const struct btf_type *t;
+ struct btf_param *p;
+ int ptr_id = 0, struct_id, tag_id, orig_fn_id;
+ int i, n, arg_idx, arg_cnt, err, rec_idx;
+ int *orig_ids;
+
+ /* no .BTF.ext, no problem */
+ if (!obj->btf_ext || !prog->func_info)
+ return 0;
+
+ /* don't do any fix ups if kernel natively supports __arg_ctx */
+ if (probe_kern_arg_ctx_tag() > 0)
+ return 0;
+
+ /* some BPF program types just don't have named context structs, so
+ * this fallback mechanism doesn't work for them
+ */
+ for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) {
+ if (global_ctx_map[i].prog_type != prog->type)
+ continue;
+ ctx_name = global_ctx_map[i].ctx_name;
+ break;
+ }
+ if (!ctx_name)
+ return 0;
+
+ /* remember original func BTF IDs to detect if we already cloned them */
+ orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids));
+ if (!orig_ids)
+ return -ENOMEM;
+ for (i = 0; i < prog->func_info_cnt; i++) {
+ func_rec = prog->func_info + prog->func_info_rec_size * i;
+ orig_ids[i] = func_rec->type_id;
+ }
+
+ /* go through each DECL_TAG with "arg:ctx" and see if it points to one
+ * of our subprogs; if yes and subprog is global and needs adjustment,
+ * clone and adjust FUNC -> FUNC_PROTO combo
+ */
+ for (i = 1, n = btf__type_cnt(btf); i < n; i++) {
+ /* only DECL_TAG with "arg:ctx" value are interesting */
+ t = btf__type_by_id(btf, i);
+ if (!btf_is_decl_tag(t))
+ continue;
+ if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0)
+ continue;
+
+ /* only global funcs need adjustment, if at all */
+ orig_fn_id = t->type;
+ fn_t = btf_type_by_id(btf, orig_fn_id);
+ if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL)
+ continue;
+
+ /* sanity check FUNC -> FUNC_PROTO chain, just in case */
+ fn_proto_t = btf_type_by_id(btf, fn_t->type);
+ if (!fn_proto_t || !btf_is_func_proto(fn_proto_t))
+ continue;
+
+ /* find corresponding func_info record */
+ func_rec = NULL;
+ for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) {
+ if (orig_ids[rec_idx] == t->type) {
+ func_rec = prog->func_info + prog->func_info_rec_size * rec_idx;
+ break;
+ }
+ }
+ /* current main program doesn't call into this subprog */
+ if (!func_rec)
+ continue;
+
+ /* some more sanity checking of DECL_TAG */
+ arg_cnt = btf_vlen(fn_proto_t);
+ arg_idx = btf_decl_tag(t)->component_idx;
+ if (arg_idx < 0 || arg_idx >= arg_cnt)
+ continue;
+
+ /* check if we should fix up argument type */
+ p = &btf_params(fn_proto_t)[arg_idx];
+ fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>";
+ if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name))
+ continue;
+
+ /* clone fn/fn_proto, unless we already did it for another arg */
+ if (func_rec->type_id == orig_fn_id) {
+ int fn_id;
+
+ fn_id = clone_func_btf_info(btf, orig_fn_id, prog);
+ if (fn_id < 0) {
+ err = fn_id;
+ goto err_out;
+ }
+
+ /* point func_info record to a cloned FUNC type */
+ func_rec->type_id = fn_id;
+ }
+
+ /* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument;
+ * we do it just once per main BPF program, as all global
+ * funcs share the same program type, so only one PTR ->
+ * STRUCT type chain is needed
+ */
+ if (ptr_id == 0) {
+ struct_id = btf__add_struct(btf, ctx_name, 0);
+ ptr_id = btf__add_ptr(btf, struct_id);
+ if (ptr_id < 0 || struct_id < 0) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ }
+
+ /* for completeness, clone DECL_TAG and point it to cloned param */
+ tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx);
+ if (tag_id < 0) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ /* all the BTF manipulations invalidated pointers, refetch them */
+ fn_t = btf_type_by_id(btf, func_rec->type_id);
+ fn_proto_t = btf_type_by_id(btf, fn_t->type);
+
+ /* fix up type ID pointed to by param */
+ p = &btf_params(fn_proto_t)[arg_idx];
+ p->type = ptr_id;
+ }
+
+ free(orig_ids);
+ return 0;
+err_out:
+ free(orig_ids);
+ return err;
+}
+
+static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
{
struct bpf_program *prog;
size_t i, j;
@@ -6682,6 +7109,9 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
+ err = bpf_prog_assign_exc_cb(obj, prog);
+ if (err)
+ return err;
/* Now, also append exception callback if it has not been done already. */
if (prog->exception_cb_idx >= 0) {
struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
@@ -6701,19 +7131,28 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
}
}
}
- /* Process data relos for main programs */
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
if (prog_is_subprog(obj, prog))
continue;
if (!prog->autoload)
continue;
+
+ /* Process data relos for main programs */
err = bpf_object__relocate_data(obj, prog);
if (err) {
pr_warn("prog '%s': failed to relocate data references: %d\n",
prog->name, err);
return err;
}
+
+ /* Fix up .BTF.ext information, if necessary */
+ err = bpf_program_fixup_func_info(obj, prog);
+ if (err) {
+ pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %d\n",
+ prog->name, err);
+ return err;
+ }
}
return 0;
@@ -7043,7 +7482,7 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
load_attr.prog_ifindex = prog->prog_ifindex;
/* specify func_info/line_info only if kernel supports them */
- btf_fd = bpf_object__btf_fd(obj);
+ btf_fd = btf__fd(obj->btf);
if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
load_attr.prog_btf_fd = btf_fd;
load_attr.func_info = prog->func_info;
@@ -7128,7 +7567,7 @@ retry_load:
if (map->libbpf_type != LIBBPF_MAP_RODATA)
continue;
- if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) {
+ if (bpf_prog_bind_map(ret, map->fd, NULL)) {
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warn("prog '%s': failed to bind map '%s': %s\n",
prog->name, map->real_name, cp);
@@ -8060,11 +8499,11 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
err = bpf_object__probe_loading(obj);
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
- err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__sanitize_maps(obj);
err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
- err = err ? : bpf_object__create_maps(obj);
err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
+ err = err ? : bpf_object__sanitize_and_load_btf(obj);
+ err = err ? : bpf_object__create_maps(obj);
err = err ? : bpf_object__load_progs(obj, extra_log_level);
err = err ? : bpf_object_init_prog_arrays(obj);
err = err ? : bpf_object_prepare_struct_ops(obj);
@@ -8073,8 +8512,6 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
/* reset FDs */
if (obj->btf)
btf__set_fd(obj->btf, -1);
- for (i = 0; i < obj->nr_maps; i++)
- obj->maps[i].fd = -1;
if (!err)
err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
}
@@ -9594,7 +10031,11 @@ int libbpf_attach_type_by_name(const char *name,
int bpf_map__fd(const struct bpf_map *map)
{
- return map ? map->fd : libbpf_err(-EINVAL);
+ if (!map)
+ return libbpf_err(-EINVAL);
+ if (!map_is_created(map))
+ return -1;
+ return map->fd;
}
static bool map_uses_real_name(const struct bpf_map *map)
@@ -9630,7 +10071,7 @@ enum bpf_map_type bpf_map__type(const struct bpf_map *map)
int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
{
- if (map->fd >= 0)
+ if (map_is_created(map))
return libbpf_err(-EBUSY);
map->def.type = type;
return 0;
@@ -9643,7 +10084,7 @@ __u32 bpf_map__map_flags(const struct bpf_map *map)
int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
{
- if (map->fd >= 0)
+ if (map_is_created(map))
return libbpf_err(-EBUSY);
map->def.map_flags = flags;
return 0;
@@ -9656,7 +10097,7 @@ __u64 bpf_map__map_extra(const struct bpf_map *map)
int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
{
- if (map->fd >= 0)
+ if (map_is_created(map))
return libbpf_err(-EBUSY);
map->map_extra = map_extra;
return 0;
@@ -9669,7 +10110,7 @@ __u32 bpf_map__numa_node(const struct bpf_map *map)
int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
{
- if (map->fd >= 0)
+ if (map_is_created(map))
return libbpf_err(-EBUSY);
map->numa_node = numa_node;
return 0;
@@ -9682,7 +10123,7 @@ __u32 bpf_map__key_size(const struct bpf_map *map)
int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
{
- if (map->fd >= 0)
+ if (map_is_created(map))
return libbpf_err(-EBUSY);
map->def.key_size = size;
return 0;
@@ -9766,7 +10207,7 @@ static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
{
- if (map->fd >= 0)
+ if (map->obj->loaded || map->reused)
return libbpf_err(-EBUSY);
if (map->mmaped) {
@@ -9807,8 +10248,11 @@ __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size)
{
+ if (map->obj->loaded || map->reused)
+ return libbpf_err(-EBUSY);
+
if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
- size != map->def.value_size || map->fd >= 0)
+ size != map->def.value_size)
return libbpf_err(-EINVAL);
memcpy(map->mmaped, data, size);
@@ -9835,7 +10279,7 @@ __u32 bpf_map__ifindex(const struct bpf_map *map)
int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
- if (map->fd >= 0)
+ if (map_is_created(map))
return libbpf_err(-EBUSY);
map->map_ifindex = ifindex;
return 0;
@@ -9940,7 +10384,7 @@ bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
static int validate_map_op(const struct bpf_map *map, size_t key_sz,
size_t value_sz, bool check_value_sz)
{
- if (map->fd <= 0)
+ if (!map_is_created(map)) /* map is not yet created */
return -ENOENT;
if (map->def.key_size != key_sz) {
@@ -11453,7 +11897,7 @@ bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
return libbpf_err_ptr(err);
offsets = resolved_offsets;
} else if (syms) {
- err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets);
+ err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC);
if (err < 0)
return libbpf_err_ptr(err);
offsets = resolved_offsets;
@@ -12393,7 +12837,7 @@ int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
__u32 zero = 0;
int err;
- if (!bpf_map__is_struct_ops(map) || map->fd < 0)
+ if (!bpf_map__is_struct_ops(map) || !map_is_created(map))
return -EINVAL;
st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
@@ -13297,7 +13741,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map *map = *s->maps[i].map;
size_t mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
- int prot, map_fd = bpf_map__fd(map);
+ int prot, map_fd = map->fd;
void **mmaped = s->maps[i].mmaped;
if (!mmaped)
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index b52dc28dc8..91c5aef7da 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -409,3 +409,6 @@ LIBBPF_1.3.0 {
ring__size;
ring_buffer__ring;
} LIBBPF_1.2.0;
+
+LIBBPF_1.4.0 {
+} LIBBPF_1.3.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 57dec645d6..878a0d0b51 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -569,6 +569,20 @@ static inline int ensure_good_fd(int fd)
return fd;
}
+/* Point *fixed_fd* to the same file that *tmp_fd* points to.
+ * Regardless of success, *tmp_fd* is closed.
+ * Whatever *fixed_fd* pointed to is closed silently.
+ */
+static inline int reuse_fd(int fixed_fd, int tmp_fd)
+{
+ int err;
+
+ err = dup2(tmp_fd, fixed_fd);
+ err = err < 0 ? -errno : 0;
+ close(tmp_fd); /* clean up temporary FD */
+ return err;
+}
+
/* The following two functions are exposed to bpftool */
int bpf_core_add_cands(struct bpf_core_cand *local_cand,
size_t local_essent_len,
@@ -608,7 +622,8 @@ int elf_open(const char *binary_path, struct elf_fd *elf_fd);
void elf_close(struct elf_fd *elf_fd);
int elf_resolve_syms_offsets(const char *binary_path, int cnt,
- const char **syms, unsigned long **poffsets);
+ const char **syms, unsigned long **poffsets,
+ int st_type);
int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
unsigned long **poffsets, size_t *pcnt);
diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h
index 290411ddb3..e783a47da8 100644
--- a/tools/lib/bpf/libbpf_version.h
+++ b/tools/lib/bpf/libbpf_version.h
@@ -4,6 +4,6 @@
#define __LIBBPF_VERSION_H
#define LIBBPF_MAJOR_VERSION 1
-#define LIBBPF_MINOR_VERSION 3
+#define LIBBPF_MINOR_VERSION 4
#endif /* __LIBBPF_VERSION_H */
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 5ced96d99f..16bca56002 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -719,13 +719,28 @@ static int linker_sanity_check_elf(struct src_obj *obj)
return -EINVAL;
}
- if (sec->shdr->sh_addralign && !is_pow_of_2(sec->shdr->sh_addralign))
+ if (is_dwarf_sec_name(sec->sec_name))
+ continue;
+
+ if (sec->shdr->sh_addralign && !is_pow_of_2(sec->shdr->sh_addralign)) {
+ pr_warn("ELF section #%zu alignment %llu is non pow-of-2 alignment in %s\n",
+ sec->sec_idx, (long long unsigned)sec->shdr->sh_addralign,
+ obj->filename);
return -EINVAL;
- if (sec->shdr->sh_addralign != sec->data->d_align)
+ }
+ if (sec->shdr->sh_addralign != sec->data->d_align) {
+ pr_warn("ELF section #%zu has inconsistent alignment addr=%llu != d=%llu in %s\n",
+ sec->sec_idx, (long long unsigned)sec->shdr->sh_addralign,
+ (long long unsigned)sec->data->d_align, obj->filename);
return -EINVAL;
+ }
- if (sec->shdr->sh_size != sec->data->d_size)
+ if (sec->shdr->sh_size != sec->data->d_size) {
+ pr_warn("ELF section #%zu has inconsistent section size sh=%llu != d=%llu in %s\n",
+ sec->sec_idx, (long long unsigned)sec->shdr->sh_size,
+ (long long unsigned)sec->data->d_size, obj->filename);
return -EINVAL;
+ }
switch (sec->shdr->sh_type) {
case SHT_SYMTAB:
@@ -737,8 +752,12 @@ static int linker_sanity_check_elf(struct src_obj *obj)
break;
case SHT_PROGBITS:
if (sec->shdr->sh_flags & SHF_EXECINSTR) {
- if (sec->shdr->sh_size % sizeof(struct bpf_insn) != 0)
+ if (sec->shdr->sh_size % sizeof(struct bpf_insn) != 0) {
+ pr_warn("ELF section #%zu has unexpected size alignment %llu in %s\n",
+ sec->sec_idx, (long long unsigned)sec->shdr->sh_size,
+ obj->filename);
return -EINVAL;
+ }
}
break;
case SHT_NOBITS:
diff --git a/tools/lib/perf/Documentation/examples/sampling.c b/tools/lib/perf/Documentation/examples/sampling.c
index 8e1a926a9c..bc142f0664 100644
--- a/tools/lib/perf/Documentation/examples/sampling.c
+++ b/tools/lib/perf/Documentation/examples/sampling.c
@@ -39,7 +39,7 @@ int main(int argc, char **argv)
libperf_init(libperf_print);
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
if (!cpus) {
fprintf(stderr, "failed to create cpus\n");
return -1;
diff --git a/tools/lib/perf/Documentation/libperf-sampling.txt b/tools/lib/perf/Documentation/libperf-sampling.txt
index d6ca24f6ef..2378980fab 100644
--- a/tools/lib/perf/Documentation/libperf-sampling.txt
+++ b/tools/lib/perf/Documentation/libperf-sampling.txt
@@ -97,7 +97,7 @@ In this case we will monitor all the available CPUs:
[source,c]
--
- 42 cpus = perf_cpu_map__new(NULL);
+ 42 cpus = perf_cpu_map__new_online_cpus();
43 if (!cpus) {
44 fprintf(stderr, "failed to create cpus\n");
45 return -1;
diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt
index a8f1a23793..fcfb9499ef 100644
--- a/tools/lib/perf/Documentation/libperf.txt
+++ b/tools/lib/perf/Documentation/libperf.txt
@@ -37,7 +37,7 @@ SYNOPSIS
struct perf_cpu_map;
- struct perf_cpu_map *perf_cpu_map__dummy_new(void);
+ struct perf_cpu_map *perf_cpu_map__new_any_cpu(void);
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
struct perf_cpu_map *perf_cpu_map__read(FILE *file);
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
@@ -46,7 +46,7 @@ SYNOPSIS
void perf_cpu_map__put(struct perf_cpu_map *map);
int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
- bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+ bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
int perf_cpu_map__max(struct perf_cpu_map *map);
bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu);
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index 2a5a292173..4adcd7920d 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -9,6 +9,7 @@
#include <unistd.h>
#include <ctype.h>
#include <limits.h>
+#include "internal.h"
void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
@@ -27,7 +28,7 @@ struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
return result;
}
-struct perf_cpu_map *perf_cpu_map__dummy_new(void)
+struct perf_cpu_map *perf_cpu_map__new_any_cpu(void)
{
struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);
@@ -66,15 +67,21 @@ void perf_cpu_map__put(struct perf_cpu_map *map)
}
}
-static struct perf_cpu_map *cpu_map__default_new(void)
+static struct perf_cpu_map *cpu_map__new_sysconf(void)
{
struct perf_cpu_map *cpus;
- int nr_cpus;
+ int nr_cpus, nr_cpus_conf;
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
if (nr_cpus < 0)
return NULL;
+ nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF);
+ if (nr_cpus != nr_cpus_conf) {
+ pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.",
+ nr_cpus, nr_cpus_conf, nr_cpus);
+ }
+
cpus = perf_cpu_map__alloc(nr_cpus);
if (cpus != NULL) {
int i;
@@ -86,9 +93,27 @@ static struct perf_cpu_map *cpu_map__default_new(void)
return cpus;
}
-struct perf_cpu_map *perf_cpu_map__default_new(void)
+static struct perf_cpu_map *cpu_map__new_sysfs_online(void)
{
- return cpu_map__default_new();
+ struct perf_cpu_map *cpus = NULL;
+ FILE *onlnf;
+
+ onlnf = fopen("/sys/devices/system/cpu/online", "r");
+ if (onlnf) {
+ cpus = perf_cpu_map__read(onlnf);
+ fclose(onlnf);
+ }
+ return cpus;
+}
+
+struct perf_cpu_map *perf_cpu_map__new_online_cpus(void)
+{
+ struct perf_cpu_map *cpus = cpu_map__new_sysfs_online();
+
+ if (cpus)
+ return cpus;
+
+ return cpu_map__new_sysconf();
}
@@ -180,27 +205,11 @@ struct perf_cpu_map *perf_cpu_map__read(FILE *file)
if (nr_cpus > 0)
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
- else
- cpus = cpu_map__default_new();
out_free_tmp:
free(tmp_cpus);
return cpus;
}
-static struct perf_cpu_map *cpu_map__read_all_cpu_map(void)
-{
- struct perf_cpu_map *cpus = NULL;
- FILE *onlnf;
-
- onlnf = fopen("/sys/devices/system/cpu/online", "r");
- if (!onlnf)
- return cpu_map__default_new();
-
- cpus = perf_cpu_map__read(onlnf);
- fclose(onlnf);
- return cpus;
-}
-
struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
{
struct perf_cpu_map *cpus = NULL;
@@ -211,7 +220,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
int max_entries = 0;
if (!cpu_list)
- return cpu_map__read_all_cpu_map();
+ return perf_cpu_map__new_online_cpus();
/*
* must handle the case of empty cpumap to cover
@@ -268,10 +277,12 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
if (nr_cpus > 0)
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
- else if (*cpu_list != '\0')
- cpus = cpu_map__default_new();
- else
- cpus = perf_cpu_map__dummy_new();
+ else if (*cpu_list != '\0') {
+ pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.",
+ cpu_list);
+ cpus = perf_cpu_map__new_online_cpus();
+ } else
+ cpus = perf_cpu_map__new_any_cpu();
invalid:
free(tmp_cpus);
out:
@@ -300,7 +311,7 @@ int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
return cpus ? __perf_cpu_map__nr(cpus) : 1;
}
-bool perf_cpu_map__empty(const struct perf_cpu_map *map)
+bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map)
{
return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true;
}
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index 3acbbccc19..c6d67fc9e5 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -39,7 +39,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
if (evsel->system_wide) {
/* System wide: set the cpu map of the evsel to all online CPUs. */
perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__new(NULL);
+ evsel->cpus = perf_cpu_map__new_online_cpus();
} else if (evlist->has_user_cpus && evsel->is_pmu_core) {
/*
* User requested CPUs on a core PMU, ensure the requested CPUs
@@ -248,10 +248,10 @@ u64 perf_evlist__read_format(struct perf_evlist *evlist)
static void perf_evlist__id_hash(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, u64 id)
+ int cpu_map_idx, int thread, u64 id)
{
int hash;
- struct perf_sample_id *sid = SID(evsel, cpu, thread);
+ struct perf_sample_id *sid = SID(evsel, cpu_map_idx, thread);
sid->id = id;
sid->evsel = evsel;
@@ -269,21 +269,27 @@ void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
void perf_evlist__id_add(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, u64 id)
+ int cpu_map_idx, int thread, u64 id)
{
- perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
+ if (!SID(evsel, cpu_map_idx, thread))
+ return;
+
+ perf_evlist__id_hash(evlist, evsel, cpu_map_idx, thread, id);
evsel->id[evsel->ids++] = id;
}
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, int fd)
+ int cpu_map_idx, int thread, int fd)
{
u64 read_data[4] = { 0, };
int id_idx = 1; /* The first entry is the counter value */
u64 id;
int ret;
+ if (!SID(evsel, cpu_map_idx, thread))
+ return -1;
+
ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
if (!ret)
goto add;
@@ -312,7 +318,7 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
id = read_data[id_idx];
add:
- perf_evlist__id_add(evlist, evsel, cpu, thread, id);
+ perf_evlist__id_add(evlist, evsel, cpu_map_idx, thread, id);
return 0;
}
@@ -619,7 +625,7 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
/* One for each CPU */
nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
- if (perf_cpu_map__empty(evlist->all_cpus)) {
+ if (perf_cpu_map__has_any_cpu_or_is_empty(evlist->all_cpus)) {
/* Plus one for each thread */
nr_mmaps += perf_thread_map__nr(evlist->threads);
/* Minus the per-thread CPU (-1) */
@@ -653,7 +659,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
return -ENOMEM;
- if (perf_cpu_map__empty(cpus))
+ if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
return mmap_per_thread(evlist, ops, mp);
return mmap_per_cpu(evlist, ops, mp);
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index 8b51b008a8..c071609532 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -120,7 +120,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
static struct perf_cpu_map *empty_cpu_map;
if (empty_cpu_map == NULL) {
- empty_cpu_map = perf_cpu_map__dummy_new();
+ empty_cpu_map = perf_cpu_map__new_any_cpu();
if (empty_cpu_map == NULL)
return -ENOMEM;
}
diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h
index d86ffe8ed4..f43bdb9b62 100644
--- a/tools/lib/perf/include/internal/evlist.h
+++ b/tools/lib/perf/include/internal/evlist.h
@@ -126,11 +126,11 @@ u64 perf_evlist__read_format(struct perf_evlist *evlist);
void perf_evlist__id_add(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, u64 id);
+ int cpu_map_idx, int thread, u64 id);
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
struct perf_evsel *evsel,
- int cpu, int thread, int fd);
+ int cpu_map_idx, int thread, int fd);
void perf_evlist__reset_id_hash(struct perf_evlist *evlist);
diff --git a/tools/lib/perf/include/internal/mmap.h b/tools/lib/perf/include/internal/mmap.h
index 5a062af8e9..5f08cab61e 100644
--- a/tools/lib/perf/include/internal/mmap.h
+++ b/tools/lib/perf/include/internal/mmap.h
@@ -33,7 +33,8 @@ struct perf_mmap {
bool overwrite;
u64 flush;
libperf_unmap_cb_t unmap_cb;
- char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+ void *event_copy;
+ size_t event_copy_sz;
struct perf_mmap *next;
};
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
index e38d859a38..228c6c629b 100644
--- a/tools/lib/perf/include/perf/cpumap.h
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -19,10 +19,23 @@ struct perf_cache {
struct perf_cpu_map;
/**
- * perf_cpu_map__dummy_new - a map with a singular "any CPU"/dummy -1 value.
+ * perf_cpu_map__new_any_cpu - a map with a singular "any CPU"/dummy -1 value.
+ */
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_any_cpu(void);
+/**
+ * perf_cpu_map__new_online_cpus - a map read from
+ * /sys/devices/system/cpu/online if
+ * available. If reading wasn't possible, a map
+ * is created using the number of online
+ * processors, assuming the first 'n'
+ * processors are all online.
+ */
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_online_cpus(void);
+/**
+ * perf_cpu_map__new - create a map from the given cpu_list such as "0-7". If no
+ * cpu_list argument is provided then the result
+ * of perf_cpu_map__new_online_cpus() is returned.
*/
-LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
-LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
@@ -31,12 +44,23 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
LIBPERF_API struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig,
struct perf_cpu_map *other);
LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
+/**
+ * perf_cpu_map__cpu - get the CPU value at the given index. Returns -1 if index
+ * is invalid.
+ */
LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+/**
+ * perf_cpu_map__nr - for an empty map returns 1, as perf_cpu_map__cpu returns a
+ * cpu of -1 for an invalid index, which makes an empty map
+ * look like it contains the "any CPU"/dummy value. Otherwise
+ * the result is the number of CPUs in the map, plus one if the
+ * "any CPU"/dummy value is present.
+ */
LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
/**
- * perf_cpu_map__empty - is map either empty or the "any CPU"/dummy value.
+ * perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value.
*/
-LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
+LIBPERF_API bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map);
LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
LIBPERF_API bool perf_cpu_map__equal(const struct perf_cpu_map *lhs,
@@ -51,6 +75,12 @@ LIBPERF_API bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map);
(idx) < perf_cpu_map__nr(cpus); \
(idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
+#define perf_cpu_map__for_each_cpu_skip_any(_cpu, idx, cpus) \
+ for ((idx) = 0, (_cpu) = perf_cpu_map__cpu(cpus, idx); \
+ (idx) < perf_cpu_map__nr(cpus); \
+ (idx)++, (_cpu) = perf_cpu_map__cpu(cpus, idx)) \
+ if ((_cpu).cpu != -1)
+
#define perf_cpu_map__for_each_idx(idx, cpus) \
for ((idx) = 0; (idx) < perf_cpu_map__nr(cpus); (idx)++)
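
A short sketch against the renamed API, assuming a program linked with -lperf: create an online-CPU map, iterate it, and drop the reference:

```c
#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
	struct perf_cpu cpu;
	int idx;

	if (!cpus)
		return 1;

	/* visits each CPU in the map in index order */
	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("cpu%d\n", cpu.cpu);

	perf_cpu_map__put(cpus);
	return 0;
}
```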
diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map
index 190b56ae92..10b3f37226 100644
--- a/tools/lib/perf/libperf.map
+++ b/tools/lib/perf/libperf.map
@@ -1,15 +1,15 @@
LIBPERF_0.0.1 {
global:
libperf_init;
- perf_cpu_map__dummy_new;
- perf_cpu_map__default_new;
+ perf_cpu_map__new_any_cpu;
+ perf_cpu_map__new_online_cpus;
perf_cpu_map__get;
perf_cpu_map__put;
perf_cpu_map__new;
perf_cpu_map__read;
perf_cpu_map__nr;
perf_cpu_map__cpu;
- perf_cpu_map__empty;
+ perf_cpu_map__has_any_cpu_or_is_empty;
perf_cpu_map__max;
perf_cpu_map__has;
perf_thread_map__new_array;
diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c
index 2184814b37..0c903c2372 100644
--- a/tools/lib/perf/mmap.c
+++ b/tools/lib/perf/mmap.c
@@ -19,6 +19,7 @@
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
bool overwrite, libperf_unmap_cb_t unmap_cb)
{
+ /* Assume fields were zero initialized. */
map->fd = -1;
map->overwrite = overwrite;
map->unmap_cb = unmap_cb;
@@ -51,13 +52,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
void perf_mmap__munmap(struct perf_mmap *map)
{
- if (map && map->base != NULL) {
+ if (!map)
+ return;
+
+ zfree(&map->event_copy);
+ map->event_copy_sz = 0;
+ if (map->base) {
munmap(map->base, perf_mmap__mmap_len(map));
map->base = NULL;
map->fd = -1;
refcount_set(&map->refcnt, 0);
}
- if (map && map->unmap_cb)
+ if (map->unmap_cb)
map->unmap_cb(map);
}
@@ -223,9 +229,17 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map,
*/
if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
unsigned int offset = *startp;
- unsigned int len = min(sizeof(*event), size), cpy;
+ unsigned int len = size, cpy;
void *dst = map->event_copy;
+ if (size > map->event_copy_sz) {
+ dst = realloc(map->event_copy, size);
+ if (!dst)
+ return NULL;
+ map->event_copy = dst;
+ map->event_copy_sz = size;
+ }
+
do {
cpy = min(map->mask + 1 - (offset & map->mask), len);
memcpy(dst, &data[offset & map->mask], cpy);
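
The do/while at the end of this hunk is the usual wrap-around reassembly for a power-of-2 ring. A standalone sketch of the same loop, for reference:

```c
#include <string.h>

/* Reassemble 'len' bytes starting at 'offset' from a ring of
 * mask + 1 (power of 2) bytes into a linear buffer; mirrors the
 * chunked copy in perf_mmap__read() above.
 */
static void copy_wrapped(void *dst, const unsigned char *ring,
			 unsigned long mask, unsigned long offset,
			 unsigned long len)
{
	unsigned char *out = dst;

	while (len) {
		/* bytes available before the ring wraps */
		unsigned long cpy = mask + 1 - (offset & mask);

		if (cpy > len)
			cpy = len;
		memcpy(out, &ring[offset & mask], cpy);
		offset += cpy;
		out += cpy;
		len -= cpy;
	}
}
```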
diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c
index 87b0510a55..c998b1dae8 100644
--- a/tools/lib/perf/tests/test-cpumap.c
+++ b/tools/lib/perf/tests/test-cpumap.c
@@ -21,7 +21,7 @@ int test_cpumap(int argc, char **argv)
libperf_init(libperf_print);
- cpus = perf_cpu_map__dummy_new();
+ cpus = perf_cpu_map__new_any_cpu();
if (!cpus)
return -1;
@@ -29,7 +29,7 @@ int test_cpumap(int argc, char **argv)
perf_cpu_map__put(cpus);
perf_cpu_map__put(cpus);
- cpus = perf_cpu_map__default_new();
+ cpus = perf_cpu_map__new_online_cpus();
if (!cpus)
return -1;
diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
index ed616fc19b..10f70cb41f 100644
--- a/tools/lib/perf/tests/test-evlist.c
+++ b/tools/lib/perf/tests/test-evlist.c
@@ -46,7 +46,7 @@ static int test_stat_cpu(void)
};
int err, idx;
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
__T("failed to create cpus", cpus);
evlist = perf_evlist__new();
@@ -261,7 +261,7 @@ static int test_mmap_thread(void)
threads = perf_thread_map__new_dummy();
__T("failed to create threads", threads);
- cpus = perf_cpu_map__dummy_new();
+ cpus = perf_cpu_map__new_any_cpu();
__T("failed to create cpus", cpus);
perf_thread_map__set_pid(threads, 0, pid);
@@ -350,7 +350,7 @@ static int test_mmap_cpus(void)
attr.config = id;
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
__T("failed to create cpus", cpus);
evlist = perf_evlist__new();
diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
index a11fc51bfb..545ec31505 100644
--- a/tools/lib/perf/tests/test-evsel.c
+++ b/tools/lib/perf/tests/test-evsel.c
@@ -27,7 +27,7 @@ static int test_stat_cpu(void)
};
int err, idx;
- cpus = perf_cpu_map__new(NULL);
+ cpus = perf_cpu_map__new_online_cpus();
__T("failed to create cpus", cpus);
evsel = perf_evsel__new(&attr);