author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-08-07 13:11:22 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-08-07 13:11:22 +0000
commit    b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree      42f079ff82e701ebcb76829974b4caca3e5b6798 /tools/lib
parent    Adding upstream version 6.8.12. (diff)
Adding upstream version 6.9.7. (upstream/6.9.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/bpf/Build              |    2
-rw-r--r--  tools/lib/bpf/Makefile           |    2
-rw-r--r--  tools/lib/bpf/bpf.c              |   44
-rw-r--r--  tools/lib/bpf/bpf.h              |   77
-rw-r--r--  tools/lib/bpf/bpf_core_read.h    |   60
-rw-r--r--  tools/lib/bpf/bpf_helpers.h      |    4
-rw-r--r--  tools/lib/bpf/btf.c              |   43
-rw-r--r--  tools/lib/bpf/elf.c              |    2
-rw-r--r--  tools/lib/bpf/features.c         |  583
-rw-r--r--  tools/lib/bpf/libbpf.c           | 1137
-rw-r--r--  tools/lib/bpf/libbpf.h           |   23
-rw-r--r--  tools/lib/bpf/libbpf.map         |    6
-rw-r--r--  tools/lib/bpf/libbpf_internal.h  |   54
-rw-r--r--  tools/lib/bpf/libbpf_probes.c    |   19
-rw-r--r--  tools/lib/bpf/linker.c           |    2
-rw-r--r--  tools/lib/bpf/str_error.h        |    3
-rw-r--r--  tools/lib/subcmd/parse-options.c |    8
-rw-r--r--  tools/lib/subcmd/run-command.c   |    2
-rw-r--r--  tools/lib/subcmd/run-command.h   |    2
19 files changed, 1408 insertions(+), 665 deletions(-)
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 2d0c282c85..b6619199a7 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,4 +1,4 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
- usdt.o zip.o elf.o
+ usdt.o zip.o elf.o features.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 4be7144e48..2cf8927743 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -2,7 +2,7 @@
# Most of this file is copied from tools/lib/traceevent/Makefile
RM ?= rm
-srctree = $(abs_srctree)
+srctree := $(realpath $(srctree))
VERSION_SCRIPT := libbpf.map
LIBBPF_VERSION := $(shell \
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 9dc9625651..145b8bd1d5 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -103,9 +103,9 @@ int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
* [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
* [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
*/
-int probe_memcg_account(void)
+int probe_memcg_account(int token_fd)
{
- const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
struct bpf_insn insns[] = {
BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
BPF_EXIT_INSN(),
@@ -120,6 +120,9 @@ int probe_memcg_account(void)
attr.insns = ptr_to_u64(insns);
attr.insn_cnt = insn_cnt;
attr.license = ptr_to_u64("GPL");
+ attr.prog_token_fd = token_fd;
+ if (token_fd)
+ attr.prog_flags |= BPF_F_TOKEN_FD;
prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
if (prog_fd >= 0) {
@@ -146,7 +149,7 @@ int bump_rlimit_memlock(void)
struct rlimit rlim;
/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
- if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
+ if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
return 0;
memlock_bumped = true;
@@ -169,7 +172,7 @@ int bpf_map_create(enum bpf_map_type map_type,
__u32 max_entries,
const struct bpf_map_create_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
+ const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);
union bpf_attr attr;
int fd;
@@ -181,7 +184,7 @@ int bpf_map_create(enum bpf_map_type map_type,
return libbpf_err(-EINVAL);
attr.map_type = map_type;
- if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
attr.key_size = key_size;
attr.value_size = value_size;
@@ -191,6 +194,7 @@ int bpf_map_create(enum bpf_map_type map_type,
attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
+ attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0);
attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
attr.map_flags = OPTS_GET(opts, map_flags, 0);
@@ -198,6 +202,8 @@ int bpf_map_create(enum bpf_map_type map_type,
attr.numa_node = OPTS_GET(opts, numa_node, 0);
attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
+ attr.map_token_fd = OPTS_GET(opts, token_fd, 0);
+
fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
return libbpf_err_errno(fd);
}
@@ -232,7 +238,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
const struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
void *finfo = NULL, *linfo = NULL;
const char *func_info, *line_info;
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
@@ -261,8 +267,9 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
attr.kern_version = OPTS_GET(opts, kern_version, 0);
+ attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);
- if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
attr.license = ptr_to_u64(license);
@@ -1182,7 +1189,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
{
- const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
+ const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd);
union bpf_attr attr;
char *log_buf;
size_t log_size;
@@ -1207,6 +1214,10 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts
attr.btf = ptr_to_u64(btf_data);
attr.btf_size = btf_size;
+
+ attr.btf_flags = OPTS_GET(opts, btf_flags, 0);
+ attr.btf_token_fd = OPTS_GET(opts, token_fd, 0);
+
/* log_level == 0 and log_buf != NULL means "try loading without
* log_buf, but retry with log_buf and log_level=1 on error", which is
* consistent across low-level and high-level BTF and program loading
@@ -1287,3 +1298,20 @@ int bpf_prog_bind_map(int prog_fd, int map_fd,
ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
return libbpf_err_errno(ret);
}
+
+int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, token_create);
+ union bpf_attr attr;
+ int fd;
+
+ if (!OPTS_VALID(opts, bpf_token_create_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
+ attr.token_create.bpffs_fd = bpffs_fd;
+ attr.token_create.flags = OPTS_GET(opts, flags, 0);
+
+ fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
+ return libbpf_err_errno(fd);
+}
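
For orientation, a minimal usage sketch of the new API (the bpffs path, the O_RDONLY open, and the error handling are illustrative assumptions, not part of this patch):

    /* Hypothetical caller: derive a BPF token from a bpffs mount. */
    #include <fcntl.h>
    #include <unistd.h>
    #include <bpf/bpf.h>

    static int token_from_bpffs(const char *bpffs_path /* e.g. "/sys/fs/bpf" */)
    {
        int bpffs_fd, token_fd;

        bpffs_fd = open(bpffs_path, O_RDONLY);
        if (bpffs_fd < 0)
            return -1;

        token_fd = bpf_token_create(bpffs_fd, NULL); /* opts may be NULL */
        close(bpffs_fd);
        return token_fd; /* token FD on success, negative error otherwise */
    }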
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index dad7917903..df0db2f0cd 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -51,8 +51,12 @@ struct bpf_map_create_opts {
__u32 numa_node;
__u32 map_ifindex;
+ __s32 value_type_btf_obj_fd;
+
+ __u32 token_fd;
+ size_t :0;
};
-#define bpf_map_create_opts__last_field map_ifindex
+#define bpf_map_create_opts__last_field token_fd
LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
const char *map_name,
@@ -102,9 +106,10 @@ struct bpf_prog_load_opts {
* If kernel doesn't support this feature, log_size is left unchanged.
*/
__u32 log_true_size;
+ __u32 token_fd;
size_t :0;
};
-#define bpf_prog_load_opts__last_field log_true_size
+#define bpf_prog_load_opts__last_field token_fd
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
@@ -130,9 +135,12 @@ struct bpf_btf_load_opts {
* If kernel doesn't support this feature, log_size is left unchanged.
*/
__u32 log_true_size;
+
+ __u32 btf_flags;
+ __u32 token_fd;
size_t :0;
};
-#define bpf_btf_load_opts__last_field log_true_size
+#define bpf_btf_load_opts__last_field token_fd
LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
struct bpf_btf_load_opts *opts);
@@ -182,10 +190,14 @@ LIBBPF_API int bpf_map_delete_batch(int fd, const void *keys,
/**
* @brief **bpf_map_lookup_batch()** allows for batch lookup of BPF map elements.
*
- * The parameter *in_batch* is the address of the first element in the batch to read.
- * *out_batch* is an output parameter that should be passed as *in_batch* to subsequent
- * calls to **bpf_map_lookup_batch()**. NULL can be passed for *in_batch* to indicate
- * that the batched lookup starts from the beginning of the map.
+ * The parameter *in_batch* is the address of the first element in the batch to
+ * read. *out_batch* is an output parameter that should be passed as *in_batch*
+ * to subsequent calls to **bpf_map_lookup_batch()**. NULL can be passed for
+ * *in_batch* to indicate that the batched lookup starts from the beginning of
+ * the map. Both *in_batch* and *out_batch* must point to memory large enough to
+ * hold a single key, except for maps of type **BPF_MAP_TYPE_{HASH, PERCPU_HASH,
+ * LRU_HASH, LRU_PERCPU_HASH}**, for which the memory size must be at
+ * least 4 bytes wide regardless of key size.
*
* The *keys* and *values* are output parameters which must point to memory large enough to
* hold *count* items based on the key and value size of the map *map_fd*. The *keys*
@@ -218,7 +230,10 @@ LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch,
*
* @param fd BPF map file descriptor
* @param in_batch address of the first element in batch to read, can pass NULL to
- * get address of the first element in *out_batch*
+ * get address of the first element in *out_batch*. If not NULL, must be large
+ * enough to hold a key. For **BPF_MAP_TYPE_{HASH, PERCPU_HASH, LRU_HASH,
+ * LRU_PERCPU_HASH}**, the memory size must be at least 4 bytes wide regardless
+ * of key size.
* @param out_batch output parameter that should be passed to next call as *in_batch*
* @param keys pointer to an array of *count* keys
* @param values pointer to an array large enough for *count* values
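
Between the hunks, a hedged usage sketch of the batch API documented above (key/value types and batch size are assumptions):

    /* Iterate a hash map in batches; per the docs above, the in/out batch
     * tokens for hash-type maps must be at least 4 bytes wide.
     */
    #include <errno.h>
    #include <bpf/bpf.h>

    #define BATCH_SZ 64

    static int dump_map(int map_fd)
    {
        __u32 out_batch, count;
        __u32 keys[BATCH_SZ];   /* assumes __u32 keys */
        __u64 values[BATCH_SZ]; /* assumes __u64 values */
        void *in = NULL;        /* NULL: start from the beginning */
        int err;

        do {
            count = BATCH_SZ;
            err = bpf_map_lookup_batch(map_fd, in, &out_batch,
                                       keys, values, &count, NULL);
            if (err && errno != ENOENT)
                return -errno;
            /* ... process 'count' key/value pairs ... */
            in = &out_batch; /* resume where the last batch ended */
        } while (!err);      /* -ENOENT signals end of map */

        return 0;
    }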
@@ -492,7 +507,10 @@ LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
* program corresponding to *prog_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param prog_fd BPF program file descriptor
* @param info pointer to **struct bpf_prog_info** that will be populated with
@@ -509,7 +527,10 @@ LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info,
* map corresponding to *map_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param map_fd BPF map file descriptor
* @param info pointer to **struct bpf_map_info** that will be populated with
@@ -522,11 +543,14 @@ LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info,
LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len);
/**
- * @brief **bpf_btf_get_info_by_fd()** obtains information about the
+ * @brief **bpf_btf_get_info_by_fd()** obtains information about the
* BTF object corresponding to *btf_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param btf_fd BTF object file descriptor
* @param info pointer to **struct bpf_btf_info** that will be populated with
@@ -543,7 +567,10 @@ LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u
* link corresponding to *link_fd*.
*
* Populates up to *info_len* bytes of *info* and updates *info_len* with the
- * actual number of bytes written to *info*.
+ * actual number of bytes written to *info*. Note that *info* should be
+ * zero-initialized or initialized as expected by the requested *info*
+ * type. Failing to (zero-)initialize *info* under certain circumstances can
+ * result in this helper returning an error.
*
* @param link_fd BPF link file descriptor
* @param info pointer to **struct bpf_link_info** that will be populated with
@@ -640,6 +667,30 @@ struct bpf_test_run_opts {
LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
struct bpf_test_run_opts *opts);
+struct bpf_token_create_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 flags;
+ size_t :0;
+};
+#define bpf_token_create_opts__last_field flags
+
+/**
+ * @brief **bpf_token_create()** creates a new instance of BPF token derived
+ * from specified BPF FS mount point.
+ *
+ * BPF token created with this API can be passed to bpf() syscall for
+ * commands like BPF_PROG_LOAD, BPF_MAP_CREATE, etc.
+ *
+ * @param bpffs_fd FD for BPF FS instance from which to derive a BPF token
+ * instance.
+ * @param opts optional BPF token creation options, can be NULL
+ *
+ * @return BPF token FD > 0, on success; negative error code, otherwise (errno
+ * is also set to the error code)
+ */
+LIBBPF_API int bpf_token_create(int bpffs_fd,
+ struct bpf_token_create_opts *opts);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
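
Taken together with the bpf.c changes, a hedged sketch of threading a token FD through the extended opts (setting BPF_F_TOKEN_FD explicitly mirrors what this patch's feature probes do; whether callers must always do so is an assumption):

    #include <linux/bpf.h>
    #include <bpf/bpf.h>

    static int create_map_with_token(int token_fd)
    {
        LIBBPF_OPTS(bpf_map_create_opts, opts,
            .token_fd = token_fd,
            .map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
        );

        return bpf_map_create(BPF_MAP_TYPE_ARRAY, "tok_map",
                              sizeof(int), sizeof(long), 1, &opts);
    }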
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index 7325a12692..1ce738d916 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -2,6 +2,8 @@
#ifndef __BPF_CORE_READ_H__
#define __BPF_CORE_READ_H__
+#include <bpf/bpf_helpers.h>
+
/*
* enum bpf_field_info_kind is passed as a second argument into
* __builtin_preserve_field_info() built-in to get a specific aspect of
@@ -44,7 +46,7 @@ enum bpf_enum_value_kind {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
bpf_probe_read_kernel( \
- (void *)dst, \
+ (void *)dst, \
__CORE_RELO(src, fld, BYTE_SIZE), \
(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
@@ -143,8 +145,29 @@ enum bpf_enum_value_kind {
} \
})
+/* Differentiator between compilers builtin implementations. This is a
+ * requirement due to the compiler parsing differences where GCC optimizes
+ * early in parsing those constructs of type pointers to the builtin specific
+ * type, resulting in not being possible to collect the required type
+ * information in the builtin expansion.
+ */
+#ifdef __clang__
+#define ___bpf_typeof(type) ((typeof(type) *) 0)
+#else
+#define ___bpf_typeof1(type, NR) ({ \
+ extern typeof(type) *___concat(bpf_type_tmp_, NR); \
+ ___concat(bpf_type_tmp_, NR); \
+})
+#define ___bpf_typeof(type) ___bpf_typeof1(type, __COUNTER__)
+#endif
+
+#ifdef __clang__
#define ___bpf_field_ref1(field) (field)
-#define ___bpf_field_ref2(type, field) (((typeof(type) *)0)->field)
+#define ___bpf_field_ref2(type, field) (___bpf_typeof(type)->field)
+#else
+#define ___bpf_field_ref1(field) (&(field))
+#define ___bpf_field_ref2(type, field) (&(___bpf_typeof(type)->field))
+#endif
#define ___bpf_field_ref(args...) \
___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args)
@@ -194,7 +217,7 @@ enum bpf_enum_value_kind {
* BTF. Always succeeds.
*/
#define bpf_core_type_id_local(type) \
- __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)
+ __builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_LOCAL)
/*
* Convenience macro to get BTF type ID of a target kernel's type that matches
@@ -204,7 +227,7 @@ enum bpf_enum_value_kind {
* - 0, if no matching type was found in a target kernel BTF.
*/
#define bpf_core_type_id_kernel(type) \
- __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)
+ __builtin_btf_type_id(*___bpf_typeof(type), BPF_TYPE_ID_TARGET)
/*
* Convenience macro to check that provided named type
@@ -214,7 +237,7 @@ enum bpf_enum_value_kind {
* 0, if no matching type is found.
*/
#define bpf_core_type_exists(type) \
- __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
+ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_EXISTS)
/*
* Convenience macro to check that provided named type
@@ -224,7 +247,7 @@ enum bpf_enum_value_kind {
* 0, if the type does not match any in the target kernel
*/
#define bpf_core_type_matches(type) \
- __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)
+ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_MATCHES)
/*
* Convenience macro to get the byte size of a provided named type
@@ -234,7 +257,7 @@ enum bpf_enum_value_kind {
* 0, if no matching type is found.
*/
#define bpf_core_type_size(type) \
- __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)
+ __builtin_preserve_type_info(*___bpf_typeof(type), BPF_TYPE_SIZE)
/*
* Convenience macro to check that provided enumerator value is defined in
@@ -244,8 +267,13 @@ enum bpf_enum_value_kind {
* kernel's BTF;
* 0, if no matching enum and/or enum value within that enum is found.
*/
+#ifdef __clang__
#define bpf_core_enum_value_exists(enum_type, enum_value) \
__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)
+#else
+#define bpf_core_enum_value_exists(enum_type, enum_value) \
+ __builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_EXISTS)
+#endif
/*
* Convenience macro to get the integer value of an enumerator value in
@@ -255,8 +283,13 @@ enum bpf_enum_value_kind {
* present in target kernel's BTF;
* 0, if no matching enum and/or enum value within that enum is found.
*/
+#ifdef __clang__
#define bpf_core_enum_value(enum_type, enum_value) \
__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
+#else
+#define bpf_core_enum_value(enum_type, enum_value) \
+ __builtin_preserve_enum_value(___bpf_typeof(enum_type), enum_value, BPF_ENUMVAL_VALUE)
+#endif
/*
* bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
@@ -268,7 +301,7 @@ enum bpf_enum_value_kind {
* a relocation, which records BTF type ID describing root struct/union and an
* accessor string which describes exact embedded field that was used to take
* an address. See detailed description of this relocation format and
- * semantics in comments to struct bpf_field_reloc in libbpf_internal.h.
+ * semantics in comments to struct bpf_core_relo in include/uapi/linux/bpf.h.
*
* This relocation allows libbpf to adjust BPF instruction to use correct
* actual field offset, based on target kernel BTF type that matches original
@@ -292,6 +325,17 @@ enum bpf_enum_value_kind {
#define bpf_core_read_user_str(dst, sz, src) \
bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
+extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak;
+
+/*
+ * Cast provided pointer *ptr* into a pointer to a specified *type* in such
+ * a way that BPF verifier will become aware of associated kernel-side BTF
+ * type. This allows to access members of kernel types directly without the
+ * need to use BPF_CORE_READ() macros.
+ */
+#define bpf_core_cast(ptr, type) \
+ ((typeof(type) *)bpf_rdonly_cast((ptr), bpf_core_type_id_kernel(type)))
+
#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N
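
A hedged sketch of the new bpf_core_cast() from BPF program code (the hook and field choice are illustrative):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>
    #include <bpf/bpf_core_read.h>

    SEC("tp_btf/task_newtask")
    int BPF_PROG(on_newtask, struct task_struct *task, u64 clone_flags)
    {
        /* direct, read-only field access without BPF_CORE_READ() */
        struct task_struct *t = bpf_core_cast(task, struct task_struct);

        bpf_printk("new task pid=%d", t->pid);
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";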
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 2324cc42b0..cd17f6d079 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -13,6 +13,7 @@
#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
#define __array(name, val) typeof(val) *name[]
+#define __ulong(name, val) enum { ___bpf_concat(__unique_value, __COUNTER__) = val } name
/*
* Helper macro to place programs, maps, license in
@@ -190,6 +191,9 @@ enum libbpf_tristate {
#define __arg_ctx __attribute__((btf_decl_tag("arg:ctx")))
#define __arg_nonnull __attribute((btf_decl_tag("arg:nonnull")))
+#define __arg_nullable __attribute((btf_decl_tag("arg:nullable")))
+#define __arg_trusted __attribute((btf_decl_tag("arg:trusted")))
+#define __arg_arena __attribute((btf_decl_tag("arg:arena")))
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
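
A hedged sketch of the new argument tags on a global subprogram (verifier semantics are implied by the tag names; the hook is illustrative):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    /* global (non-static) subprog: 'ctx' is treated as the program's
     * context, 't' may legitimately be NULL
     */
    __noinline int handle(void *ctx __arg_ctx, struct task_struct *t __arg_nullable)
    {
        return t ? 1 : 0;
    }

    SEC("kprobe/do_nanosleep")
    int prog(void *ctx)
    {
        return handle(ctx, NULL);
    }

    char LICENSE[] SEC("license") = "GPL";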
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index ee95fd379d..2d0840ef59 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1079,6 +1079,11 @@ struct btf *btf__new(const void *data, __u32 size)
return libbpf_ptr(btf_new(data, size, NULL));
}
+struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf)
+{
+ return libbpf_ptr(btf_new(data, size, base_btf));
+}
+
static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
struct btf_ext **btf_ext)
{
@@ -1317,7 +1322,9 @@ struct btf *btf__parse_split(const char *path, struct btf *base_btf)
static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
-int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
+int btf_load_into_kernel(struct btf *btf,
+ char *log_buf, size_t log_sz, __u32 log_level,
+ int token_fd)
{
LIBBPF_OPTS(bpf_btf_load_opts, opts);
__u32 buf_sz = 0, raw_size;
@@ -1367,6 +1374,10 @@ retry_load:
opts.log_level = log_level;
}
+ opts.token_fd = token_fd;
+ if (token_fd)
+ opts.btf_flags |= BPF_F_TOKEN_FD;
+
btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
if (btf->fd < 0) {
/* time to turn on verbose mode and try again */
@@ -1394,7 +1405,7 @@ done:
int btf__load_into_kernel(struct btf *btf)
{
- return btf_load_into_kernel(btf, NULL, 0, 0);
+ return btf_load_into_kernel(btf, NULL, 0, 0, 0);
}
int btf__fd(const struct btf *btf)
@@ -3039,12 +3050,16 @@ done:
return btf_ext;
}
-const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
+const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size)
{
*size = btf_ext->data_size;
return btf_ext->data;
}
+__attribute__((alias("btf_ext__raw_data")))
+const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
+
+
struct btf_dedup;
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
@@ -4926,10 +4941,9 @@ static int btf_dedup_remap_types(struct btf_dedup *d)
*/
struct btf *btf__load_vmlinux_btf(void)
{
+ const char *sysfs_btf_path = "/sys/kernel/btf/vmlinux";
+ /* fall back locations, trying to find vmlinux on disk */
const char *locations[] = {
- /* try canonical vmlinux BTF through sysfs first */
- "/sys/kernel/btf/vmlinux",
- /* fall back to trying to find vmlinux on disk otherwise */
"/boot/vmlinux-%1$s",
"/lib/modules/%1$s/vmlinux-%1$s",
"/lib/modules/%1$s/build/vmlinux",
@@ -4943,8 +4957,23 @@ struct btf *btf__load_vmlinux_btf(void)
struct btf *btf;
int i, err;
- uname(&buf);
+ /* is canonical sysfs location accessible? */
+ if (faccessat(AT_FDCWD, sysfs_btf_path, F_OK, AT_EACCESS) < 0) {
+ pr_warn("kernel BTF is missing at '%s', was CONFIG_DEBUG_INFO_BTF enabled?\n",
+ sysfs_btf_path);
+ } else {
+ btf = btf__parse(sysfs_btf_path, NULL);
+ if (!btf) {
+ err = -errno;
+ pr_warn("failed to read kernel BTF from '%s': %d\n", sysfs_btf_path, err);
+ return libbpf_err_ptr(err);
+ }
+ pr_debug("loaded kernel BTF from '%s'\n", sysfs_btf_path);
+ return btf;
+ }
+ /* try fallback locations */
+ uname(&buf);
for (i = 0; i < ARRAY_SIZE(locations); i++) {
snprintf(path, PATH_MAX, locations[i], buf.release);
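
Caller-visible behavior is unchanged: sysfs BTF is simply tried first, with a clearer warning when absent. A hedged sketch of the typical call (error reporting assumes libbpf 1.0 semantics):

    #include <errno.h>
    #include <stdio.h>
    #include <bpf/btf.h>

    static struct btf *load_kernel_btf(void)
    {
        struct btf *btf = btf__load_vmlinux_btf();

        if (!btf) /* NULL with errno set on failure */
            fprintf(stderr, "failed to load vmlinux BTF: %d\n", -errno);
        return btf;
    }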
diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c
index b02faec748..c92e023941 100644
--- a/tools/lib/bpf/elf.c
+++ b/tools/lib/bpf/elf.c
@@ -11,8 +11,6 @@
#include "libbpf_internal.h"
#include "str_error.h"
-#define STRERR_BUFSIZE 128
-
/* A SHT_GNU_versym section holds 16-bit words. This bit is set if
* the symbol is hidden and can only be seen when referenced using an
* explicit version number. This is a GNU extension.
diff --git a/tools/lib/bpf/features.c b/tools/lib/bpf/features.c
new file mode 100644
index 0000000000..a336786a22
--- /dev/null
+++ b/tools/lib/bpf/features.c
@@ -0,0 +1,583 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include <linux/kernel.h>
+#include <linux/filter.h>
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_common.h"
+#include "libbpf_internal.h"
+#include "str_error.h"
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64)(unsigned long)ptr;
+}
+
+int probe_fd(int fd)
+{
+ if (fd >= 0)
+ close(fd);
+ return fd >= 0;
+}
+
+static int probe_kern_prog_name(int token_fd)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ union bpf_attr attr;
+ int ret;
+
+ memset(&attr, 0, attr_sz);
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.license = ptr_to_u64("GPL");
+ attr.insns = ptr_to_u64(insns);
+ attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
+ attr.prog_token_fd = token_fd;
+ if (token_fd)
+ attr.prog_flags |= BPF_F_TOKEN_FD;
+ libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
+
+ /* make sure loading with name works */
+ ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
+ return probe_fd(ret);
+}
+
+static int probe_kern_global_data(int token_fd)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ struct bpf_insn insns[] = {
+ BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts,
+ .token_fd = token_fd,
+ .map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int ret, map, insn_cnt = ARRAY_SIZE(insns);
+
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, &map_opts);
+ if (map < 0) {
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
+ pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
+ __func__, cp, -ret);
+ return ret;
+ }
+
+ insns[0].imm = map;
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
+ close(map);
+ return probe_fd(ret);
+}
+
+static int probe_kern_btf(int token_fd)
+{
+ static const char strs[] = "\0int";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_func(int token_fd)
+{
+ static const char strs[] = "\0int\0x\0a";
+ /* void x(int a) {} */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* FUNC_PROTO */ /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
+ BTF_PARAM_ENC(7, 1),
+ /* FUNC x */ /* [3] */
+ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_func_global(int token_fd)
+{
+ static const char strs[] = "\0int\0x\0a";
+ /* static void x(int a) {} */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* FUNC_PROTO */ /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
+ BTF_PARAM_ENC(7, 1),
+ /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
+ BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_datasec(int token_fd)
+{
+ static const char strs[] = "\0x\0.data";
+ /* static int a; */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC val */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_qmark_datasec(int token_fd)
+{
+ static const char strs[] = "\0x\0?.data";
+ /* static int a; */
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* DATASEC ?.data */ /* [3] */
+ BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
+ BTF_VAR_SECINFO_ENC(2, 0, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_float(int token_fd)
+{
+ static const char strs[] = "\0float";
+ __u32 types[] = {
+ /* float */
+ BTF_TYPE_FLOAT_ENC(1, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_decl_tag(int token_fd)
+{
+ static const char strs[] = "\0tag";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* attr */
+ BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_btf_type_tag(int token_fd)
+{
+ static const char strs[] = "\0tag";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* attr */
+ BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
+ /* ptr */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_array_mmap(int token_fd)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts,
+ .map_flags = BPF_F_MMAPABLE | (token_fd ? BPF_F_TOKEN_FD : 0),
+ .token_fd = token_fd,
+ );
+ int fd;
+
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
+ return probe_fd(fd);
+}
+
+static int probe_kern_exp_attach_type(int token_fd)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int fd, insn_cnt = ARRAY_SIZE(insns);
+
+ /* use any valid combination of program type and (optional)
+ * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
+ * to see if kernel supports expected_attach_type field for
+ * BPF_PROG_LOAD command
+ */
+ fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
+ return probe_fd(fd);
+}
+
+static int probe_kern_probe_read_kernel(int token_fd)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ struct bpf_insn insns[] = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
+ BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
+ BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
+ BPF_EXIT_INSN(),
+ };
+ int fd, insn_cnt = ARRAY_SIZE(insns);
+
+ fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
+ return probe_fd(fd);
+}
+
+static int probe_prog_bind_map(int token_fd)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_map_create_opts, map_opts,
+ .token_fd = token_fd,
+ .map_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ LIBBPF_OPTS(bpf_prog_load_opts, prog_opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
+
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, &map_opts);
+ if (map < 0) {
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
+ pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
+ __func__, cp, -ret);
+ return ret;
+ }
+
+ prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &prog_opts);
+ if (prog < 0) {
+ close(map);
+ return 0;
+ }
+
+ ret = bpf_prog_bind_map(prog, map, NULL);
+
+ close(map);
+ close(prog);
+
+ return ret >= 0;
+}
+
+static int probe_module_btf(int token_fd)
+{
+ static const char strs[] = "\0int";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ };
+ struct bpf_btf_info info;
+ __u32 len = sizeof(info);
+ char name[16];
+ int fd, err;
+
+ fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
+ if (fd < 0)
+ return 0; /* BTF not supported at all */
+
+ memset(&info, 0, sizeof(info));
+ info.name = ptr_to_u64(name);
+ info.name_len = sizeof(name);
+
+ /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
+ * kernel's module BTF support coincides with support for
+ * name/name_len fields in struct bpf_btf_info.
+ */
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
+ close(fd);
+ return !err;
+}
+
+static int probe_perf_link(int token_fd)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int prog_fd, link_fd, err;
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
+ insns, ARRAY_SIZE(insns), &opts);
+ if (prog_fd < 0)
+ return -errno;
+
+ /* use invalid perf_event FD to get EBADF, if link is supported;
+ * otherwise EINVAL should be returned
+ */
+ link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
+ err = -errno; /* close() can clobber errno */
+
+ if (link_fd >= 0)
+ close(link_fd);
+ close(prog_fd);
+
+ return link_fd < 0 && err == -EBADF;
+}
+
+static int probe_uprobe_multi_link(int token_fd)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
+ .expected_attach_type = BPF_TRACE_UPROBE_MULTI,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd, link_fd, err;
+ unsigned long offset = 0;
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
+ insns, ARRAY_SIZE(insns), &load_opts);
+ if (prog_fd < 0)
+ return -errno;
+
+ /* Creating uprobe in '/' binary should fail with -EBADF. */
+ link_opts.uprobe_multi.path = "/";
+ link_opts.uprobe_multi.offsets = &offset;
+ link_opts.uprobe_multi.cnt = 1;
+
+ link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
+ err = -errno; /* close() can clobber errno */
+
+ if (link_fd >= 0)
+ close(link_fd);
+ close(prog_fd);
+
+ return link_fd < 0 && err == -EBADF;
+}
+
+static int probe_kern_bpf_cookie(int token_fd)
+{
+ struct bpf_insn insns[] = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
+ BPF_EXIT_INSN(),
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int ret, insn_cnt = ARRAY_SIZE(insns);
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
+ return probe_fd(ret);
+}
+
+static int probe_kern_btf_enum64(int token_fd)
+{
+ static const char strs[] = "\0enum64";
+ __u32 types[] = {
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs), token_fd));
+}
+
+static int probe_kern_arg_ctx_tag(int token_fd)
+{
+ static const char strs[] = "\0a\0b\0arg:ctx\0";
+ const __u32 types[] = {
+ /* [1] INT */
+ BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
+ /* [2] PTR -> VOID */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
+ /* [3] FUNC_PROTO `int(void *a)` */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
+ BTF_PARAM_ENC(1 /* "a" */, 2),
+ /* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
+ BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
+ /* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
+ BTF_PARAM_ENC(3 /* "b" */, 2),
+ /* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
+ BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
+ /* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
+ BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
+ };
+ const struct bpf_insn insns[] = {
+ /* main prog */
+ BPF_CALL_REL(+1),
+ BPF_EXIT_INSN(),
+ /* global subprog */
+ BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
+ BPF_EXIT_INSN(),
+ };
+ const struct bpf_func_info_min func_infos[] = {
+ { 0, 4 }, /* main prog -> FUNC 'a' */
+ { 2, 6 }, /* subprog -> FUNC 'b' */
+ };
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = token_fd,
+ .prog_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
+ int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);
+
+ btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs), token_fd);
+ if (btf_fd < 0)
+ return 0;
+
+ opts.prog_btf_fd = btf_fd;
+ opts.func_info = &func_infos;
+ opts.func_info_cnt = ARRAY_SIZE(func_infos);
+ opts.func_info_rec_size = sizeof(func_infos[0]);
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
+ "GPL", insns, insn_cnt, &opts);
+ close(btf_fd);
+
+ return probe_fd(prog_fd);
+}
+
+typedef int (*feature_probe_fn)(int /* token_fd */);
+
+static struct kern_feature_cache feature_cache;
+
+static struct kern_feature_desc {
+ const char *desc;
+ feature_probe_fn probe;
+} feature_probes[__FEAT_CNT] = {
+ [FEAT_PROG_NAME] = {
+ "BPF program name", probe_kern_prog_name,
+ },
+ [FEAT_GLOBAL_DATA] = {
+ "global variables", probe_kern_global_data,
+ },
+ [FEAT_BTF] = {
+ "minimal BTF", probe_kern_btf,
+ },
+ [FEAT_BTF_FUNC] = {
+ "BTF functions", probe_kern_btf_func,
+ },
+ [FEAT_BTF_GLOBAL_FUNC] = {
+ "BTF global function", probe_kern_btf_func_global,
+ },
+ [FEAT_BTF_DATASEC] = {
+ "BTF data section and variable", probe_kern_btf_datasec,
+ },
+ [FEAT_ARRAY_MMAP] = {
+ "ARRAY map mmap()", probe_kern_array_mmap,
+ },
+ [FEAT_EXP_ATTACH_TYPE] = {
+ "BPF_PROG_LOAD expected_attach_type attribute",
+ probe_kern_exp_attach_type,
+ },
+ [FEAT_PROBE_READ_KERN] = {
+ "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
+ },
+ [FEAT_PROG_BIND_MAP] = {
+ "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
+ },
+ [FEAT_MODULE_BTF] = {
+ "module BTF support", probe_module_btf,
+ },
+ [FEAT_BTF_FLOAT] = {
+ "BTF_KIND_FLOAT support", probe_kern_btf_float,
+ },
+ [FEAT_PERF_LINK] = {
+ "BPF perf link support", probe_perf_link,
+ },
+ [FEAT_BTF_DECL_TAG] = {
+ "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
+ },
+ [FEAT_BTF_TYPE_TAG] = {
+ "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
+ },
+ [FEAT_MEMCG_ACCOUNT] = {
+ "memcg-based memory accounting", probe_memcg_account,
+ },
+ [FEAT_BPF_COOKIE] = {
+ "BPF cookie support", probe_kern_bpf_cookie,
+ },
+ [FEAT_BTF_ENUM64] = {
+ "BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
+ },
+ [FEAT_SYSCALL_WRAPPER] = {
+ "Kernel using syscall wrapper", probe_kern_syscall_wrapper,
+ },
+ [FEAT_UPROBE_MULTI_LINK] = {
+ "BPF multi-uprobe link support", probe_uprobe_multi_link,
+ },
+ [FEAT_ARG_CTX_TAG] = {
+ "kernel-side __arg_ctx tag", probe_kern_arg_ctx_tag,
+ },
+ [FEAT_BTF_QMARK_DATASEC] = {
+ "BTF DATASEC names starting from '?'", probe_kern_btf_qmark_datasec,
+ },
+};
+
+bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id)
+{
+ struct kern_feature_desc *feat = &feature_probes[feat_id];
+ int ret;
+
+ /* assume global feature cache, unless custom one is provided */
+ if (!cache)
+ cache = &feature_cache;
+
+ if (READ_ONCE(cache->res[feat_id]) == FEAT_UNKNOWN) {
+ ret = feat->probe(cache->token_fd);
+ if (ret > 0) {
+ WRITE_ONCE(cache->res[feat_id], FEAT_SUPPORTED);
+ } else if (ret == 0) {
+ WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
+ } else {
+ pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
+ WRITE_ONCE(cache->res[feat_id], FEAT_MISSING);
+ }
+ }
+
+ return READ_ONCE(cache->res[feat_id]) == FEAT_SUPPORTED;
+}
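
The probe-once, cache-forever pattern implemented above, reduced to a stand-alone sketch (names and the fixed-size cache are illustrative, not libbpf internals):

    #include <stdbool.h>

    enum feat_res { FEAT_UNKNOWN = 0, FEAT_SUPPORTED, FEAT_MISSING };

    struct feat_cache {
        enum feat_res res[32]; /* one tri-state slot per feature */
        int token_fd;          /* token used by token-aware probes */
    };

    static bool cached_feat(struct feat_cache *c, int id, int (*probe)(int))
    {
        if (c->res[id] == FEAT_UNKNOWN)
            c->res[id] = probe(c->token_fd) > 0 ? FEAT_SUPPORTED
                                                : FEAT_MISSING;
        return c->res[id] == FEAT_SUPPORTED;
    }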
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 028d8f014c..f515cf264a 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -60,6 +60,8 @@
#define BPF_FS_MAGIC 0xcafe4a11
#endif
+#define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
+
#define BPF_INSN_SZ (sizeof(struct bpf_insn))
/* vsprintf() in __base_pr() uses nonliteral format string. It may break
@@ -183,6 +185,7 @@ static const char * const map_type_name[] = {
[BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
[BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
[BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
+ [BPF_MAP_TYPE_ARENA] = "arena",
};
static const char * const prog_type_name[] = {
@@ -495,6 +498,7 @@ struct bpf_struct_ops {
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
#define STRUCT_OPS_LINK_SEC ".struct_ops.link"
+#define ARENA_SEC ".addr_space.1"
enum libbpf_map_type {
LIBBPF_MAP_UNSPEC,
@@ -529,6 +533,7 @@ struct bpf_map {
struct bpf_map_def def;
__u32 numa_node;
__u32 btf_var_idx;
+ int mod_btf_fd;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 btf_vmlinux_value_type_id;
@@ -609,6 +614,7 @@ enum sec_type {
SEC_BSS,
SEC_DATA,
SEC_RODATA,
+ SEC_ST_OPS,
};
struct elf_sec_desc {
@@ -624,8 +630,7 @@ struct elf_state {
Elf *elf;
Elf64_Ehdr *ehdr;
Elf_Data *symbols;
- Elf_Data *st_ops_data;
- Elf_Data *st_ops_link_data;
+ Elf_Data *arena_data;
size_t shstrndx; /* section index for section name strings */
size_t strtabidx;
struct elf_sec_desc *secs;
@@ -634,8 +639,8 @@ struct elf_state {
__u32 btf_maps_sec_btf_id;
int text_shndx;
int symbols_shndx;
- int st_ops_shndx;
- int st_ops_link_shndx;
+ bool has_st_ops;
+ int arena_data_shndx;
};
struct usdt_manager;
@@ -695,6 +700,14 @@ struct bpf_object {
struct usdt_manager *usdt_man;
+ struct bpf_map *arena_map;
+ void *arena_data;
+ size_t arena_data_sz;
+
+ struct kern_feature_cache *feat_cache;
+ char *token_path;
+ int token_fd;
+
char path[];
};
@@ -932,22 +945,33 @@ find_member_by_name(const struct btf *btf, const struct btf_type *t,
return NULL;
}
+static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
+ __u16 kind, struct btf **res_btf,
+ struct module_btf **res_mod_btf);
+
#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
const char *name, __u32 kind);
static int
-find_struct_ops_kern_types(const struct btf *btf, const char *tname,
+find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
+ struct module_btf **mod_btf,
const struct btf_type **type, __u32 *type_id,
const struct btf_type **vtype, __u32 *vtype_id,
const struct btf_member **data_member)
{
const struct btf_type *kern_type, *kern_vtype;
const struct btf_member *kern_data_member;
+ struct btf *btf;
__s32 kern_vtype_id, kern_type_id;
+ char tname[256];
__u32 i;
- kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
+ snprintf(tname, sizeof(tname), "%.*s",
+ (int)bpf_core_essential_name_len(tname_raw), tname_raw);
+
+ kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
+ &btf, mod_btf);
if (kern_type_id < 0) {
pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
tname);
@@ -1000,15 +1024,72 @@ static bool bpf_map__is_struct_ops(const struct bpf_map *map)
return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}
+static bool is_valid_st_ops_program(struct bpf_object *obj,
+ const struct bpf_program *prog)
+{
+ int i;
+
+ for (i = 0; i < obj->nr_programs; i++) {
+ if (&obj->programs[i] == prog)
+ return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
+ }
+
+ return false;
+}
+
+/* For each struct_ops program P, referenced from some struct_ops map M,
+ * enable P.autoload if there are Ms for which M.autocreate is true,
+ * disable P.autoload if for all Ms M.autocreate is false.
+ * Don't change P.autoload for programs that are not referenced from any maps.
+ */
+static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
+{
+ struct bpf_program *prog, *slot_prog;
+ struct bpf_map *map;
+ int i, j, k, vlen;
+
+ for (i = 0; i < obj->nr_programs; ++i) {
+ int should_load = false;
+ int use_cnt = 0;
+
+ prog = &obj->programs[i];
+ if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
+ continue;
+
+ for (j = 0; j < obj->nr_maps; ++j) {
+ map = &obj->maps[j];
+ if (!bpf_map__is_struct_ops(map))
+ continue;
+
+ vlen = btf_vlen(map->st_ops->type);
+ for (k = 0; k < vlen; ++k) {
+ slot_prog = map->st_ops->progs[k];
+ if (prog != slot_prog)
+ continue;
+
+ use_cnt++;
+ if (map->autocreate)
+ should_load = true;
+ }
+ }
+ if (use_cnt)
+ prog->autoload = should_load;
+ }
+
+ return 0;
+}
+
/* Init the map's fields that depend on kern_btf */
-static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
- const struct btf *btf,
- const struct btf *kern_btf)
+static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
{
const struct btf_member *member, *kern_member, *kern_data_member;
const struct btf_type *type, *kern_type, *kern_vtype;
__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
+ struct bpf_object *obj = map->obj;
+ const struct btf *btf = obj->btf;
struct bpf_struct_ops *st_ops;
+ const struct btf *kern_btf;
+ struct module_btf *mod_btf;
void *data, *kern_data;
const char *tname;
int err;
@@ -1016,16 +1097,19 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
st_ops = map->st_ops;
type = st_ops->type;
tname = st_ops->tname;
- err = find_struct_ops_kern_types(kern_btf, tname,
+ err = find_struct_ops_kern_types(obj, tname, &mod_btf,
&kern_type, &kern_type_id,
&kern_vtype, &kern_vtype_id,
&kern_data_member);
if (err)
return err;
+ kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;
+
pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
+ map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
map->def.value_size = kern_vtype->size;
map->btf_vmlinux_value_type_id = kern_vtype_id;
@@ -1083,9 +1167,16 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
if (btf_is_ptr(mtype)) {
struct bpf_program *prog;
- prog = st_ops->progs[i];
+ /* Update the value from the shadow type */
+ prog = *(void **)mdata;
+ st_ops->progs[i] = prog;
if (!prog)
continue;
+ if (!is_valid_st_ops_program(obj, prog)) {
+ pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
+ map->name, mname);
+ return -ENOTSUP;
+ }
kern_mtype = skip_mods_and_typedefs(kern_btf,
kern_mtype->type,
@@ -1101,8 +1192,34 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
return -ENOTSUP;
}
- prog->attach_btf_id = kern_type_id;
- prog->expected_attach_type = kern_member_idx;
+ if (mod_btf)
+ prog->attach_btf_obj_fd = mod_btf->fd;
+
+ /* if we haven't yet processed this BPF program, record proper
+ * attach_btf_id and member_idx
+ */
+ if (!prog->attach_btf_id) {
+ prog->attach_btf_id = kern_type_id;
+ prog->expected_attach_type = kern_member_idx;
+ }
+
+ /* struct_ops BPF prog can be re-used between multiple
+ * .struct_ops & .struct_ops.link as long as it's the
+ * same struct_ops struct definition and the same
+ * function pointer field
+ */
+ if (prog->attach_btf_id != kern_type_id) {
+ pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
+ map->name, mname, prog->name, prog->sec_name, prog->type,
+ prog->attach_btf_id, kern_type_id);
+ return -EINVAL;
+ }
+ if (prog->expected_attach_type != kern_member_idx) {
+ pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
+ map->name, mname, prog->name, prog->sec_name, prog->type,
+ prog->expected_attach_type, kern_member_idx);
+ return -EINVAL;
+ }
st_ops->kern_func_off[i] = kern_data_off + kern_moff;
@@ -1143,8 +1260,10 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
if (!bpf_map__is_struct_ops(map))
continue;
- err = bpf_map__init_kern_struct_ops(map, obj->btf,
- obj->btf_vmlinux);
+ if (!map->autocreate)
+ continue;
+
+ err = bpf_map__init_kern_struct_ops(map);
if (err)
return err;
}
@@ -1153,7 +1272,7 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
}
static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
- int shndx, Elf_Data *data, __u32 map_flags)
+ int shndx, Elf_Data *data)
{
const struct btf_type *type, *datasec;
const struct btf_var_secinfo *vsi;
@@ -1209,12 +1328,22 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
map->name = strdup(var_name);
if (!map->name)
return -ENOMEM;
+ map->btf_value_type_id = type_id;
+
+ /* Follow same convention as for programs autoload:
+ * SEC("?.struct_ops") means map is not created by default.
+ */
+ if (sec_name[0] == '?') {
+ map->autocreate = false;
+ /* from now on forget there was ? in section name */
+ sec_name++;
+ }
map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
map->def.key_size = sizeof(int);
map->def.value_size = type->size;
map->def.max_entries = 1;
- map->def.map_flags = map_flags;
+ map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
map->st_ops = calloc(1, sizeof(*map->st_ops));
if (!map->st_ops)
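
The new '?' convention, as a hedged sketch (the struct_ops type and skeleton accessor are illustrative):

    /* BPF side: map is declared but not created by default */
    SEC("?.struct_ops")
    struct tcp_congestion_ops optional_ca = {
        /* callbacks elided */
    };

    /* userspace side: opt in before bpf_object__load() */
    bpf_map__set_autocreate(skel->maps.optional_ca, true);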
@@ -1249,15 +1378,25 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
static int bpf_object_init_struct_ops(struct bpf_object *obj)
{
- int err;
+ const char *sec_name;
+ int sec_idx, err;
- err = init_struct_ops_maps(obj, STRUCT_OPS_SEC, obj->efile.st_ops_shndx,
- obj->efile.st_ops_data, 0);
- err = err ?: init_struct_ops_maps(obj, STRUCT_OPS_LINK_SEC,
- obj->efile.st_ops_link_shndx,
- obj->efile.st_ops_link_data,
- BPF_F_LINK);
- return err;
+ for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
+ struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];
+
+ if (desc->sec_type != SEC_ST_OPS)
+ continue;
+
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ if (!sec_name)
+ return -LIBBPF_ERRNO__FORMAT;
+
+ err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static struct bpf_object *bpf_object__new(const char *path,
@@ -1295,8 +1434,6 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.obj_buf = obj_buf;
obj->efile.obj_buf_sz = obj_buf_sz;
obj->efile.btf_maps_shndx = -1;
- obj->efile.st_ops_shndx = -1;
- obj->efile.st_ops_link_shndx = -1;
obj->kconfig_map_idx = -1;
obj->kern_version = get_kernel_version();
@@ -1313,8 +1450,7 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
elf_end(obj->efile.elf);
obj->efile.elf = NULL;
obj->efile.symbols = NULL;
- obj->efile.st_ops_data = NULL;
- obj->efile.st_ops_link_data = NULL;
+ obj->efile.arena_data = NULL;
zfree(&obj->efile.secs);
obj->efile.sec_cnt = 0;
@@ -1505,11 +1641,24 @@ static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *nam
return ERR_PTR(-ENOENT);
}
+/* Some versions of Android don't provide memfd_create() in their libc
+ * implementation, so avoid complications and just go straight to Linux
+ * syscall.
+ */
+static int sys_memfd_create(const char *name, unsigned flags)
+{
+ return syscall(__NR_memfd_create, name, flags);
+}
+
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC 0x0001U
+#endif
+
static int create_placeholder_fd(void)
{
int fd;
- fd = ensure_good_fd(memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
+ fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
if (fd < 0)
return -errno;
return fd;
@@ -1548,7 +1697,7 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
return map;
}
-static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
+static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
{
const long page_sz = sysconf(_SC_PAGE_SIZE);
size_t map_sz;
@@ -1558,6 +1707,20 @@ static size_t bpf_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
return map_sz;
}
+static size_t bpf_map_mmap_sz(const struct bpf_map *map)
+{
+ const long page_sz = sysconf(_SC_PAGE_SIZE);
+
+ switch (map->def.type) {
+ case BPF_MAP_TYPE_ARRAY:
+ return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ case BPF_MAP_TYPE_ARENA:
+ return page_sz * map->def.max_entries;
+ default:
+ return 0; /* not supported */
+ }
+}
+
static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
{
void *mmaped;
@@ -1700,7 +1863,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
def->value_size = data_sz;
def->max_entries = 1;
def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
- ? BPF_F_RDONLY_PROG : 0;
+ ? BPF_F_RDONLY_PROG : 0;
/* failures are fine because of maps like .rodata.str1.1 */
(void) map_fill_btf_type_info(obj, map);
@@ -1711,7 +1874,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
map->name, map->sec_idx, map->sec_offset, def->map_flags);
- mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ mmap_sz = bpf_map_mmap_sz(map);
map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (map->mmaped == MAP_FAILED) {
@@ -2199,6 +2362,46 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
return true;
}
+static bool get_map_field_long(const char *map_name, const struct btf *btf,
+ const struct btf_member *m, __u64 *res)
+{
+ const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
+ const char *name = btf__name_by_offset(btf, m->name_off);
+
+ if (btf_is_ptr(t)) {
+ __u32 res32;
+ bool ret;
+
+ ret = get_map_field_int(map_name, btf, m, &res32);
+ if (ret)
+ *res = (__u64)res32;
+ return ret;
+ }
+
+ if (!btf_is_enum(t) && !btf_is_enum64(t)) {
+ pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n",
+ map_name, name, btf_kind_str(t));
+ return false;
+ }
+
+ if (btf_vlen(t) != 1) {
+ pr_warn("map '%s': attr '%s': invalid __ulong\n",
+ map_name, name);
+ return false;
+ }
+
+ if (btf_is_enum(t)) {
+ const struct btf_enum *e = btf_enum(t);
+
+ *res = e->val;
+ } else {
+ const struct btf_enum64 *e = btf_enum64(t);
+
+ *res = btf_enum64_value(e);
+ }
+ return true;
+}
+
static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
{
int len;
@@ -2218,7 +2421,7 @@ static int build_map_pin_path(struct bpf_map *map, const char *path)
int err;
if (!path)
- path = "/sys/fs/bpf";
+ path = BPF_FS_DEFAULT_PATH;
err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
if (err)
@@ -2432,9 +2635,9 @@ int parse_btf_map_def(const char *map_name, struct btf *btf,
map_def->pinning = val;
map_def->parts |= MAP_DEF_PINNING;
} else if (strcmp(name, "map_extra") == 0) {
- __u32 map_extra;
+ __u64 map_extra;
- if (!get_map_field_int(map_name, btf, m, &map_extra))
+ if (!get_map_field_long(map_name, btf, m, &map_extra))
return -EINVAL;
map_def->map_extra = map_extra;
map_def->parts |= MAP_DEF_MAP_EXTRA;
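
Together with the new __ulong() macro in bpf_helpers.h, map_extra can now carry a full 64-bit value in a BTF map definition. A hedged sketch (map type and values are illustrative; the placement-hint semantics are an assumption):

    struct {
        __uint(type, BPF_MAP_TYPE_ARENA);
        __uint(map_flags, BPF_F_MMAPABLE);
        __uint(max_entries, 100);         /* pages */
        __ulong(map_extra, 0x1ull << 44); /* 64-bit placement hint */
    } arena SEC(".maps");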
@@ -2652,6 +2855,32 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
return 0;
}
+static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
+ const char *sec_name, int sec_idx,
+ void *data, size_t data_sz)
+{
+ const long page_sz = sysconf(_SC_PAGE_SIZE);
+ size_t mmap_sz;
+
+ mmap_sz = bpf_map_mmap_sz(obj->arena_map);
+ if (roundup(data_sz, page_sz) > mmap_sz) {
+ pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
+ sec_name, mmap_sz, data_sz);
+ return -E2BIG;
+ }
+
+ obj->arena_data = malloc(data_sz);
+ if (!obj->arena_data)
+ return -ENOMEM;
+ memcpy(obj->arena_data, data, data_sz);
+ obj->arena_data_sz = data_sz;
+
+ /* make bpf_map__init_value() work for ARENA maps */
+ map->mmaped = obj->arena_data;
+
+ return 0;
+}
+
static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
const char *pin_root_path)
{
@@ -2701,6 +2930,33 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
return err;
}
+ for (i = 0; i < obj->nr_maps; i++) {
+ struct bpf_map *map = &obj->maps[i];
+
+ if (map->def.type != BPF_MAP_TYPE_ARENA)
+ continue;
+
+ if (obj->arena_map) {
+ pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
+ map->name, obj->arena_map->name);
+ return -EINVAL;
+ }
+ obj->arena_map = map;
+
+ if (obj->efile.arena_data) {
+ err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
+ obj->efile.arena_data->d_buf,
+ obj->efile.arena_data->d_size);
+ if (err)
+ return err;
+ }
+ }
+ if (obj->efile.arena_data && !obj->arena_map) {
+ pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
+ ARENA_SEC);
+ return -ENOENT;
+ }
+
return 0;
}
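
Taken together, these checks enforce: at most one ARENA map per object, and global __arena variables require that map to be declared explicitly. A sketch of the BPF-side pattern this supports (assuming the __arena address-space macro from the arena selftest headers):

    struct {
        __uint(type, BPF_MAP_TYPE_ARENA);
        __uint(map_flags, BPF_F_MMAPABLE);
        __uint(max_entries, 10);
    } arena SEC(".maps");

    int __arena shared_counter;   /* lands in the ARENA_SEC (".arena.1") data section */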
@@ -2733,6 +2989,11 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
return sh->sh_flags & SHF_EXECINSTR;
}
+static bool starts_with_qmark(const char *s)
+{
+ return s && s[0] == '?';
+}
+
static bool btf_needs_sanitization(struct bpf_object *obj)
{
bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
@@ -2742,9 +3003,10 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
+ bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
return !has_func || !has_datasec || !has_func_global || !has_float ||
- !has_decl_tag || !has_type_tag || !has_enum64;
+ !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
}
static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
@@ -2756,6 +3018,7 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
+ bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
int enum64_placeholder_id = 0;
struct btf_type *t;
int i, j, vlen;
@@ -2782,7 +3045,7 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
name = (char *)btf__name_by_offset(btf, t->name_off);
while (*name) {
- if (*name == '.')
+ if (*name == '.' || *name == '?')
*name = '_';
name++;
}
@@ -2797,6 +3060,14 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
vt = (void *)btf__type_by_id(btf, v->type);
m->name_off = vt->name_off;
}
+ } else if (!has_qmark_datasec && btf_is_datasec(t) &&
+ starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
+ /* replace '?' prefix with '_' for DATASEC names */
+ char *name;
+
+ name = (char *)btf__name_by_offset(btf, t->name_off);
+ if (name[0] == '?')
+ name[0] = '_';
} else if (!has_func && btf_is_func_proto(t)) {
/* replace FUNC_PROTO with ENUM */
vlen = btf_vlen(t);
@@ -2850,14 +3121,13 @@ static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
static bool libbpf_needs_btf(const struct bpf_object *obj)
{
return obj->efile.btf_maps_shndx >= 0 ||
- obj->efile.st_ops_shndx >= 0 ||
- obj->efile.st_ops_link_shndx >= 0 ||
+ obj->efile.has_st_ops ||
obj->nr_extern > 0;
}
static bool kernel_needs_btf(const struct bpf_object *obj)
{
- return obj->efile.st_ops_shndx >= 0 || obj->efile.st_ops_link_shndx >= 0;
+ return obj->efile.has_st_ops;
}
static int bpf_object__init_btf(struct bpf_object *obj,
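
The '?' name prefix marks a section as optional: for struct_ops it lets an object carry maps whose kernel-side type may be absent, to be disabled via autocreate instead of failing the load; on kernels without FEAT_BTF_QMARK_DATASEC the prefix is sanitized to '_'. A hedged sketch of the declaration style (struct and values illustrative):

    SEC("?.struct_ops")
    struct tcp_congestion_ops optional_cc = {
        .name = "bpf_cc_demo",
    };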
@@ -3227,7 +3497,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
} else {
/* currently BPF_BTF_LOAD only supports log_level 1 */
err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
- obj->log_level ? 1 : 0);
+ obj->log_level ? 1 : 0, obj->token_fd);
}
if (sanitize) {
if (!err) {
@@ -3558,12 +3828,17 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
sec_desc->sec_type = SEC_RODATA;
sec_desc->shdr = sh;
sec_desc->data = data;
- } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
- obj->efile.st_ops_data = data;
- obj->efile.st_ops_shndx = idx;
- } else if (strcmp(name, STRUCT_OPS_LINK_SEC) == 0) {
- obj->efile.st_ops_link_data = data;
- obj->efile.st_ops_link_shndx = idx;
+ } else if (strcmp(name, STRUCT_OPS_SEC) == 0 ||
+ strcmp(name, STRUCT_OPS_LINK_SEC) == 0 ||
+ strcmp(name, "?" STRUCT_OPS_SEC) == 0 ||
+ strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) {
+ sec_desc->sec_type = SEC_ST_OPS;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
+ obj->efile.has_st_ops = true;
+ } else if (strcmp(name, ARENA_SEC) == 0) {
+ obj->efile.arena_data = data;
+ obj->efile.arena_data_shndx = idx;
} else {
pr_info("elf: skipping unrecognized data section(%d) %s\n",
idx, name);
@@ -3579,6 +3854,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (!section_have_execinstr(obj, targ_sec_idx) &&
strcmp(name, ".rel" STRUCT_OPS_SEC) &&
strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
+ strcmp(name, ".rel?" STRUCT_OPS_SEC) &&
+ strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) &&
strcmp(name, ".rel" MAPS_ELF_SEC)) {
pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
idx, name, targ_sec_idx,
@@ -4191,6 +4468,15 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
+ /* arena data relocation */
+ if (shdr_idx == obj->efile.arena_data_shndx) {
+ reloc_desc->type = RELO_DATA;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->map_idx = obj->arena_map - obj->maps;
+ reloc_desc->sym_off = sym->st_value;
+ return 0;
+ }
+
/* generic map reference relocation */
if (type == LIBBPF_MAP_UNSPEC) {
if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
@@ -4548,6 +4834,58 @@ int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
return 0;
}
+static int bpf_object_prepare_token(struct bpf_object *obj)
+{
+ const char *bpffs_path;
+ int bpffs_fd = -1, token_fd, err;
+ bool mandatory;
+ enum libbpf_print_level level;
+
+ /* token is explicitly prevented */
+ if (obj->token_path && obj->token_path[0] == '\0') {
+ pr_debug("object '%s': token is prevented, skipping...\n", obj->name);
+ return 0;
+ }
+
+ mandatory = obj->token_path != NULL;
+ level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG;
+
+ bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
+	bpffs_fd = open(bpffs_path, O_DIRECTORY | O_RDONLY);
+ if (bpffs_fd < 0) {
+ err = -errno;
+ __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n",
+ obj->name, err, bpffs_path,
+ mandatory ? "" : ", skipping optional step...");
+ return mandatory ? err : 0;
+ }
+
+ token_fd = bpf_token_create(bpffs_fd, 0);
+ close(bpffs_fd);
+ if (token_fd < 0) {
+ if (!mandatory && token_fd == -ENOENT) {
+ pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n",
+ obj->name, bpffs_path);
+ return 0;
+ }
+ __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n",
+ obj->name, token_fd, bpffs_path,
+ mandatory ? "" : ", skipping optional step...");
+ return mandatory ? token_fd : 0;
+ }
+
+ obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
+ if (!obj->feat_cache) {
+ close(token_fd);
+ return -ENOMEM;
+ }
+
+ obj->token_fd = token_fd;
+ obj->feat_cache->token_fd = token_fd;
+
+ return 0;
+}
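
The same building block is exposed to applications via the low-level API. A minimal sketch of creating a token from a BPF FS mount with delegation options configured:

    int fs_fd = open("/sys/fs/bpf", O_DIRECTORY | O_RDONLY);
    int token_fd = bpf_token_create(fs_fd, NULL);   /* >= 0 on success */

    close(fs_fd);
    /* pass token_fd in bpf_prog_load_opts/bpf_map_create_opts together
     * with the BPF_F_TOKEN_FD flag
     */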
+
static int
bpf_object__probe_loading(struct bpf_object *obj)
{
@@ -4557,6 +4895,10 @@ bpf_object__probe_loading(struct bpf_object *obj)
BPF_EXIT_INSN(),
};
int ret, insn_cnt = ARRAY_SIZE(insns);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .token_fd = obj->token_fd,
+ .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0,
+ );
if (obj->gen_loader)
return 0;
@@ -4566,9 +4908,9 @@ bpf_object__probe_loading(struct bpf_object *obj)
pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
/* make sure basic loading works */
- ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
+ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
if (ret < 0)
- ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
+ ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
if (ret < 0) {
ret = errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
@@ -4583,462 +4925,18 @@ bpf_object__probe_loading(struct bpf_object *obj)
return 0;
}
-static int probe_fd(int fd)
-{
- if (fd >= 0)
- close(fd);
- return fd >= 0;
-}
-
-static int probe_kern_prog_name(void)
-{
- const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- union bpf_attr attr;
- int ret;
-
- memset(&attr, 0, attr_sz);
- attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
- attr.license = ptr_to_u64("GPL");
- attr.insns = ptr_to_u64(insns);
- attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
- libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
-
- /* make sure loading with name works */
- ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
- return probe_fd(ret);
-}
-
-static int probe_kern_global_data(void)
-{
- char *cp, errmsg[STRERR_BUFSIZE];
- struct bpf_insn insns[] = {
- BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
- BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int ret, map, insn_cnt = ARRAY_SIZE(insns);
-
- map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, NULL);
- if (map < 0) {
- ret = -errno;
- cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
- pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, -ret);
- return ret;
- }
-
- insns[0].imm = map;
-
- ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
- close(map);
- return probe_fd(ret);
-}
-
-static int probe_kern_btf(void)
-{
- static const char strs[] = "\0int";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_func(void)
-{
- static const char strs[] = "\0int\0x\0a";
- /* void x(int a) {} */
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* FUNC_PROTO */ /* [2] */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
- BTF_PARAM_ENC(7, 1),
- /* FUNC x */ /* [3] */
- BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_func_global(void)
-{
- static const char strs[] = "\0int\0x\0a";
- /* static void x(int a) {} */
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* FUNC_PROTO */ /* [2] */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
- BTF_PARAM_ENC(7, 1),
- /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
- BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_datasec(void)
-{
- static const char strs[] = "\0x\0.data";
- /* static int a; */
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* VAR x */ /* [2] */
- BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
- BTF_VAR_STATIC,
- /* DATASEC val */ /* [3] */
- BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
- BTF_VAR_SECINFO_ENC(2, 0, 4),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_float(void)
-{
- static const char strs[] = "\0float";
- __u32 types[] = {
- /* float */
- BTF_TYPE_FLOAT_ENC(1, 4),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_decl_tag(void)
-{
- static const char strs[] = "\0tag";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* VAR x */ /* [2] */
- BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
- BTF_VAR_STATIC,
- /* attr */
- BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_btf_type_tag(void)
-{
- static const char strs[] = "\0tag";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
- /* attr */
- BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
- /* ptr */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_array_mmap(void)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
- int fd;
-
- fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
- return probe_fd(fd);
-}
-
-static int probe_kern_exp_attach_type(void)
-{
- LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int fd, insn_cnt = ARRAY_SIZE(insns);
-
- /* use any valid combination of program type and (optional)
- * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
- * to see if kernel supports expected_attach_type field for
- * BPF_PROG_LOAD command
- */
- fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
- return probe_fd(fd);
-}
-
-static int probe_kern_probe_read_kernel(void)
-{
- struct bpf_insn insns[] = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
- BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
- BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
- BPF_EXIT_INSN(),
- };
- int fd, insn_cnt = ARRAY_SIZE(insns);
-
- fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
- return probe_fd(fd);
-}
-
-static int probe_prog_bind_map(void)
-{
- char *cp, errmsg[STRERR_BUFSIZE];
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
-
- map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, NULL);
- if (map < 0) {
- ret = -errno;
- cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
- pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, -ret);
- return ret;
- }
-
- prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
- if (prog < 0) {
- close(map);
- return 0;
- }
-
- ret = bpf_prog_bind_map(prog, map, NULL);
-
- close(map);
- close(prog);
-
- return ret >= 0;
-}
-
-static int probe_module_btf(void)
-{
- static const char strs[] = "\0int";
- __u32 types[] = {
- /* int */
- BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
- };
- struct bpf_btf_info info;
- __u32 len = sizeof(info);
- char name[16];
- int fd, err;
-
- fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
- if (fd < 0)
- return 0; /* BTF not supported at all */
-
- memset(&info, 0, sizeof(info));
- info.name = ptr_to_u64(name);
- info.name_len = sizeof(name);
-
- /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
- * kernel's module BTF support coincides with support for
- * name/name_len fields in struct bpf_btf_info.
- */
- err = bpf_btf_get_info_by_fd(fd, &info, &len);
- close(fd);
- return !err;
-}
-
-static int probe_perf_link(void)
-{
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int prog_fd, link_fd, err;
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
- insns, ARRAY_SIZE(insns), NULL);
- if (prog_fd < 0)
- return -errno;
-
- /* use invalid perf_event FD to get EBADF, if link is supported;
- * otherwise EINVAL should be returned
- */
- link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
- err = -errno; /* close() can clobber errno */
-
- if (link_fd >= 0)
- close(link_fd);
- close(prog_fd);
-
- return link_fd < 0 && err == -EBADF;
-}
-
-static int probe_uprobe_multi_link(void)
-{
- LIBBPF_OPTS(bpf_prog_load_opts, load_opts,
- .expected_attach_type = BPF_TRACE_UPROBE_MULTI,
- );
- LIBBPF_OPTS(bpf_link_create_opts, link_opts);
- struct bpf_insn insns[] = {
- BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_EXIT_INSN(),
- };
- int prog_fd, link_fd, err;
- unsigned long offset = 0;
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL",
- insns, ARRAY_SIZE(insns), &load_opts);
- if (prog_fd < 0)
- return -errno;
-
- /* Creating uprobe in '/' binary should fail with -EBADF. */
- link_opts.uprobe_multi.path = "/";
- link_opts.uprobe_multi.offsets = &offset;
- link_opts.uprobe_multi.cnt = 1;
-
- link_fd = bpf_link_create(prog_fd, -1, BPF_TRACE_UPROBE_MULTI, &link_opts);
- err = -errno; /* close() can clobber errno */
-
- if (link_fd >= 0)
- close(link_fd);
- close(prog_fd);
-
- return link_fd < 0 && err == -EBADF;
-}
-
-static int probe_kern_bpf_cookie(void)
-{
- struct bpf_insn insns[] = {
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
- BPF_EXIT_INSN(),
- };
- int ret, insn_cnt = ARRAY_SIZE(insns);
-
- ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
- return probe_fd(ret);
-}
-
-static int probe_kern_btf_enum64(void)
-{
- static const char strs[] = "\0enum64";
- __u32 types[] = {
- BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
- };
-
- return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs)));
-}
-
-static int probe_kern_syscall_wrapper(void);
-
-enum kern_feature_result {
- FEAT_UNKNOWN = 0,
- FEAT_SUPPORTED = 1,
- FEAT_MISSING = 2,
-};
-
-typedef int (*feature_probe_fn)(void);
-
-static struct kern_feature_desc {
- const char *desc;
- feature_probe_fn probe;
- enum kern_feature_result res;
-} feature_probes[__FEAT_CNT] = {
- [FEAT_PROG_NAME] = {
- "BPF program name", probe_kern_prog_name,
- },
- [FEAT_GLOBAL_DATA] = {
- "global variables", probe_kern_global_data,
- },
- [FEAT_BTF] = {
- "minimal BTF", probe_kern_btf,
- },
- [FEAT_BTF_FUNC] = {
- "BTF functions", probe_kern_btf_func,
- },
- [FEAT_BTF_GLOBAL_FUNC] = {
- "BTF global function", probe_kern_btf_func_global,
- },
- [FEAT_BTF_DATASEC] = {
- "BTF data section and variable", probe_kern_btf_datasec,
- },
- [FEAT_ARRAY_MMAP] = {
- "ARRAY map mmap()", probe_kern_array_mmap,
- },
- [FEAT_EXP_ATTACH_TYPE] = {
- "BPF_PROG_LOAD expected_attach_type attribute",
- probe_kern_exp_attach_type,
- },
- [FEAT_PROBE_READ_KERN] = {
- "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
- },
- [FEAT_PROG_BIND_MAP] = {
- "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
- },
- [FEAT_MODULE_BTF] = {
- "module BTF support", probe_module_btf,
- },
- [FEAT_BTF_FLOAT] = {
- "BTF_KIND_FLOAT support", probe_kern_btf_float,
- },
- [FEAT_PERF_LINK] = {
- "BPF perf link support", probe_perf_link,
- },
- [FEAT_BTF_DECL_TAG] = {
- "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
- },
- [FEAT_BTF_TYPE_TAG] = {
- "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
- },
- [FEAT_MEMCG_ACCOUNT] = {
- "memcg-based memory accounting", probe_memcg_account,
- },
- [FEAT_BPF_COOKIE] = {
- "BPF cookie support", probe_kern_bpf_cookie,
- },
- [FEAT_BTF_ENUM64] = {
- "BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
- },
- [FEAT_SYSCALL_WRAPPER] = {
- "Kernel using syscall wrapper", probe_kern_syscall_wrapper,
- },
- [FEAT_UPROBE_MULTI_LINK] = {
- "BPF multi-uprobe link support", probe_uprobe_multi_link,
- },
-};
-
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
{
- struct kern_feature_desc *feat = &feature_probes[feat_id];
- int ret;
-
- if (obj && obj->gen_loader)
+ if (obj->gen_loader)
/* To generate loader program assume the latest kernel
* to avoid doing extra prog_load, map_create syscalls.
*/
return true;
- if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
- ret = feat->probe();
- if (ret > 0) {
- WRITE_ONCE(feat->res, FEAT_SUPPORTED);
- } else if (ret == 0) {
- WRITE_ONCE(feat->res, FEAT_MISSING);
- } else {
- pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
- WRITE_ONCE(feat->res, FEAT_MISSING);
- }
- }
+ if (obj->token_fd)
+ return feat_supported(obj->feat_cache, feat_id);
- return READ_ONCE(feat->res) == FEAT_SUPPORTED;
+ return feat_supported(NULL, feat_id);
}
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
@@ -5119,6 +5017,7 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
return 0;
}
+
err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
if (err) {
err = -errno;
@@ -5162,9 +5061,17 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.map_flags = def->map_flags;
create_attr.numa_node = map->numa_node;
create_attr.map_extra = map->map_extra;
+ create_attr.token_fd = obj->token_fd;
+ if (obj->token_fd)
+ create_attr.map_flags |= BPF_F_TOKEN_FD;
- if (bpf_map__is_struct_ops(map))
+ if (bpf_map__is_struct_ops(map)) {
create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
+ if (map->mod_btf_fd >= 0) {
+ create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
+ create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
+ }
+ }
if (obj->btf && btf__fd(obj->btf) >= 0) {
create_attr.btf_fd = btf__fd(obj->btf);
@@ -5203,11 +5110,16 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_QUEUE:
case BPF_MAP_TYPE_STACK:
+ case BPF_MAP_TYPE_ARENA:
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
map->btf_key_type_id = 0;
map->btf_value_type_id = 0;
+ break;
+ case BPF_MAP_TYPE_STRUCT_OPS:
+ create_attr.btf_value_type_id = 0;
+ break;
default:
break;
}
@@ -5443,7 +5355,23 @@ retry:
if (err < 0)
goto err_out;
}
-
+ if (map->def.type == BPF_MAP_TYPE_ARENA) {
+ map->mmaped = mmap((void *)(long)map->map_extra,
+ bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
+ map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
+ map->fd, 0);
+ if (map->mmaped == MAP_FAILED) {
+ err = -errno;
+ map->mmaped = NULL;
+ pr_warn("map '%s': failed to mmap arena: %d\n",
+ map->name, err);
+ return err;
+ }
+ if (obj->arena_data) {
+ memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz);
+ zfree(&obj->arena_data);
+ }
+ }
if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
err = init_map_in_map_slots(obj, map);
if (err < 0)
@@ -6841,69 +6769,6 @@ static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_progr
return fn_id;
}
-static int probe_kern_arg_ctx_tag(void)
-{
- /* To minimize merge conflicts with BPF token series that refactors
- * feature detection code a lot, we don't integrate
- * probe_kern_arg_ctx_tag() into kernel_supports() feature-detection
- * framework yet, doing our own caching internally.
- * This will be cleaned up a bit later when bpf/bpf-next trees settle.
- */
- static int cached_result = -1;
- static const char strs[] = "\0a\0b\0arg:ctx\0";
- const __u32 types[] = {
- /* [1] INT */
- BTF_TYPE_INT_ENC(1 /* "a" */, BTF_INT_SIGNED, 0, 32, 4),
- /* [2] PTR -> VOID */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 0),
- /* [3] FUNC_PROTO `int(void *a)` */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
- BTF_PARAM_ENC(1 /* "a" */, 2),
- /* [4] FUNC 'a' -> FUNC_PROTO (main prog) */
- BTF_TYPE_ENC(1 /* "a" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 3),
- /* [5] FUNC_PROTO `int(void *b __arg_ctx)` */
- BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 1),
- BTF_PARAM_ENC(3 /* "b" */, 2),
- /* [6] FUNC 'b' -> FUNC_PROTO (subprog) */
- BTF_TYPE_ENC(3 /* "b" */, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 5),
- /* [7] DECL_TAG 'arg:ctx' -> func 'b' arg 'b' */
- BTF_TYPE_DECL_TAG_ENC(5 /* "arg:ctx" */, 6, 0),
- };
- const struct bpf_insn insns[] = {
- /* main prog */
- BPF_CALL_REL(+1),
- BPF_EXIT_INSN(),
- /* global subprog */
- BPF_EMIT_CALL(BPF_FUNC_get_func_ip), /* needs PTR_TO_CTX */
- BPF_EXIT_INSN(),
- };
- const struct bpf_func_info_min func_infos[] = {
- { 0, 4 }, /* main prog -> FUNC 'a' */
- { 2, 6 }, /* subprog -> FUNC 'b' */
- };
- LIBBPF_OPTS(bpf_prog_load_opts, opts);
- int prog_fd, btf_fd, insn_cnt = ARRAY_SIZE(insns);
-
- if (cached_result >= 0)
- return cached_result;
-
- btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
- if (btf_fd < 0)
- return 0;
-
- opts.prog_btf_fd = btf_fd;
- opts.func_info = &func_infos;
- opts.func_info_cnt = ARRAY_SIZE(func_infos);
- opts.func_info_rec_size = sizeof(func_infos[0]);
-
- prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, "det_arg_ctx",
- "GPL", insns, insn_cnt, &opts);
- close(btf_fd);
-
- cached_result = probe_fd(prog_fd);
- return cached_result;
-}
-
/* Check if main program or global subprog's function prototype has `arg:ctx`
* argument tags, and, if necessary, substitute correct type to match what BPF
* verifier would expect, taking into account specific program type. This
@@ -6928,7 +6793,7 @@ static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_progra
return 0;
/* don't do any fix ups if kernel natively supports __arg_ctx */
- if (probe_kern_arg_ctx_tag() > 0)
+ if (kernel_supports(obj, FEAT_ARG_CTX_TAG))
return 0;
/* some BPF program types just don't have named context structs, so
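
For context, __arg_ctx is the btf_decl_tag("arg:ctx") macro from bpf_helpers.h; when the kernel understands it natively, libbpf can skip the BTF rewriting this function performs. A sketch of a global subprog using the tag:

    __noinline int handle(void *ctx __arg_ctx)
    {
        /* verifier treats ctx as PTR_TO_CTX despite the void * type */
        return 0;
    }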
@@ -7315,12 +7180,12 @@ static int bpf_object__collect_relos(struct bpf_object *obj)
data = sec_desc->data;
idx = shdr->sh_info;
- if (shdr->sh_type != SHT_REL) {
+ if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
pr_warn("internal error at %d\n", __LINE__);
return -LIBBPF_ERRNO__INTERNAL;
}
- if (idx == obj->efile.st_ops_shndx || idx == obj->efile.st_ops_link_shndx)
+ if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
err = bpf_object__collect_st_ops_relos(obj, shdr, data);
else if (idx == obj->efile.btf_maps_shndx)
err = bpf_object__collect_map_relos(obj, shdr, data);
@@ -7495,6 +7360,10 @@ static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog
load_attr.prog_flags = prog->prog_flags;
load_attr.fd_array = obj->fd_array;
+ load_attr.token_fd = obj->token_fd;
+ if (obj->token_fd)
+ load_attr.prog_flags |= BPF_F_TOKEN_FD;
+
/* adjust load_attr if sec_def provides custom preload callback */
if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
@@ -7940,7 +7809,7 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts)
{
- const char *obj_name, *kconfig, *btf_tmp_path;
+ const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
struct bpf_object *obj;
char tmp_name[64];
int err;
@@ -7977,6 +7846,16 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
if (log_size && !log_buf)
return ERR_PTR(-EINVAL);
+ token_path = OPTS_GET(opts, bpf_token_path, NULL);
+ /* if user didn't specify bpf_token_path explicitly, check if
+ * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path
+ * option
+ */
+ if (!token_path)
+ token_path = getenv("LIBBPF_BPF_TOKEN_PATH");
+ if (token_path && strlen(token_path) >= PATH_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+
obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
if (IS_ERR(obj))
return obj;
@@ -7985,6 +7864,14 @@ static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf,
obj->log_size = log_size;
obj->log_level = log_level;
+ if (token_path) {
+ obj->token_path = strdup(token_path);
+ if (!obj->token_path) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+
btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
if (btf_tmp_path) {
if (strlen(btf_tmp_path) >= PATH_MAX) {
@@ -8471,11 +8358,20 @@ static void bpf_map_prepare_vdata(const struct bpf_map *map)
static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
{
+ struct bpf_map *map;
int i;
- for (i = 0; i < obj->nr_maps; i++)
- if (bpf_map__is_struct_ops(&obj->maps[i]))
- bpf_map_prepare_vdata(&obj->maps[i]);
+ for (i = 0; i < obj->nr_maps; i++) {
+ map = &obj->maps[i];
+
+ if (!bpf_map__is_struct_ops(map))
+ continue;
+
+ if (!map->autocreate)
+ continue;
+
+ bpf_map_prepare_vdata(map);
+ }
return 0;
}
@@ -8495,11 +8391,13 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
if (obj->gen_loader)
bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
- err = bpf_object__probe_loading(obj);
+ err = bpf_object_prepare_token(obj);
+ err = err ? : bpf_object__probe_loading(obj);
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_maps(obj);
err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
+ err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__create_maps(obj);
@@ -8969,13 +8867,9 @@ static void bpf_map__destroy(struct bpf_map *map)
zfree(&map->init_slots);
map->init_slots_sz = 0;
- if (map->mmaped) {
- size_t mmap_sz;
-
- mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
- munmap(map->mmaped, mmap_sz);
- map->mmaped = NULL;
- }
+ if (map->mmaped && map->mmaped != map->obj->arena_data)
+ munmap(map->mmaped, bpf_map_mmap_sz(map));
+ map->mmaped = NULL;
if (map->st_ops) {
zfree(&map->st_ops->data);
@@ -9030,6 +8924,13 @@ void bpf_object__close(struct bpf_object *obj)
}
zfree(&obj->programs);
+ zfree(&obj->feat_cache);
+ zfree(&obj->token_path);
+ if (obj->token_fd > 0)
+ close(obj->token_fd);
+
+ zfree(&obj->arena_data);
+
free(obj);
}
@@ -9690,7 +9591,9 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
return NULL;
}
-/* Collect the reloc from ELF and populate the st_ops->progs[] */
+/* Collect the reloc from ELF, populate the st_ops->progs[], and update
+ * st_ops->data for shadow type.
+ */
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
Elf64_Shdr *shdr, Elf_Data *data)
{
@@ -9782,28 +9685,15 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
return -EINVAL;
}
- /* if we haven't yet processed this BPF program, record proper
- * attach_btf_id and member_idx
- */
- if (!prog->attach_btf_id) {
- prog->attach_btf_id = st_ops->type_id;
- prog->expected_attach_type = member_idx;
- }
+ st_ops->progs[member_idx] = prog;
- /* struct_ops BPF prog can be re-used between multiple
- * .struct_ops & .struct_ops.link as long as it's the
- * same struct_ops struct definition and the same
- * function pointer field
+ /* st_ops->data will be exposed to users, being returned by
+ * bpf_map__initial_value() as a pointer to the shadow
+ * type. All function pointers in the original struct type
+ * should be converted to a pointer to struct bpf_program
+ * in the shadow type.
*/
- if (prog->attach_btf_id != st_ops->type_id ||
- prog->expected_attach_type != member_idx) {
- pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
- map->name, prog->name, prog->sec_name, prog->type,
- prog->attach_btf_id, prog->expected_attach_type, name);
- return -EINVAL;
- }
-
- st_ops->progs[member_idx] = prog;
+ *((struct bpf_program **)(st_ops->data + moff)) = prog;
}
return 0;
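
With the shadow-type scheme, bpf_map__initial_value() hands st_ops->data to user space before load; generated skeletons wrap the same buffer in a typed shadow struct. A hedged sketch of direct use:

    size_t sz;
    void *shadow = bpf_map__initial_value(map, &sz);

    /* scalar fields may be adjusted in place before bpf_object__load();
     * function-pointer slots now hold struct bpf_program * values
     */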
@@ -9988,7 +9878,9 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
*btf_obj_fd = 0;
*btf_type_id = 1;
} else {
- err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+ err = find_kernel_btf_id(prog->obj, attach_name,
+ attach_type, btf_obj_fd,
+ btf_type_id);
}
if (err) {
pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
@@ -10210,11 +10102,14 @@ int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
return libbpf_err(-EBUSY);
if (map->mmaped) {
- int err;
size_t mmap_old_sz, mmap_new_sz;
+ int err;
- mmap_old_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
- mmap_new_sz = bpf_map_mmap_sz(size, map->def.max_entries);
+ if (map->def.type != BPF_MAP_TYPE_ARRAY)
+ return -EOPNOTSUPP;
+
+ mmap_old_sz = bpf_map_mmap_sz(map);
+ mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
if (err) {
pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
@@ -10247,22 +10142,41 @@ __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size)
{
+ size_t actual_sz;
+
if (map->obj->loaded || map->reused)
return libbpf_err(-EBUSY);
- if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
- size != map->def.value_size)
+ if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
+ return libbpf_err(-EINVAL);
+
+ if (map->def.type == BPF_MAP_TYPE_ARENA)
+ actual_sz = map->obj->arena_data_sz;
+ else
+ actual_sz = map->def.value_size;
+ if (size != actual_sz)
return libbpf_err(-EINVAL);
memcpy(map->mmaped, data, size);
return 0;
}
-void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
+void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize)
{
+ if (bpf_map__is_struct_ops(map)) {
+ if (psize)
+ *psize = map->def.value_size;
+ return map->st_ops->data;
+ }
+
if (!map->mmaped)
return NULL;
- *psize = map->def.value_size;
+
+ if (map->def.type == BPF_MAP_TYPE_ARENA)
+ *psize = map->obj->arena_data_sz;
+ else
+ *psize = map->def.value_size;
+
return map->mmaped;
}
@@ -11050,7 +10964,7 @@ static const char *arch_specific_syscall_pfx(void)
#endif
}
-static int probe_kern_syscall_wrapper(void)
+int probe_kern_syscall_wrapper(int token_fd)
{
char syscall_name[64];
const char *ksys_pfx;
@@ -13739,7 +13653,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
for (i = 0; i < s->map_cnt; i++) {
struct bpf_map *map = *s->maps[i].map;
- size_t mmap_sz = bpf_map_mmap_sz(map->def.value_size, map->def.max_entries);
+ size_t mmap_sz = bpf_map_mmap_sz(map);
int prot, map_fd = map->fd;
void **mmaped = s->maps[i].mmaped;
@@ -13751,6 +13665,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
continue;
}
+ if (map->def.type == BPF_MAP_TYPE_ARENA) {
+ *mmaped = map->mmaped;
+ continue;
+ }
+
if (map->def.map_flags & BPF_F_RDONLY_PROG)
prot = PROT_READ;
else
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 6cd9c50162..7b510761f5 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -177,10 +177,29 @@ struct bpf_object_open_opts {
* logs through its print callback.
*/
__u32 kernel_log_level;
+ /* Path to BPF FS mount point to derive BPF token from.
+ *
+ * Created BPF token will be used for all bpf() syscall operations
+ * that accept BPF token (e.g., map creation, BTF and program loads,
+ * etc) automatically within instantiated BPF object.
+ *
+	 * If bpf_token_path is not specified, libbpf will consult the
+	 * LIBBPF_BPF_TOKEN_PATH environment variable. If set, it is treated
+	 * as the value of the bpf_token_path option: libbpf will either
+	 * create a BPF token from the provided custom BPF FS path, or, if
+	 * the envvar value is an empty string, disable implicit BPF token
+	 * creation. bpf_token_path overrides LIBBPF_BPF_TOKEN_PATH if both
+	 * are set.
+ *
+ * Setting bpf_token_path option to empty string disables libbpf's
+ * automatic attempt to create BPF token from default BPF FS mount
+ * point (/sys/fs/bpf), in case this default behavior is undesirable.
+ */
+ const char *bpf_token_path;
size_t :0;
};
-#define bpf_object_open_opts__last_field kernel_log_level
+#define bpf_object_open_opts__last_field bpf_token_path
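
A minimal usage sketch of the new option (object file name illustrative):

    LIBBPF_OPTS(bpf_object_open_opts, opts,
        .bpf_token_path = "/sys/fs/bpf",   /* or leave unset and export LIBBPF_BPF_TOKEN_PATH */
    );
    struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);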
/**
* @brief **bpf_object__open()** creates a bpf_object by opening
@@ -995,7 +1014,7 @@ LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size);
-LIBBPF_API void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
+LIBBPF_API void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize);
/**
* @brief **bpf_map__is_internal()** tells the caller whether or not the
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 91c5aef7da..86804fd90d 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -245,7 +245,6 @@ LIBBPF_0.3.0 {
btf__parse_raw_split;
btf__parse_split;
btf__new_empty_split;
- btf__new_split;
ring_buffer__epoll_fd;
} LIBBPF_0.2.0;
@@ -326,7 +325,6 @@ LIBBPF_0.7.0 {
bpf_xdp_detach;
bpf_xdp_query;
bpf_xdp_query_id;
- btf_ext__raw_data;
libbpf_probe_bpf_helper;
libbpf_probe_bpf_map_type;
libbpf_probe_bpf_prog_type;
@@ -411,4 +409,8 @@ LIBBPF_1.3.0 {
} LIBBPF_1.2.0;
LIBBPF_1.4.0 {
+ global:
+ bpf_token_create;
+ btf__new_split;
+ btf_ext__raw_data;
} LIBBPF_1.3.0;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 878a0d0b51..864b361774 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <fcntl.h>
#include <unistd.h>
+#include <sys/syscall.h>
#include <libelf.h>
#include "relo_core.h"
@@ -371,18 +372,39 @@ enum kern_feature_id {
FEAT_SYSCALL_WRAPPER,
/* BPF multi-uprobe link support */
FEAT_UPROBE_MULTI_LINK,
+ /* Kernel supports arg:ctx tag (__arg_ctx) for global subprogs natively */
+ FEAT_ARG_CTX_TAG,
+ /* Kernel supports '?' at the front of datasec names */
+ FEAT_BTF_QMARK_DATASEC,
__FEAT_CNT,
};
-int probe_memcg_account(void);
+enum kern_feature_result {
+ FEAT_UNKNOWN = 0,
+ FEAT_SUPPORTED = 1,
+ FEAT_MISSING = 2,
+};
+
+struct kern_feature_cache {
+ enum kern_feature_result res[__FEAT_CNT];
+ int token_fd;
+};
+
+bool feat_supported(struct kern_feature_cache *cache, enum kern_feature_id feat_id);
bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
+
+int probe_kern_syscall_wrapper(int token_fd);
+int probe_memcg_account(int token_fd);
int bump_rlimit_memlock(void);
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
- const char *str_sec, size_t str_len);
-int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level);
+ const char *str_sec, size_t str_len,
+ int token_fd);
+int btf_load_into_kernel(struct btf *btf,
+ char *log_buf, size_t log_sz, __u32 log_level,
+ int token_fd);
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
@@ -546,6 +568,17 @@ static inline bool is_ldimm64_insn(struct bpf_insn *insn)
return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}
+/* Unconditionally dup FD, ensuring it doesn't use [0, 2] range.
+ * Original FD is not closed or altered in any other way.
+ * Preserves original FD value, if it's invalid (negative).
+ */
+static inline int dup_good_fd(int fd)
+{
+ if (fd < 0)
+ return fd;
+ return fcntl(fd, F_DUPFD_CLOEXEC, 3);
+}
+
/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2
 * Takes ownership of the fd passed in, and closes the original fd when
 * it has to be moved via fcntl(fd, F_DUPFD_CLOEXEC, 3).
@@ -557,7 +590,7 @@ static inline int ensure_good_fd(int fd)
if (fd < 0)
return fd;
if (fd < 3) {
- fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
+ fd = dup_good_fd(fd);
saved_errno = errno;
close(old_fd);
errno = saved_errno;
@@ -569,6 +602,15 @@ static inline int ensure_good_fd(int fd)
return fd;
}
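+/* Some architectures (e.g., arm64, riscv) have no dup2 syscall and only
+ * provide dup3(); dup3() with flags == 0 covers libbpf's use of it here.
+ */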
+static inline int sys_dup2(int oldfd, int newfd)
+{
+#ifdef __NR_dup2
+ return syscall(__NR_dup2, oldfd, newfd);
+#else
+ return syscall(__NR_dup3, oldfd, newfd, 0);
+#endif
+}
+
/* Point *fixed_fd* to the same file that *tmp_fd* points to.
* Regardless of success, *tmp_fd* is closed.
* Whatever *fixed_fd* pointed to is closed silently.
@@ -577,7 +619,7 @@ static inline int reuse_fd(int fixed_fd, int tmp_fd)
{
int err;
- err = dup2(tmp_fd, fixed_fd);
+ err = sys_dup2(tmp_fd, fixed_fd);
err = err < 0 ? -errno : 0;
close(tmp_fd); /* clean up temporary FD */
return err;
@@ -627,4 +669,6 @@ int elf_resolve_syms_offsets(const char *binary_path, int cnt,
int elf_resolve_pattern_offsets(const char *binary_path, const char *pattern,
unsigned long **poffsets, size_t *pcnt);
+int probe_fd(int fd);
+
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 9c4db90b92..3021881224 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -219,7 +219,8 @@ int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
}
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
- const char *str_sec, size_t str_len)
+ const char *str_sec, size_t str_len,
+ int token_fd)
{
struct btf_header hdr = {
.magic = BTF_MAGIC,
@@ -229,6 +230,10 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
.str_off = types_len,
.str_len = str_len,
};
+ LIBBPF_OPTS(bpf_btf_load_opts, opts,
+ .token_fd = token_fd,
+ .btf_flags = token_fd ? BPF_F_TOKEN_FD : 0,
+ );
int btf_fd, btf_len;
__u8 *raw_btf;
@@ -241,7 +246,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
- btf_fd = bpf_btf_load(raw_btf, btf_len, NULL);
+ btf_fd = bpf_btf_load(raw_btf, btf_len, &opts);
free(raw_btf);
return btf_fd;
@@ -271,7 +276,7 @@ static int load_local_storage_btf(void)
};
return libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs));
+ strs, sizeof(strs), 0);
}
static int probe_map_create(enum bpf_map_type map_type)
@@ -326,12 +331,20 @@ static int probe_map_create(enum bpf_map_type map_type)
case BPF_MAP_TYPE_STRUCT_OPS:
/* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
opts.btf_vmlinux_value_type_id = 1;
+ opts.value_type_btf_obj_fd = -1;
exp_err = -524; /* -ENOTSUPP */
break;
case BPF_MAP_TYPE_BLOOM_FILTER:
key_size = 0;
max_entries = 1;
break;
+ case BPF_MAP_TYPE_ARENA:
+ key_size = 0;
+ value_size = 0;
+ max_entries = 1; /* one page */
+ opts.map_extra = 0; /* can mmap() at any address */
+ opts.map_flags = BPF_F_MMAPABLE;
+ break;
case BPF_MAP_TYPE_HASH:
case BPF_MAP_TYPE_ARRAY:
case BPF_MAP_TYPE_PROG_ARRAY:
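
This also makes ARENA support discoverable from applications. A short usage sketch:

    /* 1: supported, 0: not supported, <0: error */
    int ret = libbpf_probe_bpf_map_type(BPF_MAP_TYPE_ARENA, NULL);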
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 16bca56002..0d4be82955 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -2732,7 +2732,7 @@ static int finalize_btf(struct bpf_linker *linker)
/* Emit .BTF.ext section */
if (linker->btf_ext) {
- raw_data = btf_ext__get_raw_data(linker->btf_ext, &raw_sz);
+ raw_data = btf_ext__raw_data(linker->btf_ext, &raw_sz);
if (!raw_data)
return -ENOMEM;
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h
index a139334d57..626d7ffb03 100644
--- a/tools/lib/bpf/str_error.h
+++ b/tools/lib/bpf/str_error.h
@@ -2,5 +2,8 @@
#ifndef __LIBBPF_STR_ERROR_H
#define __LIBBPF_STR_ERROR_H
+#define STRERR_BUFSIZE 128
+
char *libbpf_strerror_r(int err, char *dst, int len);
+
#endif /* __LIBBPF_STR_ERROR_H */
diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
index 9fa75943f2..d943d78b78 100644
--- a/tools/lib/subcmd/parse-options.c
+++ b/tools/lib/subcmd/parse-options.c
@@ -633,11 +633,10 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
const char *const subcommands[], const char *usagestr[], int flags)
{
struct parse_opt_ctx_t ctx;
+ char *buf = NULL;
/* build usage string if it's not provided */
if (subcommands && !usagestr[0]) {
- char *buf = NULL;
-
astrcatf(&buf, "%s %s [<options>] {", subcmd_config.exec_name, argv[0]);
for (int i = 0; subcommands[i]; i++) {
@@ -679,7 +678,10 @@ int parse_options_subcommand(int argc, const char **argv, const struct option *o
astrcatf(&error_buf, "unknown switch `%c'", *ctx.opt);
usage_with_options(usagestr, options);
}
-
+ if (buf) {
+ usagestr[0] = NULL;
+ free(buf);
+ }
return parse_options_end(&ctx);
}
diff --git a/tools/lib/subcmd/run-command.c b/tools/lib/subcmd/run-command.c
index 5cdac21625..d435eb4235 100644
--- a/tools/lib/subcmd/run-command.c
+++ b/tools/lib/subcmd/run-command.c
@@ -122,6 +122,8 @@ int start_command(struct child_process *cmd)
}
if (cmd->preexec_cb)
cmd->preexec_cb();
+ if (cmd->no_exec_cmd)
+ exit(cmd->no_exec_cmd(cmd));
if (cmd->exec_cmd) {
execv_cmd(cmd->argv);
} else {
diff --git a/tools/lib/subcmd/run-command.h b/tools/lib/subcmd/run-command.h
index 17d969c6ad..d794138a79 100644
--- a/tools/lib/subcmd/run-command.h
+++ b/tools/lib/subcmd/run-command.h
@@ -47,6 +47,8 @@ struct child_process {
unsigned exec_cmd:1; /* if this is to be external sub-command */
unsigned stdout_to_stderr:1;
void (*preexec_cb)(void);
+ /* If set, call function in child rather than doing an exec. */
+ int (*no_exec_cmd)(struct child_process *process);
};
int start_command(struct child_process *);
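
A minimal sketch of the new callback (function name hypothetical): the forked child runs the function instead of exec-ing anything, and its return value becomes the child's exit code:

    static int in_child_work(struct child_process *p)
    {
        return 0;   /* child's exit status */
    }

    struct child_process cmd = { .no_exec_cmd = in_child_work };
    start_command(&cmd);
    finish_command(&cmd);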