Diffstat (limited to 'tools/bpf')
-rw-r--r--  tools/bpf/.gitignore | 7
-rw-r--r--  tools/bpf/Makefile | 135
-rw-r--r--  tools/bpf/bpf_asm.c | 52
-rw-r--r--  tools/bpf/bpf_dbg.c | 1398
-rw-r--r--  tools/bpf/bpf_exp.l | 198
-rw-r--r--  tools/bpf/bpf_exp.y | 668
-rw-r--r--  tools/bpf/bpf_jit_disasm.c | 332
-rw-r--r--  tools/bpf/bpftool/.gitignore | 10
-rw-r--r--  tools/bpf/bpftool/Documentation/Makefile | 60
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-btf.rst | 268
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-cgroup.rst | 157
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-feature.rst | 90
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-gen.rst | 446
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-iter.rst | 76
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-link.rst | 112
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-map.rst | 277
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-net.rst | 182
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-perf.rst | 69
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst | 375
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst | 92
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool.rst | 70
-rw-r--r--  tools/bpf/bpftool/Documentation/common_options.rst | 25
-rw-r--r--  tools/bpf/bpftool/Documentation/substitutions.rst | 3
-rw-r--r--  tools/bpf/bpftool/Makefile | 299
-rw-r--r--  tools/bpf/bpftool/bash-completion/bpftool | 1211
-rw-r--r--  tools/bpf/bpftool/btf.c | 1088
-rw-r--r--  tools/bpf/bpftool/btf_dumper.c | 906
-rw-r--r--  tools/bpf/bpftool/cfg.c | 488
-rw-r--r--  tools/bpf/bpftool/cfg.h | 12
-rw-r--r--  tools/bpf/bpftool/cgroup.c | 655
-rw-r--r--  tools/bpf/bpftool/common.c | 1110
-rw-r--r--  tools/bpf/bpftool/feature.c | 1344
-rw-r--r--  tools/bpf/bpftool/gen.c | 2336
-rw-r--r--  tools/bpf/bpftool/iter.c | 123
-rw-r--r--  tools/bpf/bpftool/jit_disasm.c | 403
-rw-r--r--  tools/bpf/bpftool/json_writer.c | 355
-rw-r--r--  tools/bpf/bpftool/json_writer.h | 74
-rw-r--r--  tools/bpf/bpftool/link.c | 1056
-rw-r--r--  tools/bpf/bpftool/main.c | 547
-rw-r--r--  tools/bpf/bpftool/main.h | 273
-rw-r--r--  tools/bpf/bpftool/map.c | 1499
-rw-r--r--  tools/bpf/bpftool/map_perf_ring.c | 226
-rw-r--r--  tools/bpf/bpftool/net.c | 953
-rw-r--r--  tools/bpf/bpftool/netlink_dumper.c | 178
-rw-r--r--  tools/bpf/bpftool/netlink_dumper.h | 103
-rw-r--r--  tools/bpf/bpftool/perf.c | 258
-rw-r--r--  tools/bpf/bpftool/pids.c | 256
-rw-r--r--  tools/bpf/bpftool/prog.c | 2514
-rw-r--r--  tools/bpf/bpftool/skeleton/pid_iter.bpf.c | 119
-rw-r--r--  tools/bpf/bpftool/skeleton/pid_iter.h | 14
-rw-r--r--  tools/bpf/bpftool/skeleton/profiler.bpf.c | 125
-rw-r--r--  tools/bpf/bpftool/struct_ops.c | 642
-rw-r--r--  tools/bpf/bpftool/tracelog.c | 166
-rw-r--r--  tools/bpf/bpftool/xlated_dumper.c | 434
-rw-r--r--  tools/bpf/bpftool/xlated_dumper.h | 42
-rw-r--r--  tools/bpf/resolve_btfids/.gitignore | 4
-rw-r--r--  tools/bpf/resolve_btfids/Build | 12
-rw-r--r--  tools/bpf/resolve_btfids/Makefile | 112
-rw-r--r--  tools/bpf/resolve_btfids/main.c | 783
-rw-r--r--  tools/bpf/runqslower/.gitignore | 2
-rw-r--r--  tools/bpf/runqslower/Makefile | 92
-rw-r--r--  tools/bpf/runqslower/runqslower.bpf.c | 107
-rw-r--r--  tools/bpf/runqslower/runqslower.c | 171
-rw-r--r--  tools/bpf/runqslower/runqslower.h | 13
64 files changed, 26207 insertions, 0 deletions
diff --git a/tools/bpf/.gitignore b/tools/bpf/.gitignore
new file mode 100644
index 0000000000..cf53342175
--- /dev/null
+++ b/tools/bpf/.gitignore
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+FEATURE-DUMP.bpf
+feature
+bpf_asm
+bpf_dbg
+bpf_exp.yacc.*
+bpf_jit_disasm
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
new file mode 100644
index 0000000000..243b79f2b4
--- /dev/null
+++ b/tools/bpf/Makefile
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: GPL-2.0
+include ../scripts/Makefile.include
+
+prefix ?= /usr/local
+
+LEX = flex
+YACC = bison
+MAKE = make
+INSTALL ?= install
+
+CFLAGS += -Wall -O2
+CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi \
+ -I$(srctree)/tools/include
+
+# This will work when bpf is built in the tools environment, where
+# srctree isn't set, and when invoked from the selftests build, where
+# srctree is set to ".". building_out_of_srctree is undefined for
+# in-srctree builds.
+ifeq ($(srctree),)
+update_srctree := 1
+endif
+ifndef building_out_of_srctree
+update_srctree := 1
+endif
+ifeq ($(update_srctree),1)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
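+
+# Worked example of the above (illustrative paths): when invoked as
+# `make -C tools/bpf`, CURDIR is /usr/src/linux/tools/bpf; the first
+# $(dir)/$(patsubst) pair yields /usr/src/linux/tools and the second
+# /usr/src/linux, the kernel top level that srctree must point at.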
+
+ifeq ($(V),1)
+ Q =
+else
+ Q = @
+endif
+
+FEATURE_USER = .bpf
+FEATURE_TESTS = libbfd disassembler-four-args disassembler-init-styled
+FEATURE_DISPLAY = libbfd
+
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean bpftool_clean runqslower_clean resolve_btfids_clean
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+ check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+ifeq ($(feature-disassembler-four-args), 1)
+CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+endif
+ifeq ($(feature-disassembler-init-styled), 1)
+CFLAGS += -DDISASM_INIT_STYLED
+endif
+
+$(OUTPUT)%.yacc.c: $(srctree)/tools/bpf/%.y
+ $(QUIET_BISON)$(YACC) -o $@ -d $<
+
+$(OUTPUT)%.lex.c: $(srctree)/tools/bpf/%.l
+ $(QUIET_FLEX)$(LEX) -o $@ $<
+
+$(OUTPUT)%.o: $(srctree)/tools/bpf/%.c
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $<
+
+$(OUTPUT)%.yacc.o: $(OUTPUT)%.yacc.c
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $<
+$(OUTPUT)%.lex.o: $(OUTPUT)%.lex.c
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $<
+
+PROGS = $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg $(OUTPUT)bpf_asm
+
+all: $(PROGS) bpftool runqslower
+
+$(OUTPUT)bpf_jit_disasm: CFLAGS += -DPACKAGE='bpf_jit_disasm'
+$(OUTPUT)bpf_jit_disasm: $(OUTPUT)bpf_jit_disasm.o
+ $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^ -lopcodes -lbfd -ldl
+
+$(OUTPUT)bpf_dbg: $(OUTPUT)bpf_dbg.o
+ $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^ -lreadline
+
+$(OUTPUT)bpf_asm: $(OUTPUT)bpf_asm.o $(OUTPUT)bpf_exp.yacc.o $(OUTPUT)bpf_exp.lex.o
+ $(QUIET_LINK)$(CC) $(CFLAGS) -o $@ $^
+
+$(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c
+$(OUTPUT)bpf_exp.yacc.o: $(OUTPUT)bpf_exp.yacc.c
+$(OUTPUT)bpf_exp.lex.o: $(OUTPUT)bpf_exp.lex.c
+
+clean: bpftool_clean runqslower_clean resolve_btfids_clean
+ $(call QUIET_CLEAN, bpf-progs)
+ $(Q)$(RM) -r -- $(OUTPUT)*.o $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg \
+ $(OUTPUT)bpf_asm $(OUTPUT)bpf_exp.yacc.* $(OUTPUT)bpf_exp.lex.*
+ $(call QUIET_CLEAN, core-gen)
+ $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpf
+ $(Q)$(RM) -r -- $(OUTPUT)feature
+
+install: $(PROGS) bpftool_install
+ $(call QUIET_INSTALL, bpf_jit_disasm)
+ $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/bin
+ $(Q)$(INSTALL) $(OUTPUT)bpf_jit_disasm $(DESTDIR)$(prefix)/bin/bpf_jit_disasm
+ $(call QUIET_INSTALL, bpf_dbg)
+ $(Q)$(INSTALL) $(OUTPUT)bpf_dbg $(DESTDIR)$(prefix)/bin/bpf_dbg
+ $(call QUIET_INSTALL, bpf_asm)
+ $(Q)$(INSTALL) $(OUTPUT)bpf_asm $(DESTDIR)$(prefix)/bin/bpf_asm
+
+bpftool:
+ $(call descend,bpftool)
+
+bpftool_install:
+ $(call descend,bpftool,install)
+
+bpftool_clean:
+ $(call descend,bpftool,clean)
+
+runqslower:
+ $(call descend,runqslower)
+
+runqslower_clean:
+ $(call descend,runqslower,clean)
+
+resolve_btfids:
+ $(call descend,resolve_btfids)
+
+resolve_btfids_clean:
+ $(call descend,resolve_btfids,clean)
+
+.PHONY: all install clean bpftool bpftool_install bpftool_clean \
+ runqslower runqslower_clean \
+ resolve_btfids resolve_btfids_clean
diff --git a/tools/bpf/bpf_asm.c b/tools/bpf/bpf_asm.c
new file mode 100644
index 0000000000..0063c3c029
--- /dev/null
+++ b/tools/bpf/bpf_asm.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Minimal BPF assembler
+ *
+ * Instead of libpcap high-level filter expressions, it can be quite
+ * useful to define filters in low-level BPF assembler (which is kept
+ * close to Steven McCanne and Van Jacobson's original BPF paper).
+ * This is particularly useful for BPF JIT implementors, JIT security
+ * auditors, or just for defining BPF expressions that contain
+ * extensions which are not supported by compilers.
+ *
+ * How to get into it:
+ *
+ * 1) read Documentation/networking/filter.rst
+ * 2) Run `bpf_asm [-c] <filter-prog file>` to translate into binary
+ * blob that is loadable with xt_bpf, cls_bpf et al. Note: -c will
+ * pretty print a C-like construct.
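+ *
+ *    As a quick illustration (hypothetical input, sketch only): the
+ *    two-line program
+ *
+ *      ld #len
+ *      ret a
+ *
+ *    emits "2,128 0 0 0,22 0 0 0,", i.e. the instruction count followed
+ *    by one "code jt jf k" quadruple per instruction; -c prints the same
+ *    instructions as a C array initializer instead.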
+ *
+ * Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
+ */
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+extern void bpf_asm_compile(FILE *fp, bool cstyle);
+
+int main(int argc, char **argv)
+{
+ FILE *fp = stdin;
+ bool cstyle = false;
+ int i;
+
+ for (i = 1; i < argc; i++) {
+ if (!strncmp("-c", argv[i], 2)) {
+ cstyle = true;
+ continue;
+ }
+
+ fp = fopen(argv[i], "r");
+ if (!fp) {
+ fp = stdin;
+ continue;
+ }
+
+ break;
+ }
+
+ bpf_asm_compile(fp, cstyle);
+
+ return 0;
+}
diff --git a/tools/bpf/bpf_dbg.c b/tools/bpf/bpf_dbg.c
new file mode 100644
index 0000000000..00e560a17b
--- /dev/null
+++ b/tools/bpf/bpf_dbg.c
@@ -0,0 +1,1398 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Minimal BPF debugger
+ *
+ * Minimal BPF debugger that mimics the kernel's engine (w/o extensions)
+ * and allows single stepping through selected packets from a pcap
+ * with a provided user filter in order to facilitate verification of a
+ * BPF program. Among other things, this is useful for verifying BPF
+ * programs before attaching them to a live system, and can be used on
+ * socket filters, cls_bpf, xt_bpf, the team driver and e.g. PTP code;
+ * in particular when a single, more complex BPF program is being used.
+ * The main reason for using a more complex BPF program is typically to
+ * optimize execution time for making a verdict when multiple simple BPF
+ * programs are combined into one in order to avoid parsing the same
+ * headers multiple times.
+ *
+ * For more on how to debug BPF opcodes, see Documentation/networking/filter.rst,
+ * which is the main document on BPF. Mini howto for getting started:
+ *
+ * 1) `./bpf_dbg` to enter the shell (shell cmds denoted with '>'):
+ * 2) > load bpf 6,40 0 0 12,21 0 3 20... (output from `bpf_asm` or
+ * `tcpdump -iem1 -ddd port 22 | tr '\n' ','` to load as filter)
+ * 3) > load pcap foo.pcap
+ * 4) > run <n>/disassemble/dump/quit (self-explanatory)
+ * 5) > breakpoint 2 (sets bp at loaded BPF insns 2, do `run` then;
+ * multiple bps can be set, of course, a call to `breakpoint`
+ * w/o args shows currently loaded bps, `breakpoint reset` for
+ * resetting all breakpoints)
+ * 6) > select 3 (`run` etc will start from the 3rd packet in the pcap)
+ * 7) > step [-<n>, +<n>] (performs single stepping through the BPF)
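+ *
+ * A short illustrative session (hypothetical two-insn filter, for the
+ * sake of example only):
+ *
+ * > load bpf 2,40 0 0 12,22 0 0 0
+ * > disassemble
+ * l0:	ldh [12]
+ * l1:	ret a
+ *
+ * which loads the EtherType at offset 12 into A and returns it as the
+ * filter verdict.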
+ *
+ * Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <setjmp.h>
+#include <linux/filter.h>
+#include <linux/if_packet.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <signal.h>
+#include <arpa/inet.h>
+#include <net/ethernet.h>
+
+#define TCPDUMP_MAGIC 0xa1b2c3d4
+
+#define BPF_LDX_B (BPF_LDX | BPF_B)
+#define BPF_LDX_W (BPF_LDX | BPF_W)
+#define BPF_JMP_JA (BPF_JMP | BPF_JA)
+#define BPF_JMP_JEQ (BPF_JMP | BPF_JEQ)
+#define BPF_JMP_JGT (BPF_JMP | BPF_JGT)
+#define BPF_JMP_JGE (BPF_JMP | BPF_JGE)
+#define BPF_JMP_JSET (BPF_JMP | BPF_JSET)
+#define BPF_ALU_ADD (BPF_ALU | BPF_ADD)
+#define BPF_ALU_SUB (BPF_ALU | BPF_SUB)
+#define BPF_ALU_MUL (BPF_ALU | BPF_MUL)
+#define BPF_ALU_DIV (BPF_ALU | BPF_DIV)
+#define BPF_ALU_MOD (BPF_ALU | BPF_MOD)
+#define BPF_ALU_NEG (BPF_ALU | BPF_NEG)
+#define BPF_ALU_AND (BPF_ALU | BPF_AND)
+#define BPF_ALU_OR (BPF_ALU | BPF_OR)
+#define BPF_ALU_XOR (BPF_ALU | BPF_XOR)
+#define BPF_ALU_LSH (BPF_ALU | BPF_LSH)
+#define BPF_ALU_RSH (BPF_ALU | BPF_RSH)
+#define BPF_MISC_TAX (BPF_MISC | BPF_TAX)
+#define BPF_MISC_TXA (BPF_MISC | BPF_TXA)
+#define BPF_LD_B (BPF_LD | BPF_B)
+#define BPF_LD_H (BPF_LD | BPF_H)
+#define BPF_LD_W (BPF_LD | BPF_W)
+
+#ifndef array_size
+# define array_size(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+#ifndef __check_format_printf
+# define __check_format_printf(pos_fmtstr, pos_fmtargs) \
+ __attribute__ ((format (printf, (pos_fmtstr), (pos_fmtargs))))
+#endif
+
+enum {
+ CMD_OK,
+ CMD_ERR,
+ CMD_EX,
+};
+
+struct shell_cmd {
+ const char *name;
+ int (*func)(char *args);
+};
+
+struct pcap_filehdr {
+ uint32_t magic;
+ uint16_t version_major;
+ uint16_t version_minor;
+ int32_t thiszone;
+ uint32_t sigfigs;
+ uint32_t snaplen;
+ uint32_t linktype;
+};
+
+struct pcap_timeval {
+ int32_t tv_sec;
+ int32_t tv_usec;
+};
+
+struct pcap_pkthdr {
+ struct pcap_timeval ts;
+ uint32_t caplen;
+ uint32_t len;
+};
+
+struct bpf_regs {
+ uint32_t A;
+ uint32_t X;
+ uint32_t M[BPF_MEMWORDS];
+ uint32_t R;
+ bool Rs;
+ uint16_t Pc;
+};
+
+static struct sock_filter bpf_image[BPF_MAXINSNS + 1];
+static unsigned int bpf_prog_len;
+
+static int bpf_breakpoints[64];
+static struct bpf_regs bpf_regs[BPF_MAXINSNS + 1];
+static struct bpf_regs bpf_curr;
+static unsigned int bpf_regs_len;
+
+static int pcap_fd = -1;
+static unsigned int pcap_packet;
+static size_t pcap_map_size;
+static char *pcap_ptr_va_start, *pcap_ptr_va_curr;
+
+static const char * const op_table[] = {
+ [BPF_ST] = "st",
+ [BPF_STX] = "stx",
+ [BPF_LD_B] = "ldb",
+ [BPF_LD_H] = "ldh",
+ [BPF_LD_W] = "ld",
+ [BPF_LDX] = "ldx",
+ [BPF_LDX_B] = "ldxb",
+ [BPF_JMP_JA] = "ja",
+ [BPF_JMP_JEQ] = "jeq",
+ [BPF_JMP_JGT] = "jgt",
+ [BPF_JMP_JGE] = "jge",
+ [BPF_JMP_JSET] = "jset",
+ [BPF_ALU_ADD] = "add",
+ [BPF_ALU_SUB] = "sub",
+ [BPF_ALU_MUL] = "mul",
+ [BPF_ALU_DIV] = "div",
+ [BPF_ALU_MOD] = "mod",
+ [BPF_ALU_NEG] = "neg",
+ [BPF_ALU_AND] = "and",
+ [BPF_ALU_OR] = "or",
+ [BPF_ALU_XOR] = "xor",
+ [BPF_ALU_LSH] = "lsh",
+ [BPF_ALU_RSH] = "rsh",
+ [BPF_MISC_TAX] = "tax",
+ [BPF_MISC_TXA] = "txa",
+ [BPF_RET] = "ret",
+};
+
+static __check_format_printf(1, 2) int rl_printf(const char *fmt, ...)
+{
+ int ret;
+ va_list vl;
+
+ va_start(vl, fmt);
+ ret = vfprintf(rl_outstream, fmt, vl);
+ va_end(vl);
+
+ return ret;
+}
+
+static int matches(const char *cmd, const char *pattern)
+{
+ int len = strlen(cmd);
+
+ if (len > strlen(pattern))
+ return -1;
+
+ return memcmp(pattern, cmd, len);
+}
+
+static void hex_dump(const uint8_t *buf, size_t len)
+{
+ int i;
+
+ rl_printf("%3u: ", 0);
+ for (i = 0; i < len; i++) {
+ if (i && !(i % 16))
+ rl_printf("\n%3u: ", i);
+ rl_printf("%02x ", buf[i]);
+ }
+ rl_printf("\n");
+}
+
+static bool bpf_prog_loaded(void)
+{
+ if (bpf_prog_len == 0)
+ rl_printf("no bpf program loaded!\n");
+
+ return bpf_prog_len > 0;
+}
+
+static void bpf_disasm(const struct sock_filter f, unsigned int i)
+{
+ const char *op, *fmt;
+ int val = f.k;
+ char buf[256];
+
+ switch (f.code) {
+ case BPF_RET | BPF_K:
+ op = op_table[BPF_RET];
+ fmt = "#%#x";
+ break;
+ case BPF_RET | BPF_A:
+ op = op_table[BPF_RET];
+ fmt = "a";
+ break;
+ case BPF_RET | BPF_X:
+ op = op_table[BPF_RET];
+ fmt = "x";
+ break;
+ case BPF_MISC_TAX:
+ op = op_table[BPF_MISC_TAX];
+ fmt = "";
+ break;
+ case BPF_MISC_TXA:
+ op = op_table[BPF_MISC_TXA];
+ fmt = "";
+ break;
+ case BPF_ST:
+ op = op_table[BPF_ST];
+ fmt = "M[%d]";
+ break;
+ case BPF_STX:
+ op = op_table[BPF_STX];
+ fmt = "M[%d]";
+ break;
+ case BPF_LD_W | BPF_ABS:
+ op = op_table[BPF_LD_W];
+ fmt = "[%d]";
+ break;
+ case BPF_LD_H | BPF_ABS:
+ op = op_table[BPF_LD_H];
+ fmt = "[%d]";
+ break;
+ case BPF_LD_B | BPF_ABS:
+ op = op_table[BPF_LD_B];
+ fmt = "[%d]";
+ break;
+ case BPF_LD_W | BPF_LEN:
+ op = op_table[BPF_LD_W];
+ fmt = "#len";
+ break;
+ case BPF_LD_W | BPF_IND:
+ op = op_table[BPF_LD_W];
+ fmt = "[x+%d]";
+ break;
+ case BPF_LD_H | BPF_IND:
+ op = op_table[BPF_LD_H];
+ fmt = "[x+%d]";
+ break;
+ case BPF_LD_B | BPF_IND:
+ op = op_table[BPF_LD_B];
+ fmt = "[x+%d]";
+ break;
+ case BPF_LD | BPF_IMM:
+ op = op_table[BPF_LD_W];
+ fmt = "#%#x";
+ break;
+ case BPF_LDX | BPF_IMM:
+ op = op_table[BPF_LDX];
+ fmt = "#%#x";
+ break;
+ case BPF_LDX_B | BPF_MSH:
+ op = op_table[BPF_LDX_B];
+ fmt = "4*([%d]&0xf)";
+ break;
+ case BPF_LD | BPF_MEM:
+ op = op_table[BPF_LD_W];
+ fmt = "M[%d]";
+ break;
+ case BPF_LDX | BPF_MEM:
+ op = op_table[BPF_LDX];
+ fmt = "M[%d]";
+ break;
+ case BPF_JMP_JA:
+ op = op_table[BPF_JMP_JA];
+ fmt = "%d";
+ val = i + 1 + f.k;
+ break;
+ case BPF_JMP_JGT | BPF_X:
+ op = op_table[BPF_JMP_JGT];
+ fmt = "x";
+ break;
+ case BPF_JMP_JGT | BPF_K:
+ op = op_table[BPF_JMP_JGT];
+ fmt = "#%#x";
+ break;
+ case BPF_JMP_JGE | BPF_X:
+ op = op_table[BPF_JMP_JGE];
+ fmt = "x";
+ break;
+ case BPF_JMP_JGE | BPF_K:
+ op = op_table[BPF_JMP_JGE];
+ fmt = "#%#x";
+ break;
+ case BPF_JMP_JEQ | BPF_X:
+ op = op_table[BPF_JMP_JEQ];
+ fmt = "x";
+ break;
+ case BPF_JMP_JEQ | BPF_K:
+ op = op_table[BPF_JMP_JEQ];
+ fmt = "#%#x";
+ break;
+ case BPF_JMP_JSET | BPF_X:
+ op = op_table[BPF_JMP_JSET];
+ fmt = "x";
+ break;
+ case BPF_JMP_JSET | BPF_K:
+ op = op_table[BPF_JMP_JSET];
+ fmt = "#%#x";
+ break;
+ case BPF_ALU_NEG:
+ op = op_table[BPF_ALU_NEG];
+ fmt = "";
+ break;
+ case BPF_ALU_LSH | BPF_X:
+ op = op_table[BPF_ALU_LSH];
+ fmt = "x";
+ break;
+ case BPF_ALU_LSH | BPF_K:
+ op = op_table[BPF_ALU_LSH];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_RSH | BPF_X:
+ op = op_table[BPF_ALU_RSH];
+ fmt = "x";
+ break;
+ case BPF_ALU_RSH | BPF_K:
+ op = op_table[BPF_ALU_RSH];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_ADD | BPF_X:
+ op = op_table[BPF_ALU_ADD];
+ fmt = "x";
+ break;
+ case BPF_ALU_ADD | BPF_K:
+ op = op_table[BPF_ALU_ADD];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_SUB | BPF_X:
+ op = op_table[BPF_ALU_SUB];
+ fmt = "x";
+ break;
+ case BPF_ALU_SUB | BPF_K:
+ op = op_table[BPF_ALU_SUB];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_MUL | BPF_X:
+ op = op_table[BPF_ALU_MUL];
+ fmt = "x";
+ break;
+ case BPF_ALU_MUL | BPF_K:
+ op = op_table[BPF_ALU_MUL];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_DIV | BPF_X:
+ op = op_table[BPF_ALU_DIV];
+ fmt = "x";
+ break;
+ case BPF_ALU_DIV | BPF_K:
+ op = op_table[BPF_ALU_DIV];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_MOD | BPF_X:
+ op = op_table[BPF_ALU_MOD];
+ fmt = "x";
+ break;
+ case BPF_ALU_MOD | BPF_K:
+ op = op_table[BPF_ALU_MOD];
+ fmt = "#%d";
+ break;
+ case BPF_ALU_AND | BPF_X:
+ op = op_table[BPF_ALU_AND];
+ fmt = "x";
+ break;
+ case BPF_ALU_AND | BPF_K:
+ op = op_table[BPF_ALU_AND];
+ fmt = "#%#x";
+ break;
+ case BPF_ALU_OR | BPF_X:
+ op = op_table[BPF_ALU_OR];
+ fmt = "x";
+ break;
+ case BPF_ALU_OR | BPF_K:
+ op = op_table[BPF_ALU_OR];
+ fmt = "#%#x";
+ break;
+ case BPF_ALU_XOR | BPF_X:
+ op = op_table[BPF_ALU_XOR];
+ fmt = "x";
+ break;
+ case BPF_ALU_XOR | BPF_K:
+ op = op_table[BPF_ALU_XOR];
+ fmt = "#%#x";
+ break;
+ default:
+ op = "nosup";
+ fmt = "%#x";
+ val = f.code;
+ break;
+ }
+
+ memset(buf, 0, sizeof(buf));
+ snprintf(buf, sizeof(buf), fmt, val);
+ buf[sizeof(buf) - 1] = 0;
+
+	if (BPF_CLASS(f.code) == BPF_JMP && BPF_OP(f.code) != BPF_JA)
+ rl_printf("l%d:\t%s %s, l%d, l%d\n", i, op, buf,
+ i + 1 + f.jt, i + 1 + f.jf);
+ else
+ rl_printf("l%d:\t%s %s\n", i, op, buf);
+}
+
+static void bpf_dump_curr(struct bpf_regs *r, struct sock_filter *f)
+{
+ int i, m = 0;
+
+ rl_printf("pc: [%u]\n", r->Pc);
+ rl_printf("code: [%u] jt[%u] jf[%u] k[%u]\n",
+ f->code, f->jt, f->jf, f->k);
+ rl_printf("curr: ");
+ bpf_disasm(*f, r->Pc);
+
+ if (f->jt || f->jf) {
+ rl_printf("jt: ");
+ bpf_disasm(*(f + f->jt + 1), r->Pc + f->jt + 1);
+ rl_printf("jf: ");
+ bpf_disasm(*(f + f->jf + 1), r->Pc + f->jf + 1);
+ }
+
+ rl_printf("A: [%#08x][%u]\n", r->A, r->A);
+ rl_printf("X: [%#08x][%u]\n", r->X, r->X);
+ if (r->Rs)
+ rl_printf("ret: [%#08x][%u]!\n", r->R, r->R);
+
+ for (i = 0; i < BPF_MEMWORDS; i++) {
+ if (r->M[i]) {
+ m++;
+ rl_printf("M[%d]: [%#08x][%u]\n", i, r->M[i], r->M[i]);
+ }
+ }
+ if (m == 0)
+ rl_printf("M[0,%d]: [%#08x][%u]\n", BPF_MEMWORDS - 1, 0, 0);
+}
+
+static void bpf_dump_pkt(uint8_t *pkt, uint32_t pkt_caplen, uint32_t pkt_len)
+{
+ if (pkt_caplen != pkt_len)
+ rl_printf("cap: %u, len: %u\n", pkt_caplen, pkt_len);
+ else
+ rl_printf("len: %u\n", pkt_len);
+
+ hex_dump(pkt, pkt_caplen);
+}
+
+static void bpf_disasm_all(const struct sock_filter *f, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len; i++)
+ bpf_disasm(f[i], i);
+}
+
+static void bpf_dump_all(const struct sock_filter *f, unsigned int len)
+{
+ unsigned int i;
+
+ rl_printf("/* { op, jt, jf, k }, */\n");
+ for (i = 0; i < len; i++)
+ rl_printf("{ %#04x, %2u, %2u, %#010x },\n",
+ f[i].code, f[i].jt, f[i].jf, f[i].k);
+}
+
+static bool bpf_runnable(struct sock_filter *f, unsigned int len)
+{
+ int sock, ret, i;
+ struct sock_fprog bpf = {
+ .filter = f,
+ .len = len,
+ };
+
+ sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock < 0) {
+ rl_printf("cannot open socket!\n");
+ return false;
+ }
+ ret = setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
+ close(sock);
+ if (ret < 0) {
+ rl_printf("program not allowed to run by kernel!\n");
+ return false;
+ }
+ for (i = 0; i < len; i++) {
+ if (BPF_CLASS(f[i].code) == BPF_LD &&
+ f[i].k > SKF_AD_OFF) {
+ rl_printf("extensions currently not supported!\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void bpf_reset_breakpoints(void)
+{
+ int i;
+
+ for (i = 0; i < array_size(bpf_breakpoints); i++)
+ bpf_breakpoints[i] = -1;
+}
+
+static void bpf_set_breakpoints(unsigned int where)
+{
+ int i;
+ bool set = false;
+
+ for (i = 0; i < array_size(bpf_breakpoints); i++) {
+ if (bpf_breakpoints[i] == (int) where) {
+ rl_printf("breakpoint already set!\n");
+ set = true;
+ break;
+ }
+
+ if (bpf_breakpoints[i] == -1 && set == false) {
+ bpf_breakpoints[i] = where;
+ set = true;
+ }
+ }
+
+ if (!set)
+ rl_printf("too many breakpoints set, reset first!\n");
+}
+
+static void bpf_dump_breakpoints(void)
+{
+ int i;
+
+ rl_printf("breakpoints: ");
+
+ for (i = 0; i < array_size(bpf_breakpoints); i++) {
+ if (bpf_breakpoints[i] < 0)
+ continue;
+ rl_printf("%d ", bpf_breakpoints[i]);
+ }
+
+ rl_printf("\n");
+}
+
+static void bpf_reset(void)
+{
+ bpf_regs_len = 0;
+
+ memset(bpf_regs, 0, sizeof(bpf_regs));
+ memset(&bpf_curr, 0, sizeof(bpf_curr));
+}
+
+static void bpf_safe_regs(void)
+{
+ memcpy(&bpf_regs[bpf_regs_len++], &bpf_curr, sizeof(bpf_curr));
+}
+
+static bool bpf_restore_regs(int off)
+{
+ unsigned int index = bpf_regs_len - 1 + off;
+
+ if (index == 0) {
+ bpf_reset();
+ return true;
+ } else if (index < bpf_regs_len) {
+ memcpy(&bpf_curr, &bpf_regs[index], sizeof(bpf_curr));
+ bpf_regs_len = index;
+ return true;
+ } else {
+ rl_printf("reached bottom of register history stack!\n");
+ return false;
+ }
+}
+
+static uint32_t extract_u32(uint8_t *pkt, uint32_t off)
+{
+ uint32_t r;
+
+ memcpy(&r, &pkt[off], sizeof(r));
+
+ return ntohl(r);
+}
+
+static uint16_t extract_u16(uint8_t *pkt, uint32_t off)
+{
+ uint16_t r;
+
+ memcpy(&r, &pkt[off], sizeof(r));
+
+ return ntohs(r);
+}
+
+static uint8_t extract_u8(uint8_t *pkt, uint32_t off)
+{
+ return pkt[off];
+}
+
+static void set_return(struct bpf_regs *r)
+{
+ r->R = 0;
+ r->Rs = true;
+}
+
+static void bpf_single_step(struct bpf_regs *r, struct sock_filter *f,
+ uint8_t *pkt, uint32_t pkt_caplen,
+ uint32_t pkt_len)
+{
+ uint32_t K = f->k;
+ int d;
+
+ switch (f->code) {
+ case BPF_RET | BPF_K:
+ r->R = K;
+ r->Rs = true;
+ break;
+ case BPF_RET | BPF_A:
+ r->R = r->A;
+ r->Rs = true;
+ break;
+ case BPF_RET | BPF_X:
+ r->R = r->X;
+ r->Rs = true;
+ break;
+ case BPF_MISC_TAX:
+ r->X = r->A;
+ break;
+ case BPF_MISC_TXA:
+ r->A = r->X;
+ break;
+ case BPF_ST:
+ r->M[K] = r->A;
+ break;
+ case BPF_STX:
+ r->M[K] = r->X;
+ break;
+ case BPF_LD_W | BPF_ABS:
+ d = pkt_caplen - K;
+		if (d >= (int) sizeof(uint32_t))
+ r->A = extract_u32(pkt, K);
+ else
+ set_return(r);
+ break;
+ case BPF_LD_H | BPF_ABS:
+ d = pkt_caplen - K;
+		if (d >= (int) sizeof(uint16_t))
+ r->A = extract_u16(pkt, K);
+ else
+ set_return(r);
+ break;
+ case BPF_LD_B | BPF_ABS:
+ d = pkt_caplen - K;
+		if (d >= (int) sizeof(uint8_t))
+ r->A = extract_u8(pkt, K);
+ else
+ set_return(r);
+ break;
+	case BPF_LD_W | BPF_IND:
+		d = pkt_caplen - (r->X + K);
+		if (d >= (int) sizeof(uint32_t))
+			r->A = extract_u32(pkt, r->X + K);
+		else
+			set_return(r);
+		break;
+ case BPF_LD_H | BPF_IND:
+ d = pkt_caplen - (r->X + K);
+		if (d >= (int) sizeof(uint16_t))
+ r->A = extract_u16(pkt, r->X + K);
+ else
+ set_return(r);
+ break;
+ case BPF_LD_B | BPF_IND:
+ d = pkt_caplen - (r->X + K);
+		if (d >= (int) sizeof(uint8_t))
+ r->A = extract_u8(pkt, r->X + K);
+ else
+ set_return(r);
+ break;
+ case BPF_LDX_B | BPF_MSH:
+ d = pkt_caplen - K;
+		if (d >= (int) sizeof(uint8_t)) {
+ r->X = extract_u8(pkt, K);
+ r->X = (r->X & 0xf) << 2;
+ } else
+ set_return(r);
+ break;
+ case BPF_LD_W | BPF_LEN:
+ r->A = pkt_len;
+ break;
+ case BPF_LDX_W | BPF_LEN:
+		r->X = pkt_len;
+ break;
+ case BPF_LD | BPF_IMM:
+ r->A = K;
+ break;
+ case BPF_LDX | BPF_IMM:
+ r->X = K;
+ break;
+ case BPF_LD | BPF_MEM:
+ r->A = r->M[K];
+ break;
+ case BPF_LDX | BPF_MEM:
+ r->X = r->M[K];
+ break;
+ case BPF_JMP_JA:
+ r->Pc += K;
+ break;
+ case BPF_JMP_JGT | BPF_X:
+ r->Pc += r->A > r->X ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JGT | BPF_K:
+ r->Pc += r->A > K ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JGE | BPF_X:
+ r->Pc += r->A >= r->X ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JGE | BPF_K:
+ r->Pc += r->A >= K ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JEQ | BPF_X:
+ r->Pc += r->A == r->X ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JEQ | BPF_K:
+ r->Pc += r->A == K ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JSET | BPF_X:
+ r->Pc += r->A & r->X ? f->jt : f->jf;
+ break;
+ case BPF_JMP_JSET | BPF_K:
+ r->Pc += r->A & K ? f->jt : f->jf;
+ break;
+ case BPF_ALU_NEG:
+ r->A = -r->A;
+ break;
+ case BPF_ALU_LSH | BPF_X:
+ r->A <<= r->X;
+ break;
+ case BPF_ALU_LSH | BPF_K:
+ r->A <<= K;
+ break;
+ case BPF_ALU_RSH | BPF_X:
+ r->A >>= r->X;
+ break;
+ case BPF_ALU_RSH | BPF_K:
+ r->A >>= K;
+ break;
+ case BPF_ALU_ADD | BPF_X:
+ r->A += r->X;
+ break;
+ case BPF_ALU_ADD | BPF_K:
+ r->A += K;
+ break;
+ case BPF_ALU_SUB | BPF_X:
+ r->A -= r->X;
+ break;
+ case BPF_ALU_SUB | BPF_K:
+ r->A -= K;
+ break;
+ case BPF_ALU_MUL | BPF_X:
+ r->A *= r->X;
+ break;
+ case BPF_ALU_MUL | BPF_K:
+ r->A *= K;
+ break;
+ case BPF_ALU_DIV | BPF_X:
+ case BPF_ALU_MOD | BPF_X:
+ if (r->X == 0) {
+ set_return(r);
+ break;
+ }
+ goto do_div;
+ case BPF_ALU_DIV | BPF_K:
+ case BPF_ALU_MOD | BPF_K:
+ if (K == 0) {
+ set_return(r);
+ break;
+ }
+do_div:
+ switch (f->code) {
+ case BPF_ALU_DIV | BPF_X:
+ r->A /= r->X;
+ break;
+ case BPF_ALU_DIV | BPF_K:
+ r->A /= K;
+ break;
+ case BPF_ALU_MOD | BPF_X:
+ r->A %= r->X;
+ break;
+ case BPF_ALU_MOD | BPF_K:
+ r->A %= K;
+ break;
+ }
+ break;
+ case BPF_ALU_AND | BPF_X:
+ r->A &= r->X;
+ break;
+ case BPF_ALU_AND | BPF_K:
+ r->A &= K;
+ break;
+ case BPF_ALU_OR | BPF_X:
+ r->A |= r->X;
+ break;
+ case BPF_ALU_OR | BPF_K:
+ r->A |= K;
+ break;
+ case BPF_ALU_XOR | BPF_X:
+ r->A ^= r->X;
+ break;
+ case BPF_ALU_XOR | BPF_K:
+ r->A ^= K;
+ break;
+ }
+}
+
+static bool bpf_pc_has_breakpoint(uint16_t pc)
+{
+ int i;
+
+ for (i = 0; i < array_size(bpf_breakpoints); i++) {
+ if (bpf_breakpoints[i] < 0)
+ continue;
+ if (bpf_breakpoints[i] == pc)
+ return true;
+ }
+
+ return false;
+}
+
+static bool bpf_handle_breakpoint(struct bpf_regs *r, struct sock_filter *f,
+ uint8_t *pkt, uint32_t pkt_caplen,
+ uint32_t pkt_len)
+{
+ rl_printf("-- register dump --\n");
+ bpf_dump_curr(r, &f[r->Pc]);
+ rl_printf("-- packet dump --\n");
+ bpf_dump_pkt(pkt, pkt_caplen, pkt_len);
+ rl_printf("(breakpoint)\n");
+ return true;
+}
+
+static int bpf_run_all(struct sock_filter *f, uint16_t bpf_len, uint8_t *pkt,
+ uint32_t pkt_caplen, uint32_t pkt_len)
+{
+ bool stop = false;
+
+ while (bpf_curr.Rs == false && stop == false) {
+ bpf_safe_regs();
+
+ if (bpf_pc_has_breakpoint(bpf_curr.Pc))
+ stop = bpf_handle_breakpoint(&bpf_curr, f, pkt,
+ pkt_caplen, pkt_len);
+
+ bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen,
+ pkt_len);
+ bpf_curr.Pc++;
+ }
+
+ return stop ? -1 : bpf_curr.R;
+}
+
+static int bpf_run_stepping(struct sock_filter *f, uint16_t bpf_len,
+ uint8_t *pkt, uint32_t pkt_caplen,
+ uint32_t pkt_len, int next)
+{
+ bool stop = false;
+ int i = 1;
+
+ while (!bpf_curr.Rs && !stop) {
+ bpf_safe_regs();
+
+ if (i++ == next)
+ stop = bpf_handle_breakpoint(&bpf_curr, f, pkt,
+ pkt_caplen, pkt_len);
+
+ bpf_single_step(&bpf_curr, &f[bpf_curr.Pc], pkt, pkt_caplen,
+ pkt_len);
+ bpf_curr.Pc++;
+ }
+
+ return stop ? -1 : bpf_curr.R;
+}
+
+static bool pcap_loaded(void)
+{
+ if (pcap_fd < 0)
+ rl_printf("no pcap file loaded!\n");
+
+ return pcap_fd >= 0;
+}
+
+static struct pcap_pkthdr *pcap_curr_pkt(void)
+{
+ return (void *) pcap_ptr_va_curr;
+}
+
+static bool pcap_next_pkt(void)
+{
+ struct pcap_pkthdr *hdr = pcap_curr_pkt();
+
+ if (pcap_ptr_va_curr + sizeof(*hdr) -
+ pcap_ptr_va_start >= pcap_map_size)
+ return false;
+ if (hdr->caplen == 0 || hdr->len == 0 || hdr->caplen > hdr->len)
+ return false;
+ if (pcap_ptr_va_curr + sizeof(*hdr) + hdr->caplen -
+ pcap_ptr_va_start >= pcap_map_size)
+ return false;
+
+ pcap_ptr_va_curr += (sizeof(*hdr) + hdr->caplen);
+ return true;
+}
+
+static void pcap_reset_pkt(void)
+{
+ pcap_ptr_va_curr = pcap_ptr_va_start + sizeof(struct pcap_filehdr);
+}
+
+static int try_load_pcap(const char *file)
+{
+ struct pcap_filehdr *hdr;
+ struct stat sb;
+ int ret;
+
+ pcap_fd = open(file, O_RDONLY);
+ if (pcap_fd < 0) {
+ rl_printf("cannot open pcap [%s]!\n", strerror(errno));
+ return CMD_ERR;
+ }
+
+ ret = fstat(pcap_fd, &sb);
+ if (ret < 0) {
+ rl_printf("cannot fstat pcap file!\n");
+ return CMD_ERR;
+ }
+
+ if (!S_ISREG(sb.st_mode)) {
+ rl_printf("not a regular pcap file, duh!\n");
+ return CMD_ERR;
+ }
+
+ pcap_map_size = sb.st_size;
+ if (pcap_map_size <= sizeof(struct pcap_filehdr)) {
+ rl_printf("pcap file too small!\n");
+ return CMD_ERR;
+ }
+
+ pcap_ptr_va_start = mmap(NULL, pcap_map_size, PROT_READ,
+ MAP_SHARED | MAP_LOCKED, pcap_fd, 0);
+ if (pcap_ptr_va_start == MAP_FAILED) {
+ rl_printf("mmap of file failed!");
+ return CMD_ERR;
+ }
+
+ hdr = (void *) pcap_ptr_va_start;
+ if (hdr->magic != TCPDUMP_MAGIC) {
+ rl_printf("wrong pcap magic!\n");
+ return CMD_ERR;
+ }
+
+ pcap_reset_pkt();
+
+ return CMD_OK;
+}
+
+static void try_close_pcap(void)
+{
+ if (pcap_fd >= 0) {
+ munmap(pcap_ptr_va_start, pcap_map_size);
+ close(pcap_fd);
+
+ pcap_ptr_va_start = pcap_ptr_va_curr = NULL;
+ pcap_map_size = 0;
+ pcap_packet = 0;
+ pcap_fd = -1;
+ }
+}
+
+static int cmd_load_bpf(char *bpf_string)
+{
+ char sp, *token, separator = ',';
+ unsigned short bpf_len, i = 0;
+ struct sock_filter tmp;
+
+ bpf_prog_len = 0;
+ memset(bpf_image, 0, sizeof(bpf_image));
+
+ if (sscanf(bpf_string, "%hu%c", &bpf_len, &sp) != 2 ||
+ sp != separator || bpf_len > BPF_MAXINSNS || bpf_len == 0) {
+ rl_printf("syntax error in head length encoding!\n");
+ return CMD_ERR;
+ }
+
+ token = bpf_string;
+ while ((token = strchr(token, separator)) && (++token)[0]) {
+ if (i >= bpf_len) {
+ rl_printf("program exceeds encoded length!\n");
+ return CMD_ERR;
+ }
+
+ if (sscanf(token, "%hu %hhu %hhu %u,",
+ &tmp.code, &tmp.jt, &tmp.jf, &tmp.k) != 4) {
+ rl_printf("syntax error at instruction %d!\n", i);
+ return CMD_ERR;
+ }
+
+ bpf_image[i].code = tmp.code;
+ bpf_image[i].jt = tmp.jt;
+ bpf_image[i].jf = tmp.jf;
+ bpf_image[i].k = tmp.k;
+
+ i++;
+ }
+
+ if (i != bpf_len) {
+ rl_printf("syntax error exceeding encoded length!\n");
+ return CMD_ERR;
+ } else
+ bpf_prog_len = bpf_len;
+ if (!bpf_runnable(bpf_image, bpf_prog_len))
+ bpf_prog_len = 0;
+
+ return CMD_OK;
+}
+
+static int cmd_load_pcap(char *file)
+{
+ char *file_trim, *tmp;
+
+ file_trim = strtok_r(file, " ", &tmp);
+ if (file_trim == NULL)
+ return CMD_ERR;
+
+ try_close_pcap();
+
+ return try_load_pcap(file_trim);
+}
+
+static int cmd_load(char *arg)
+{
+ char *subcmd, *cont = NULL, *tmp = strdup(arg);
+ int ret = CMD_OK;
+
+ subcmd = strtok_r(tmp, " ", &cont);
+ if (subcmd == NULL)
+ goto out;
+ if (matches(subcmd, "bpf") == 0) {
+ bpf_reset();
+ bpf_reset_breakpoints();
+
+ if (!cont)
+ ret = CMD_ERR;
+ else
+ ret = cmd_load_bpf(cont);
+ } else if (matches(subcmd, "pcap") == 0) {
+ ret = cmd_load_pcap(cont);
+ } else {
+out:
+ rl_printf("bpf <code>: load bpf code\n");
+ rl_printf("pcap <file>: load pcap file\n");
+ ret = CMD_ERR;
+ }
+
+ free(tmp);
+ return ret;
+}
+
+static int cmd_step(char *num)
+{
+ struct pcap_pkthdr *hdr;
+ int steps, ret;
+
+ if (!bpf_prog_loaded() || !pcap_loaded())
+ return CMD_ERR;
+
+ steps = strtol(num, NULL, 10);
+ if (steps == 0 || strlen(num) == 0)
+ steps = 1;
+ if (steps < 0) {
+ if (!bpf_restore_regs(steps))
+ return CMD_ERR;
+ steps = 1;
+ }
+
+ hdr = pcap_curr_pkt();
+ ret = bpf_run_stepping(bpf_image, bpf_prog_len,
+ (uint8_t *) hdr + sizeof(*hdr),
+ hdr->caplen, hdr->len, steps);
+ if (ret >= 0 || bpf_curr.Rs) {
+ bpf_reset();
+ if (!pcap_next_pkt()) {
+ rl_printf("(going back to first packet)\n");
+ pcap_reset_pkt();
+ } else {
+ rl_printf("(next packet)\n");
+ }
+ }
+
+ return CMD_OK;
+}
+
+static int cmd_select(char *num)
+{
+ unsigned int which, i;
+ bool have_next = true;
+
+ if (!pcap_loaded() || strlen(num) == 0)
+ return CMD_ERR;
+
+ which = strtoul(num, NULL, 10);
+ if (which == 0) {
+ rl_printf("packet count starts with 1, clamping!\n");
+ which = 1;
+ }
+
+ pcap_reset_pkt();
+ bpf_reset();
+
+	for (i = 0; i < which - 1 && (have_next = pcap_next_pkt()); i++)
+ /* noop */;
+ if (!have_next || pcap_curr_pkt() == NULL) {
+ rl_printf("no packet #%u available!\n", which);
+ pcap_reset_pkt();
+ return CMD_ERR;
+ }
+
+ return CMD_OK;
+}
+
+static int cmd_breakpoint(char *subcmd)
+{
+ if (!bpf_prog_loaded())
+ return CMD_ERR;
+ if (strlen(subcmd) == 0)
+ bpf_dump_breakpoints();
+ else if (matches(subcmd, "reset") == 0)
+ bpf_reset_breakpoints();
+ else {
+ unsigned int where = strtoul(subcmd, NULL, 10);
+
+ if (where < bpf_prog_len) {
+ bpf_set_breakpoints(where);
+ rl_printf("breakpoint at: ");
+ bpf_disasm(bpf_image[where], where);
+ }
+ }
+
+ return CMD_OK;
+}
+
+static int cmd_run(char *num)
+{
+ static uint32_t pass, fail;
+ bool has_limit = true;
+ int pkts = 0, i = 0;
+
+ if (!bpf_prog_loaded() || !pcap_loaded())
+ return CMD_ERR;
+
+ pkts = strtol(num, NULL, 10);
+ if (pkts == 0 || strlen(num) == 0)
+ has_limit = false;
+
+ do {
+ struct pcap_pkthdr *hdr = pcap_curr_pkt();
+ int ret = bpf_run_all(bpf_image, bpf_prog_len,
+ (uint8_t *) hdr + sizeof(*hdr),
+ hdr->caplen, hdr->len);
+ if (ret > 0)
+ pass++;
+ else if (ret == 0)
+ fail++;
+ else
+ return CMD_OK;
+ bpf_reset();
+ } while (pcap_next_pkt() && (!has_limit || (++i < pkts)));
+
+ rl_printf("bpf passes:%u fails:%u\n", pass, fail);
+
+ pcap_reset_pkt();
+ bpf_reset();
+
+ pass = fail = 0;
+ return CMD_OK;
+}
+
+static int cmd_disassemble(char *line_string)
+{
+ bool single_line = false;
+ unsigned long line;
+
+ if (!bpf_prog_loaded())
+ return CMD_ERR;
+ if (strlen(line_string) > 0 &&
+ (line = strtoul(line_string, NULL, 10)) < bpf_prog_len)
+ single_line = true;
+ if (single_line)
+ bpf_disasm(bpf_image[line], line);
+ else
+ bpf_disasm_all(bpf_image, bpf_prog_len);
+
+ return CMD_OK;
+}
+
+static int cmd_dump(char *dontcare)
+{
+ if (!bpf_prog_loaded())
+ return CMD_ERR;
+
+ bpf_dump_all(bpf_image, bpf_prog_len);
+
+ return CMD_OK;
+}
+
+static int cmd_quit(char *dontcare)
+{
+ return CMD_EX;
+}
+
+static const struct shell_cmd cmds[] = {
+ { .name = "load", .func = cmd_load },
+ { .name = "select", .func = cmd_select },
+ { .name = "step", .func = cmd_step },
+ { .name = "run", .func = cmd_run },
+ { .name = "breakpoint", .func = cmd_breakpoint },
+ { .name = "disassemble", .func = cmd_disassemble },
+ { .name = "dump", .func = cmd_dump },
+ { .name = "quit", .func = cmd_quit },
+};
+
+static int execf(char *arg)
+{
+ char *cmd, *cont, *tmp = strdup(arg);
+ int i, ret = 0, len;
+
+ cmd = strtok_r(tmp, " ", &cont);
+ if (cmd == NULL)
+ goto out;
+ len = strlen(cmd);
+ for (i = 0; i < array_size(cmds); i++) {
+ if (len != strlen(cmds[i].name))
+ continue;
+ if (strncmp(cmds[i].name, cmd, len) == 0) {
+ ret = cmds[i].func(cont);
+ break;
+ }
+ }
+out:
+ free(tmp);
+ return ret;
+}
+
+static char *shell_comp_gen(const char *buf, int state)
+{
+ static int list_index, len;
+
+ if (!state) {
+ list_index = 0;
+ len = strlen(buf);
+ }
+
+ for (; list_index < array_size(cmds); ) {
+ const char *name = cmds[list_index].name;
+
+ list_index++;
+ if (strncmp(name, buf, len) == 0)
+ return strdup(name);
+ }
+
+ return NULL;
+}
+
+static char **shell_completion(const char *buf, int start, int end)
+{
+ char **matches = NULL;
+
+ if (start == 0)
+ matches = rl_completion_matches(buf, shell_comp_gen);
+
+ return matches;
+}
+
+static void intr_shell(int sig)
+{
+ if (rl_end)
+ rl_kill_line(-1, 0);
+
+ rl_crlf();
+ rl_refresh_line(0, 0);
+ rl_free_line_state();
+}
+
+static void init_shell(FILE *fin, FILE *fout)
+{
+ char file[128];
+
+ snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME"));
+ read_history(file);
+
+ rl_instream = fin;
+ rl_outstream = fout;
+
+ rl_readline_name = "bpf_dbg";
+ rl_terminal_name = getenv("TERM");
+
+ rl_catch_signals = 0;
+ rl_catch_sigwinch = 1;
+
+ rl_attempted_completion_function = shell_completion;
+
+ rl_bind_key('\t', rl_complete);
+
+ rl_bind_key_in_map('\t', rl_complete, emacs_meta_keymap);
+ rl_bind_key_in_map('\033', rl_complete, emacs_meta_keymap);
+
+ snprintf(file, sizeof(file), "%s/.bpf_dbg_init", getenv("HOME"));
+ rl_read_init_file(file);
+
+ rl_prep_terminal(0);
+ rl_set_signals();
+
+ signal(SIGINT, intr_shell);
+}
+
+static void exit_shell(FILE *fin, FILE *fout)
+{
+ char file[128];
+
+ snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME"));
+ write_history(file);
+
+ clear_history();
+ rl_deprep_terminal();
+
+ try_close_pcap();
+
+ if (fin != stdin)
+ fclose(fin);
+ if (fout != stdout)
+ fclose(fout);
+}
+
+static int run_shell_loop(FILE *fin, FILE *fout)
+{
+ char *buf;
+
+ init_shell(fin, fout);
+
+ while ((buf = readline("> ")) != NULL) {
+ int ret = execf(buf);
+ if (ret == CMD_EX)
+ break;
+ if (ret == CMD_OK && strlen(buf) > 0)
+ add_history(buf);
+
+ free(buf);
+ }
+
+ exit_shell(fin, fout);
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ FILE *fin = NULL, *fout = NULL;
+
+ if (argc >= 2)
+ fin = fopen(argv[1], "r");
+ if (argc >= 3)
+ fout = fopen(argv[2], "w");
+
+ return run_shell_loop(fin ? : stdin, fout ? : stdout);
+}
diff --git a/tools/bpf/bpf_exp.l b/tools/bpf/bpf_exp.l
new file mode 100644
index 0000000000..4da8d053d6
--- /dev/null
+++ b/tools/bpf/bpf_exp.l
@@ -0,0 +1,198 @@
+/*
+ * BPF asm code lexer
+ *
+ * This program is free software; you can distribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Syntax kept close to:
+ *
+ * Steven McCanne and Van Jacobson. 1993. The BSD packet filter: a new
+ * architecture for user-level packet capture. In Proceedings of the
+ * USENIX Winter 1993 Conference Proceedings on USENIX Winter 1993
+ * Conference Proceedings (USENIX'93). USENIX Association, Berkeley,
+ * CA, USA, 2-2.
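+ *
+ * For illustration (hypothetical input, sketch only): the source line
+ * "ldh [12]" is scanned into the tokens OP_LDH '[' number ']' with
+ * yylval.number = 12, which the parser then reduces to a single
+ * BPF_LD | BPF_H | BPF_ABS instruction.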
+ *
+ * Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+%{
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <linux/filter.h>
+
+#include "bpf_exp.yacc.h"
+
+extern void yyerror(const char *str);
+
+%}
+
+%option align
+%option ecs
+
+%option nounput
+%option noreject
+%option noinput
+%option noyywrap
+
+%option 8bit
+%option caseless
+%option yylineno
+
+%%
+
+"ldb" { return OP_LDB; }
+"ldh" { return OP_LDH; }
+"ld" { return OP_LD; }
+"ldi" { return OP_LDI; }
+"ldx" { return OP_LDX; }
+"ldxi" { return OP_LDXI; }
+"ldxb" { return OP_LDXB; }
+"st" { return OP_ST; }
+"stx" { return OP_STX; }
+"jmp" { return OP_JMP; }
+"ja" { return OP_JMP; }
+"jeq" { return OP_JEQ; }
+"jneq" { return OP_JNEQ; }
+"jne" { return OP_JNEQ; }
+"jlt" { return OP_JLT; }
+"jle" { return OP_JLE; }
+"jgt" { return OP_JGT; }
+"jge" { return OP_JGE; }
+"jset" { return OP_JSET; }
+"add" { return OP_ADD; }
+"sub" { return OP_SUB; }
+"mul" { return OP_MUL; }
+"div" { return OP_DIV; }
+"mod" { return OP_MOD; }
+"neg" { return OP_NEG; }
+"and" { return OP_AND; }
+"xor" { return OP_XOR; }
+"or" { return OP_OR; }
+"lsh" { return OP_LSH; }
+"rsh" { return OP_RSH; }
+"ret" { return OP_RET; }
+"tax" { return OP_TAX; }
+"txa" { return OP_TXA; }
+
+"#"?("len") { return K_PKT_LEN; }
+
+"#"?("proto") {
+ yylval.number = SKF_AD_PROTOCOL;
+ return extension;
+ }
+"#"?("type") {
+ yylval.number = SKF_AD_PKTTYPE;
+ return extension;
+ }
+"#"?("poff") {
+ yylval.number = SKF_AD_PAY_OFFSET;
+ return extension;
+ }
+"#"?("ifidx") {
+ yylval.number = SKF_AD_IFINDEX;
+ return extension;
+ }
+"#"?("nla") {
+ yylval.number = SKF_AD_NLATTR;
+ return extension;
+ }
+"#"?("nlan") {
+ yylval.number = SKF_AD_NLATTR_NEST;
+ return extension;
+ }
+"#"?("mark") {
+ yylval.number = SKF_AD_MARK;
+ return extension;
+ }
+"#"?("queue") {
+ yylval.number = SKF_AD_QUEUE;
+ return extension;
+ }
+"#"?("hatype") {
+ yylval.number = SKF_AD_HATYPE;
+ return extension;
+ }
+"#"?("rxhash") {
+ yylval.number = SKF_AD_RXHASH;
+ return extension;
+ }
+"#"?("cpu") {
+ yylval.number = SKF_AD_CPU;
+ return extension;
+ }
+"#"?("vlan_tci") {
+ yylval.number = SKF_AD_VLAN_TAG;
+ return extension;
+ }
+"#"?("vlan_pr") {
+ yylval.number = SKF_AD_VLAN_TAG_PRESENT;
+ return extension;
+ }
+"#"?("vlan_avail") {
+ yylval.number = SKF_AD_VLAN_TAG_PRESENT;
+ return extension;
+ }
+"#"?("vlan_tpid") {
+ yylval.number = SKF_AD_VLAN_TPID;
+ return extension;
+ }
+"#"?("rand") {
+ yylval.number = SKF_AD_RANDOM;
+ return extension;
+ }
+
+":" { return ':'; }
+"," { return ','; }
+"#" { return '#'; }
+"%" { return '%'; }
+"[" { return '['; }
+"]" { return ']'; }
+"(" { return '('; }
+")" { return ')'; }
+"x" { return 'x'; }
+"a" { return 'a'; }
+"+" { return '+'; }
+"M" { return 'M'; }
+"*" { return '*'; }
+"&" { return '&'; }
+
+([0][x][a-fA-F0-9]+) {
+ yylval.number = strtoul(yytext, NULL, 16);
+ return number;
+ }
+([0][b][0-1]+) {
+ yylval.number = strtol(yytext + 2, NULL, 2);
+ return number;
+ }
+(([0])|([-+]?[1-9][0-9]*)) {
+ yylval.number = strtol(yytext, NULL, 10);
+ return number;
+ }
+([0][0-7]+) {
+ yylval.number = strtol(yytext + 1, NULL, 8);
+ return number;
+ }
+[a-zA-Z_][a-zA-Z0-9_]+ {
+ yylval.label = strdup(yytext);
+ return label;
+ }
+
+"/*"([^\*]|\*[^/])*"*/" { /* NOP */ }
+";"[^\n]* { /* NOP */ }
+^#.* { /* NOP */ }
+[ \t]+ { /* NOP */ }
+[ \n]+ { /* NOP */ }
+
+. {
+ printf("unknown character \'%s\'", yytext);
+ yyerror("lex unknown character");
+ }
+
+%%
diff --git a/tools/bpf/bpf_exp.y b/tools/bpf/bpf_exp.y
new file mode 100644
index 0000000000..dfb7254a24
--- /dev/null
+++ b/tools/bpf/bpf_exp.y
@@ -0,0 +1,668 @@
+/*
+ * BPF asm code parser
+ *
+ * This program is free software; you can distribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Syntax kept close to:
+ *
+ * Steven McCanne and Van Jacobson. 1993. The BSD packet filter: a new
+ * architecture for user-level packet capture. In Proceedings of the
+ * USENIX Winter 1993 Conference Proceedings on USENIX Winter 1993
+ * Conference Proceedings (USENIX'93). USENIX Association, Berkeley,
+ * CA, USA, 2-2.
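+ *
+ * For illustration (hypothetical input, sketch only): the statement
+ * "jeq #0x800, l1, l2" matches the jeq rule below, records l1 and l2
+ * as pending jt/jf labels, and emits BPF_JMP | BPF_JEQ | BPF_K with
+ * k = 0x800; stage 2 later resolves the labels to numeric offsets.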
+ *
+ * Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
+%{
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#include <linux/filter.h>
+
+#include "bpf_exp.yacc.h"
+
+enum jmp_type { JTL, JFL, JKL };
+
+extern FILE *yyin;
+extern int yylineno;
+extern int yylex(void);
+extern void yyerror(const char *str);
+
+extern void bpf_asm_compile(FILE *fp, bool cstyle);
+static void bpf_set_curr_instr(uint16_t op, uint8_t jt, uint8_t jf, uint32_t k);
+static void bpf_set_curr_label(char *label);
+static void bpf_set_jmp_label(char *label, enum jmp_type type);
+
+%}
+
+%union {
+ char *label;
+ uint32_t number;
+}
+
+%token OP_LDB OP_LDH OP_LD OP_LDX OP_ST OP_STX OP_JMP OP_JEQ OP_JGT OP_JGE
+%token OP_JSET OP_ADD OP_SUB OP_MUL OP_DIV OP_AND OP_OR OP_XOR OP_LSH OP_RSH
+%token OP_RET OP_TAX OP_TXA OP_LDXB OP_MOD OP_NEG OP_JNEQ OP_JLT OP_JLE OP_LDI
+%token OP_LDXI
+
+%token K_PKT_LEN
+
+%token ':' ',' '[' ']' '(' ')' 'x' 'a' '+' 'M' '*' '&' '#' '%'
+
+%token extension number label
+
+%type <label> label
+%type <number> extension
+%type <number> number
+
+%%
+
+prog
+ : line
+ | prog line
+ ;
+
+line
+ : instr
+ | labelled_instr
+ ;
+
+labelled_instr
+ : labelled instr
+ ;
+
+instr
+ : ldb
+ | ldh
+ | ld
+ | ldi
+ | ldx
+ | ldxi
+ | st
+ | stx
+ | jmp
+ | jeq
+ | jneq
+ | jlt
+ | jle
+ | jgt
+ | jge
+ | jset
+ | add
+ | sub
+ | mul
+ | div
+ | mod
+ | neg
+ | and
+ | or
+ | xor
+ | lsh
+ | rsh
+ | ret
+ | tax
+ | txa
+ ;
+
+labelled
+ : label ':' { bpf_set_curr_label($1); }
+ ;
+
+ldb
+ : OP_LDB '[' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_IND, 0, 0, $5); }
+ | OP_LDB '[' '%' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_IND, 0, 0, $6); }
+ | OP_LDB '[' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0, $3); }
+ | OP_LDB extension {
+ bpf_set_curr_instr(BPF_LD | BPF_B | BPF_ABS, 0, 0,
+ SKF_AD_OFF + $2); }
+ ;
+
+ldh
+ : OP_LDH '[' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_IND, 0, 0, $5); }
+ | OP_LDH '[' '%' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_IND, 0, 0, $6); }
+ | OP_LDH '[' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0, $3); }
+ | OP_LDH extension {
+ bpf_set_curr_instr(BPF_LD | BPF_H | BPF_ABS, 0, 0,
+ SKF_AD_OFF + $2); }
+ ;
+
+ldi
+ : OP_LDI '#' number {
+ bpf_set_curr_instr(BPF_LD | BPF_IMM, 0, 0, $3); }
+ | OP_LDI number {
+ bpf_set_curr_instr(BPF_LD | BPF_IMM, 0, 0, $2); }
+ ;
+
+ld
+ : OP_LD '#' number {
+ bpf_set_curr_instr(BPF_LD | BPF_IMM, 0, 0, $3); }
+ | OP_LD K_PKT_LEN {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_LEN, 0, 0, 0); }
+ | OP_LD extension {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0,
+ SKF_AD_OFF + $2); }
+ | OP_LD 'M' '[' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_MEM, 0, 0, $4); }
+ | OP_LD '[' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_IND, 0, 0, $5); }
+ | OP_LD '[' '%' 'x' '+' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_IND, 0, 0, $6); }
+ | OP_LD '[' number ']' {
+ bpf_set_curr_instr(BPF_LD | BPF_W | BPF_ABS, 0, 0, $3); }
+ ;
+
+ldxi
+ : OP_LDXI '#' number {
+ bpf_set_curr_instr(BPF_LDX | BPF_IMM, 0, 0, $3); }
+ | OP_LDXI number {
+ bpf_set_curr_instr(BPF_LDX | BPF_IMM, 0, 0, $2); }
+ ;
+
+ldx
+ : OP_LDX '#' number {
+ bpf_set_curr_instr(BPF_LDX | BPF_IMM, 0, 0, $3); }
+ | OP_LDX K_PKT_LEN {
+ bpf_set_curr_instr(BPF_LDX | BPF_W | BPF_LEN, 0, 0, 0); }
+ | OP_LDX 'M' '[' number ']' {
+ bpf_set_curr_instr(BPF_LDX | BPF_MEM, 0, 0, $4); }
+ | OP_LDXB number '*' '(' '[' number ']' '&' number ')' {
+ if ($2 != 4 || $9 != 0xf) {
+ fprintf(stderr, "ldxb offset not supported!\n");
+ exit(1);
+ } else {
+ bpf_set_curr_instr(BPF_LDX | BPF_MSH | BPF_B, 0, 0, $6); } }
+ | OP_LDX number '*' '(' '[' number ']' '&' number ')' {
+ if ($2 != 4 || $9 != 0xf) {
+ fprintf(stderr, "ldxb offset not supported!\n");
+ exit(1);
+ } else {
+ bpf_set_curr_instr(BPF_LDX | BPF_MSH | BPF_B, 0, 0, $6); } }
+ ;
+
+st
+ : OP_ST 'M' '[' number ']' {
+ bpf_set_curr_instr(BPF_ST, 0, 0, $4); }
+ ;
+
+stx
+ : OP_STX 'M' '[' number ']' {
+ bpf_set_curr_instr(BPF_STX, 0, 0, $4); }
+ ;
+
+jmp
+ : OP_JMP label {
+ bpf_set_jmp_label($2, JKL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JA, 0, 0, 0); }
+ ;
+
+jeq
+ : OP_JEQ '#' number ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, $3); }
+ | OP_JEQ 'x' ',' label ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_jmp_label($6, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ | OP_JEQ '%' 'x' ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ | OP_JEQ '#' number ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, $3); }
+ | OP_JEQ 'x' ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ | OP_JEQ '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ ;
+
+jneq
+ : OP_JNEQ '#' number ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, $3); }
+ | OP_JNEQ 'x' ',' label {
+ bpf_set_jmp_label($4, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ | OP_JNEQ '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JEQ | BPF_X, 0, 0, 0); }
+ ;
+
+jlt
+ : OP_JLT '#' number ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_K, 0, 0, $3); }
+ | OP_JLT 'x' ',' label {
+ bpf_set_jmp_label($4, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ | OP_JLT '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ ;
+
+jle
+ : OP_JLE '#' number ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_K, 0, 0, $3); }
+ | OP_JLE 'x' ',' label {
+ bpf_set_jmp_label($4, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ | OP_JLE '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ ;
+
+jgt
+ : OP_JGT '#' number ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_K, 0, 0, $3); }
+ | OP_JGT 'x' ',' label ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_jmp_label($6, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ | OP_JGT '%' 'x' ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ | OP_JGT '#' number ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_K, 0, 0, $3); }
+ | OP_JGT 'x' ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ | OP_JGT '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGT | BPF_X, 0, 0, 0); }
+ ;
+
+jge
+ : OP_JGE '#' number ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_K, 0, 0, $3); }
+ | OP_JGE 'x' ',' label ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_jmp_label($6, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ | OP_JGE '%' 'x' ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ | OP_JGE '#' number ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_K, 0, 0, $3); }
+ | OP_JGE 'x' ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ | OP_JGE '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 0); }
+ ;
+
+jset
+ : OP_JSET '#' number ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_K, 0, 0, $3); }
+ | OP_JSET 'x' ',' label ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_jmp_label($6, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); }
+ | OP_JSET '%' 'x' ',' label ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_jmp_label($7, JFL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); }
+ | OP_JSET '#' number ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_K, 0, 0, $3); }
+ | OP_JSET 'x' ',' label {
+ bpf_set_jmp_label($4, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); }
+ | OP_JSET '%' 'x' ',' label {
+ bpf_set_jmp_label($5, JTL);
+ bpf_set_curr_instr(BPF_JMP | BPF_JSET | BPF_X, 0, 0, 0); }
+ ;
+
+add
+ : OP_ADD '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_ADD | BPF_K, 0, 0, $3); }
+ | OP_ADD 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_ADD | BPF_X, 0, 0, 0); }
+ | OP_ADD '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_ADD | BPF_X, 0, 0, 0); }
+ ;
+
+sub
+ : OP_SUB '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_SUB | BPF_K, 0, 0, $3); }
+ | OP_SUB 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_SUB | BPF_X, 0, 0, 0); }
+ | OP_SUB '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_SUB | BPF_X, 0, 0, 0); }
+ ;
+
+mul
+ : OP_MUL '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_MUL | BPF_K, 0, 0, $3); }
+ | OP_MUL 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_MUL | BPF_X, 0, 0, 0); }
+ | OP_MUL '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_MUL | BPF_X, 0, 0, 0); }
+ ;
+
+div
+ : OP_DIV '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_DIV | BPF_K, 0, 0, $3); }
+ | OP_DIV 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_DIV | BPF_X, 0, 0, 0); }
+ | OP_DIV '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_DIV | BPF_X, 0, 0, 0); }
+ ;
+
+mod
+ : OP_MOD '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_MOD | BPF_K, 0, 0, $3); }
+ | OP_MOD 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_MOD | BPF_X, 0, 0, 0); }
+ | OP_MOD '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_MOD | BPF_X, 0, 0, 0); }
+ ;
+
+neg
+ : OP_NEG {
+ bpf_set_curr_instr(BPF_ALU | BPF_NEG, 0, 0, 0); }
+ ;
+
+and
+ : OP_AND '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_AND | BPF_K, 0, 0, $3); }
+ | OP_AND 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_AND | BPF_X, 0, 0, 0); }
+ | OP_AND '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_AND | BPF_X, 0, 0, 0); }
+ ;
+
+or
+ : OP_OR '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_OR | BPF_K, 0, 0, $3); }
+ | OP_OR 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_OR | BPF_X, 0, 0, 0); }
+ | OP_OR '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_OR | BPF_X, 0, 0, 0); }
+ ;
+
+xor
+ : OP_XOR '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_XOR | BPF_K, 0, 0, $3); }
+ | OP_XOR 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_XOR | BPF_X, 0, 0, 0); }
+ | OP_XOR '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_XOR | BPF_X, 0, 0, 0); }
+ ;
+
+lsh
+ : OP_LSH '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_LSH | BPF_K, 0, 0, $3); }
+ | OP_LSH 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_LSH | BPF_X, 0, 0, 0); }
+ | OP_LSH '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_LSH | BPF_X, 0, 0, 0); }
+ ;
+
+rsh
+ : OP_RSH '#' number {
+ bpf_set_curr_instr(BPF_ALU | BPF_RSH | BPF_K, 0, 0, $3); }
+ | OP_RSH 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_RSH | BPF_X, 0, 0, 0); }
+ | OP_RSH '%' 'x' {
+ bpf_set_curr_instr(BPF_ALU | BPF_RSH | BPF_X, 0, 0, 0); }
+ ;
+
+ret
+ : OP_RET 'a' {
+ bpf_set_curr_instr(BPF_RET | BPF_A, 0, 0, 0); }
+ | OP_RET '%' 'a' {
+ bpf_set_curr_instr(BPF_RET | BPF_A, 0, 0, 0); }
+ | OP_RET 'x' {
+ bpf_set_curr_instr(BPF_RET | BPF_X, 0, 0, 0); }
+ | OP_RET '%' 'x' {
+ bpf_set_curr_instr(BPF_RET | BPF_X, 0, 0, 0); }
+ | OP_RET '#' number {
+ bpf_set_curr_instr(BPF_RET | BPF_K, 0, 0, $3); }
+ ;
+
+tax
+ : OP_TAX {
+ bpf_set_curr_instr(BPF_MISC | BPF_TAX, 0, 0, 0); }
+ ;
+
+txa
+ : OP_TXA {
+ bpf_set_curr_instr(BPF_MISC | BPF_TXA, 0, 0, 0); }
+ ;
+
+%%
+
+static int curr_instr = 0;
+static struct sock_filter out[BPF_MAXINSNS];
+static char **labels, **labels_jt, **labels_jf, **labels_k;
+
+static void bpf_assert_max(void)
+{
+ if (curr_instr >= BPF_MAXINSNS) {
+ fprintf(stderr, "only max %u insns allowed!\n", BPF_MAXINSNS);
+ exit(1);
+ }
+}
+
+static void bpf_set_curr_instr(uint16_t code, uint8_t jt, uint8_t jf,
+ uint32_t k)
+{
+ bpf_assert_max();
+ out[curr_instr].code = code;
+ out[curr_instr].jt = jt;
+ out[curr_instr].jf = jf;
+ out[curr_instr].k = k;
+ curr_instr++;
+}
+
+static void bpf_set_curr_label(char *label)
+{
+ bpf_assert_max();
+ labels[curr_instr] = label;
+}
+
+static void bpf_set_jmp_label(char *label, enum jmp_type type)
+{
+ bpf_assert_max();
+ switch (type) {
+ case JTL:
+ labels_jt[curr_instr] = label;
+ break;
+ case JFL:
+ labels_jf[curr_instr] = label;
+ break;
+ case JKL:
+ labels_k[curr_instr] = label;
+ break;
+ }
+}
+
+static int bpf_find_insns_offset(const char *label)
+{
+ int i, max = curr_instr, ret = -ENOENT;
+
+ for (i = 0; i < max; i++) {
+ if (labels[i] && !strcmp(label, labels[i])) {
+ ret = i;
+ break;
+ }
+ }
+
+ if (ret == -ENOENT) {
+ fprintf(stderr, "no such label \'%s\'!\n", label);
+ exit(1);
+ }
+
+ return ret;
+}
+
+static void bpf_stage_1_insert_insns(void)
+{
+ yyparse();
+}
+
+static void bpf_reduce_k_jumps(void)
+{
+ int i;
+
+ for (i = 0; i < curr_instr; i++) {
+ if (labels_k[i]) {
+ int off = bpf_find_insns_offset(labels_k[i]);
+ out[i].k = (uint32_t) (off - i - 1);
+ }
+ }
+}
+
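+/*
+ * Worked example (illustrative, not from the original source): a
+ * conditional at insn #2 whose true-branch label resolves to insn #5
+ * is encoded as jt = 5 - 2 - 1 = 2, i.e. "skip the next two
+ * instructions". jt/jf are unsigned 8-bit fields, hence the 0..255
+ * range check below.
+ */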
+static uint8_t bpf_encode_jt_jf_offset(int off, int i)
+{
+ int delta = off - i - 1;
+
+ if (delta < 0 || delta > 255) {
+ fprintf(stderr, "error: insn #%d jumps to insn #%d, "
+ "which is out of range\n", i, off);
+ exit(1);
+ }
+ return (uint8_t) delta;
+}
+
+static void bpf_reduce_jt_jumps(void)
+{
+ int i;
+
+ for (i = 0; i < curr_instr; i++) {
+ if (labels_jt[i]) {
+ int off = bpf_find_insns_offset(labels_jt[i]);
+ out[i].jt = bpf_encode_jt_jf_offset(off, i);
+ }
+ }
+}
+
+static void bpf_reduce_jf_jumps(void)
+{
+ int i;
+
+ for (i = 0; i < curr_instr; i++) {
+ if (labels_jf[i]) {
+ int off = bpf_find_insns_offset(labels_jf[i]);
+ out[i].jf = bpf_encode_jt_jf_offset(off, i);
+ }
+ }
+}
+
+static void bpf_stage_2_reduce_labels(void)
+{
+ bpf_reduce_k_jumps();
+ bpf_reduce_jt_jumps();
+ bpf_reduce_jf_jumps();
+}
+
+static void bpf_pretty_print_c(void)
+{
+ int i;
+
+ for (i = 0; i < curr_instr; i++)
+ printf("{ %#04x, %2u, %2u, %#010x },\n", out[i].code,
+ out[i].jt, out[i].jf, out[i].k);
+}
+
+static void bpf_pretty_print(void)
+{
+ int i;
+
+ printf("%u,", curr_instr);
+ for (i = 0; i < curr_instr; i++)
+ printf("%u %u %u %u,", out[i].code,
+ out[i].jt, out[i].jf, out[i].k);
+ printf("\n");
+}
+
+static void bpf_init(void)
+{
+ memset(out, 0, sizeof(out));
+
+ labels = calloc(BPF_MAXINSNS, sizeof(*labels));
+ assert(labels);
+ labels_jt = calloc(BPF_MAXINSNS, sizeof(*labels_jt));
+ assert(labels_jt);
+ labels_jf = calloc(BPF_MAXINSNS, sizeof(*labels_jf));
+ assert(labels_jf);
+ labels_k = calloc(BPF_MAXINSNS, sizeof(*labels_k));
+ assert(labels_k);
+}
+
+static void bpf_destroy_labels(void)
+{
+ int i;
+
+ for (i = 0; i < curr_instr; i++) {
+ free(labels_jf[i]);
+ free(labels_jt[i]);
+ free(labels_k[i]);
+ free(labels[i]);
+ }
+}
+
+static void bpf_destroy(void)
+{
+ bpf_destroy_labels();
+ free(labels_jt);
+ free(labels_jf);
+ free(labels_k);
+ free(labels);
+}
+
+void bpf_asm_compile(FILE *fp, bool cstyle)
+{
+ yyin = fp;
+
+ bpf_init();
+ bpf_stage_1_insert_insns();
+ bpf_stage_2_reduce_labels();
+ bpf_destroy();
+
+ if (cstyle)
+ bpf_pretty_print_c();
+ else
+ bpf_pretty_print();
+
+ if (fp != stdin)
+ fclose(yyin);
+}
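+
+/*
+ * Illustrative end-to-end run (via the bpf_asm front end; numbers
+ * follow from the opcode definitions above):
+ *
+ *   $ cat foo
+ *   ldh [12]
+ *   jne #0x806, drop
+ *   ret #-1
+ *   drop: ret #0
+ *
+ *   $ bpf_asm foo
+ *   4,40 0 0 12,21 0 1 2054,6 0 0 4294967295,6 0 0 0,
+ */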
+
+void yyerror(const char *str)
+{
+ fprintf(stderr, "error: %s at line %d\n", str, yylineno);
+ exit(1);
+}
diff --git a/tools/bpf/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c
new file mode 100644
index 0000000000..a90a5d110f
--- /dev/null
+++ b/tools/bpf/bpf_jit_disasm.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Minimal BPF JIT image disassembler
+ *
+ * Disassembles BPF JIT compiler emitted opcodes back to asm insn's for
+ * debugging or verification purposes.
+ *
+ * To get the disassembly of the JIT code, do the following:
+ *
+ * 1) `echo 2 > /proc/sys/net/core/bpf_jit_enable`
+ * 2) Load a BPF filter (e.g. `tcpdump -p -n -s 0 -i eth1 host 192.168.20.0/24`)
+ * 3) Run e.g. `bpf_jit_disasm -o` to read out the last JIT code
+ *
+ * Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <unistd.h>
+#include <string.h>
+#include <bfd.h>
+#include <dis-asm.h>
+#include <regex.h>
+#include <fcntl.h>
+#include <sys/klog.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <limits.h>
+#include <tools/dis-asm-compat.h>
+
+#define CMD_ACTION_SIZE_BUFFER 10
+#define CMD_ACTION_READ_ALL 3
+
+static void get_exec_path(char *tpath, size_t size)
+{
+ char *path;
+ ssize_t len;
+
+ snprintf(tpath, size, "/proc/%d/exe", (int) getpid());
+ tpath[size - 1] = 0;
+
+ path = strdup(tpath);
+ assert(path);
+
+	len = readlink(path, tpath, size - 1);
+	if (len < 0)
+		len = 0;
+	tpath[len] = 0;
+
+ free(path);
+}
+
+static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
+{
+ int count, i, pc = 0;
+ char tpath[PATH_MAX];
+ struct disassemble_info info;
+ disassembler_ftype disassemble;
+ bfd *bfdf;
+
+ memset(tpath, 0, sizeof(tpath));
+ get_exec_path(tpath, sizeof(tpath));
+
+ bfdf = bfd_openr(tpath, NULL);
+ assert(bfdf);
+ assert(bfd_check_format(bfdf, bfd_object));
+
+ init_disassemble_info_compat(&info, stdout,
+ (fprintf_ftype) fprintf,
+ fprintf_styled);
+ info.arch = bfd_get_arch(bfdf);
+ info.mach = bfd_get_mach(bfdf);
+ info.buffer = image;
+ info.buffer_length = len;
+
+ disassemble_init_for_target(&info);
+
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ disassemble = disassembler(info.arch,
+ bfd_big_endian(bfdf),
+ info.mach,
+ bfdf);
+#else
+ disassemble = disassembler(bfdf);
+#endif
+ assert(disassemble);
+
+ do {
+ printf("%4x:\t", pc);
+
+ count = disassemble(pc, &info);
+
+ if (opcodes) {
+ printf("\n\t");
+ for (i = 0; i < count; ++i)
+ printf("%02x ", (uint8_t) image[pc + i]);
+ }
+ printf("\n");
+
+ pc += count;
+	} while (count > 0 && pc < len);
+
+ bfd_close(bfdf);
+}
+
+static char *get_klog_buff(unsigned int *klen)
+{
+ int ret, len;
+ char *buff;
+
+ len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0);
+ if (len < 0)
+ return NULL;
+
+ buff = malloc(len);
+ if (!buff)
+ return NULL;
+
+ ret = klogctl(CMD_ACTION_READ_ALL, buff, len);
+ if (ret < 0) {
+ free(buff);
+ return NULL;
+ }
+
+ *klen = ret;
+ return buff;
+}
+
+static char *get_flog_buff(const char *file, unsigned int *klen)
+{
+ int fd, ret, len;
+ struct stat fi;
+ char *buff;
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0)
+ return NULL;
+
+ ret = fstat(fd, &fi);
+ if (ret < 0 || !S_ISREG(fi.st_mode))
+ goto out;
+
+ len = fi.st_size + 1;
+ buff = malloc(len);
+ if (!buff)
+ goto out;
+
+ memset(buff, 0, len);
+ ret = read(fd, buff, len - 1);
+ if (ret <= 0)
+ goto out_free;
+
+ close(fd);
+ *klen = ret;
+ return buff;
+out_free:
+ free(buff);
+out:
+ close(fd);
+ return NULL;
+}
+
+static char *get_log_buff(const char *file, unsigned int *klen)
+{
+ return file ? get_flog_buff(file, klen) : get_klog_buff(klen);
+}
+
+static void put_log_buff(char *buff)
+{
+ free(buff);
+}
+
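+/*
+ * The regex and parser below expect kernel log lines of roughly this
+ * shape (illustrative sample from a bpf_jit_enable=2 run):
+ *
+ *   flen=6 proglen=70 pass=3 image=ffffffffa0069c8f
+ *   JIT code: 00000000: 55 48 89 e5 48 83 ec 60 48 89 5d f8 44 8b 4f 68
+ *   JIT code: 00000010: 44 2b 4f 6c 4c 8b 87 d8 00 00 00 be 0c 00 00 00
+ */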
+static uint8_t *get_last_jit_image(char *haystack, size_t hlen,
+ unsigned int *ilen)
+{
+ char *ptr, *pptr, *tmp;
+ off_t off = 0;
+ unsigned int proglen;
+ int ret, flen, pass, ulen = 0;
+ regmatch_t pmatch[1];
+ unsigned long base;
+ regex_t regex;
+ uint8_t *image;
+
+ if (hlen == 0)
+ return NULL;
+
+ ret = regcomp(&regex, "flen=[[:alnum:]]+ proglen=[[:digit:]]+ "
+ "pass=[[:digit:]]+ image=[[:xdigit:]]+", REG_EXTENDED);
+ assert(ret == 0);
+
+ ptr = haystack;
+ memset(pmatch, 0, sizeof(pmatch));
+
+ while (1) {
+ ret = regexec(&regex, ptr, 1, pmatch, 0);
+ if (ret == 0) {
+ ptr += pmatch[0].rm_eo;
+ off += pmatch[0].rm_eo;
+ assert(off < hlen);
+ } else
+ break;
+ }
+
+ ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
+ ret = sscanf(ptr, "flen=%d proglen=%u pass=%d image=%lx",
+ &flen, &proglen, &pass, &base);
+ if (ret != 4) {
+ regfree(&regex);
+ return NULL;
+ }
+	if (proglen > 1000000) {
+		printf("proglen of %u too big, stopping\n", proglen);
+		regfree(&regex);
+		return NULL;
+	}
+
+	image = malloc(proglen);
+	if (!image) {
+		printf("Out of memory\n");
+		regfree(&regex);
+		return NULL;
+	}
+ memset(image, 0, proglen);
+
+ tmp = ptr = haystack + off;
+ while ((ptr = strtok(tmp, "\n")) != NULL && ulen < proglen) {
+ tmp = NULL;
+ if (!strstr(ptr, "JIT code"))
+ continue;
+ pptr = ptr;
+ while ((ptr = strstr(pptr, ":")))
+ pptr = ptr + 1;
+ ptr = pptr;
+ do {
+ image[ulen++] = (uint8_t) strtoul(pptr, &pptr, 16);
+ if (ptr == pptr) {
+ ulen--;
+ break;
+ }
+ if (ulen >= proglen)
+ break;
+ ptr = pptr;
+ } while (1);
+ }
+
+ assert(ulen == proglen);
+ printf("%u bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
+ proglen, pass, flen);
+ printf("%lx + <x>:\n", base);
+
+ regfree(&regex);
+ *ilen = ulen;
+ return image;
+}
+
+static void usage(void)
+{
+ printf("Usage: bpf_jit_disasm [...]\n");
+ printf(" -o Also display related opcodes (default: off).\n");
+ printf(" -O <file> Write binary image of code to file, don't disassemble to stdout.\n");
+ printf(" -f <file> Read last image dump from file or stdin (default: klog).\n");
+ printf(" -h Display this help.\n");
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int len, klen, opt, opcodes = 0;
+ char *kbuff, *file = NULL;
+ char *ofile = NULL;
+ int ofd;
+ ssize_t nr;
+ uint8_t *pos;
+ uint8_t *image = NULL;
+
+ while ((opt = getopt(argc, argv, "of:O:")) != -1) {
+ switch (opt) {
+ case 'o':
+ opcodes = 1;
+ break;
+ case 'O':
+ ofile = optarg;
+ break;
+ case 'f':
+ file = optarg;
+ break;
+ default:
+ usage();
+ return -1;
+ }
+ }
+
+ bfd_init();
+
+ kbuff = get_log_buff(file, &klen);
+ if (!kbuff) {
+ fprintf(stderr, "Could not retrieve log buffer!\n");
+ return -1;
+ }
+
+ image = get_last_jit_image(kbuff, klen, &len);
+ if (!image) {
+ fprintf(stderr, "No JIT image found!\n");
+ goto done;
+ }
+ if (!ofile) {
+ get_asm_insns(image, len, opcodes);
+ goto done;
+ }
+
+ ofd = open(ofile, O_WRONLY | O_CREAT | O_TRUNC, DEFFILEMODE);
+ if (ofd < 0) {
+ fprintf(stderr, "Could not open file %s for writing: ", ofile);
+ perror(NULL);
+ goto done;
+ }
+ pos = image;
+ do {
+ nr = write(ofd, pos, len);
+ if (nr < 0) {
+ fprintf(stderr, "Could not write data to %s: ", ofile);
+ perror(NULL);
+ goto done;
+ }
+ len -= nr;
+ pos += nr;
+ } while (len);
+ close(ofd);
+
+done:
+ put_log_buff(kbuff);
+ free(image);
+ return 0;
+}
diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
new file mode 100644
index 0000000000..a736f64dc5
--- /dev/null
+++ b/tools/bpf/bpftool/.gitignore
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+*.d
+/bootstrap/
+/bpftool
+bpftool*.8
+FEATURE-DUMP.bpftool
+feature
+libbpf
+/*.skel.h
+/vmlinux.h
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
new file mode 100644
index 0000000000..ac8487dcff
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+include ../../../scripts/Makefile.include
+
+INSTALL ?= install
+RM ?= rm -f
+RMDIR ?= rmdir --ignore-fail-on-non-empty
+
+ifeq ($(V),1)
+ Q =
+else
+ Q = @
+endif
+
+prefix ?= /usr/local
+mandir ?= $(prefix)/man
+man8dir = $(mandir)/man8
+
+MAN8_RST = $(wildcard bpftool*.rst)
+
+_DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST))
+DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
+
+man: man8
+man8: $(DOC_MAN8)
+
+RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+RST2MAN_OPTS += --verbose --strip-comments
+
+list_pages = $(sort $(basename $(filter-out $(1),$(MAN8_RST))))
+see_also = $(subst " ",, \
+ "\n" \
+ "SEE ALSO\n" \
+ "========\n" \
+ "\t**bpf**\ (2),\n" \
+ "\t**bpf-helpers**\\ (7)" \
+ $(foreach page,$(call list_pages,$(1)),",\n\t**$(page)**\\ (8)") \
+ "\n")
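+# For instance (illustrative), for bpftool-map.rst the helpers above expand
+# to a SEE ALSO section referencing bpf(2), bpf-helpers(7) and every other
+# bpftool-*(8) page except bpftool-map itself.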
+
+$(OUTPUT)%.8: %.rst
+ifndef RST2MAN_DEP
+ $(error "rst2man not found, but required to generate man pages")
+endif
+ $(QUIET_GEN)( cat $< ; printf "%b" $(call see_also,$<) ) | rst2man $(RST2MAN_OPTS) > $@
+
+clean:
+ $(call QUIET_CLEAN, Documentation)
+ $(Q)$(RM) $(DOC_MAN8)
+
+install: man
+ $(call QUIET_INSTALL, Documentation-man)
+ $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir)
+ $(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir)
+
+uninstall:
+ $(call QUIET_UNINST, Documentation-man)
+ $(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8))
+ $(Q)$(RMDIR) $(DESTDIR)$(man8dir)
+
+.PHONY: man man8 clean install uninstall
+.DEFAULT_GOAL := man
diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
new file mode 100644
index 0000000000..342716f74e
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
@@ -0,0 +1,268 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+================
+bpftool-btf
+================
+-------------------------------------------------------------------------------
+tool for inspection of BTF data
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **btf** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| | { **-B** | **--base-btf** } }
+
+	*COMMANDS* := { **show** | **list** | **dump** | **help** }
+
+BTF COMMANDS
+=============
+
+| **bpftool** **btf** { **show** | **list** } [**id** *BTF_ID*]
+| **bpftool** **btf dump** *BTF_SRC* [**format** *FORMAT*]
+| **bpftool** **btf help**
+|
+| *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* }
+| *FORMAT* := { **raw** | **c** }
+| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
+| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+
+DESCRIPTION
+===========
+	**bpftool btf { show | list }** [**id** *BTF_ID*]
+		  Show information about loaded BTF objects. If a BTF ID is
+		  specified, show information only about the given BTF object;
+		  otherwise list all BTF objects currently loaded on the
+		  system.
+
+ Since Linux 5.8 bpftool is able to discover information about
+ processes that hold open file descriptors (FDs) against BTF
+ objects. On such kernels bpftool will automatically emit this
+ information as well.
+
+ **bpftool btf dump** *BTF_SRC*
+ Dump BTF entries from a given *BTF_SRC*.
+
+		  When **id** is specified, the BTF object with that ID is
+		  loaded and all its BTF types are emitted.
+
+		  When **map** is provided, the map is expected to have an
+		  associated BTF object with BTF types describing its key and
+		  value. It's possible to select whether to dump only the BTF
+		  type(s) associated with the key (**key**), the value
+		  (**value**), both key and value (**kv**), or all BTF types
+		  present in the associated BTF object (**all**). If not
+		  specified, **kv** is assumed.
+
+		  When **prog** is provided, the program is expected to have
+		  an associated BTF object with BTF types.
+
+		  When specifying *FILE*, an ELF file is expected, containing
+		  a .BTF section with well-defined BTF binary format data,
+		  typically produced by clang or pahole.
+
+		  The **format** option can be used to override the default
+		  (raw) output format. Raw (**raw**) and C-syntax (**c**)
+		  output formats are supported.
+
+ **bpftool btf help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+ -B, --base-btf *FILE*
+ Pass a base BTF object. Base BTF objects are typically used
+ with BTF objects for kernel modules. To avoid duplicating
+		all kernel symbols required by modules, BTF objects for
+		modules are "split": they are built incrementally on top of
+		the kernel (vmlinux) BTF object. So the base BTF reference
+		should usually point to the kernel BTF.
+
+ When the main BTF object to process (for example, the
+ module BTF to dump) is passed as a *FILE*, bpftool attempts
+ to autodetect the path for the base object, and passing
+ this option is optional. When the main BTF object is passed
+ through other handles, this option becomes necessary.
+
+EXAMPLES
+========
+**# bpftool btf dump id 1226**
+
+::
+
+ [1] PTR '(anon)' type_id=2
+ [2] STRUCT 'dummy_tracepoint_args' size=16 vlen=2
+ 'pad' type_id=3 bits_offset=0
+ 'sock' type_id=4 bits_offset=64
+ [3] INT 'long long unsigned int' size=8 bits_offset=0 nr_bits=64 encoding=(none)
+ [4] PTR '(anon)' type_id=5
+ [5] FWD 'sock' fwd_kind=union
+
+This gives an example of default output for all supported BTF kinds.
+
+**$ cat prog.c**
+
+::
+
+ struct fwd_struct;
+
+ enum my_enum {
+ VAL1 = 3,
+ VAL2 = 7,
+ };
+
+ typedef struct my_struct my_struct_t;
+
+ struct my_struct {
+ const unsigned int const_int_field;
+ int bitfield_field: 4;
+ char arr_field[16];
+ const struct fwd_struct *restrict fwd_field;
+ enum my_enum enum_field;
+ volatile my_struct_t *typedef_ptr_field;
+ };
+
+ union my_union {
+ int a;
+ struct my_struct b;
+ };
+
+ struct my_struct struct_global_var __attribute__((section("data_sec"))) = {
+ .bitfield_field = 3,
+ .enum_field = VAL1,
+ };
+ int global_var __attribute__((section("data_sec"))) = 7;
+
+ __attribute__((noinline))
+ int my_func(union my_union *arg1, int arg2)
+ {
+ static int static_var __attribute__((section("data_sec"))) = 123;
+ static_var++;
+ return static_var;
+ }
+
+**$ bpftool btf dump file prog.o**
+
+::
+
+ [1] PTR '(anon)' type_id=2
+ [2] UNION 'my_union' size=48 vlen=2
+ 'a' type_id=3 bits_offset=0
+ 'b' type_id=4 bits_offset=0
+ [3] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
+ [4] STRUCT 'my_struct' size=48 vlen=6
+ 'const_int_field' type_id=5 bits_offset=0
+ 'bitfield_field' type_id=3 bits_offset=32 bitfield_size=4
+ 'arr_field' type_id=8 bits_offset=40
+ 'fwd_field' type_id=10 bits_offset=192
+ 'enum_field' type_id=14 bits_offset=256
+ 'typedef_ptr_field' type_id=15 bits_offset=320
+ [5] CONST '(anon)' type_id=6
+ [6] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)
+ [7] INT 'char' size=1 bits_offset=0 nr_bits=8 encoding=SIGNED
+ [8] ARRAY '(anon)' type_id=7 index_type_id=9 nr_elems=16
+ [9] INT '__ARRAY_SIZE_TYPE__' size=4 bits_offset=0 nr_bits=32 encoding=(none)
+ [10] RESTRICT '(anon)' type_id=11
+ [11] PTR '(anon)' type_id=12
+ [12] CONST '(anon)' type_id=13
+ [13] FWD 'fwd_struct' fwd_kind=union
+ [14] ENUM 'my_enum' size=4 vlen=2
+ 'VAL1' val=3
+ 'VAL2' val=7
+ [15] PTR '(anon)' type_id=16
+ [16] VOLATILE '(anon)' type_id=17
+ [17] TYPEDEF 'my_struct_t' type_id=4
+ [18] FUNC_PROTO '(anon)' ret_type_id=3 vlen=2
+ 'arg1' type_id=1
+ 'arg2' type_id=3
+ [19] FUNC 'my_func' type_id=18
+ [20] VAR 'struct_global_var' type_id=4, linkage=global-alloc
+ [21] VAR 'global_var' type_id=3, linkage=global-alloc
+ [22] VAR 'my_func.static_var' type_id=3, linkage=static
+ [23] DATASEC 'data_sec' size=0 vlen=3
+ type_id=20 offset=0 size=48
+ type_id=21 offset=0 size=4
+ type_id=22 offset=52 size=4
+
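+To render the same BTF data as C definitions instead of the raw format, the
+**format c** keyword can be appended (output omitted here):
+
+**$ bpftool btf dump file prog.o format c**
+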
+The following commands print BTF types associated with the specified map's key,
+value, both key and value, and all BTF types, respectively. By default, both
+key and value types will be printed.
+
+**# bpftool btf dump map id 123 key**
+
+::
+
+ [39] TYPEDEF 'u32' type_id=37
+
+**# bpftool btf dump map id 123 value**
+
+::
+
+ [86] PTR '(anon)' type_id=87
+
+**# bpftool btf dump map id 123 kv**
+
+::
+
+ [39] TYPEDEF 'u32' type_id=37
+ [86] PTR '(anon)' type_id=87
+
+**# bpftool btf dump map id 123 all**
+
+::
+
+ [1] PTR '(anon)' type_id=0
+ .
+ .
+ .
+ [2866] ARRAY '(anon)' type_id=52 index_type_id=51 nr_elems=4
+
+All the standard ways to specify a map or program are supported:
+
+**# bpftool btf dump map id 123**
+
+**# bpftool btf dump map pinned /sys/fs/bpf/map_name**
+
+**# bpftool btf dump prog id 456**
+
+**# bpftool btf dump prog tag b88e0a09b1d9759d**
+
+**# bpftool btf dump prog pinned /sys/fs/bpf/prog_name**
+
+|
+| **# bpftool btf dump file /sys/kernel/btf/i2c_smbus**
+| (or)
+| **# I2C_SMBUS_ID=$(bpftool btf show -p | jq '.[] | select(.name=="i2c_smbus").id')**
+| **# bpftool btf dump id ${I2C_SMBUS_ID} -B /sys/kernel/btf/vmlinux**
+
+::
+
+ [104848] STRUCT 'i2c_smbus_alert' size=40 vlen=2
+ 'alert' type_id=393 bits_offset=0
+ 'ara' type_id=56050 bits_offset=256
+ [104849] STRUCT 'alert_data' size=12 vlen=3
+ 'addr' type_id=16 bits_offset=0
+ 'type' type_id=56053 bits_offset=32
+ 'data' type_id=7 bits_offset=64
+ [104850] PTR '(anon)' type_id=104848
+ [104851] PTR '(anon)' type_id=104849
+ [104852] FUNC 'i2c_register_spd' type_id=84745 linkage=static
+ [104853] FUNC 'smbalert_driver_init' type_id=1213 linkage=static
+ [104854] FUNC_PROTO '(anon)' ret_type_id=18 vlen=1
+ 'ara' type_id=56050
+ [104855] FUNC 'i2c_handle_smbus_alert' type_id=104854 linkage=static
+ [104856] FUNC 'smbalert_remove' type_id=104854 linkage=static
+ [104857] FUNC_PROTO '(anon)' ret_type_id=18 vlen=2
+ 'ara' type_id=56050
+ 'id' type_id=56056
+ [104858] FUNC 'smbalert_probe' type_id=104857 linkage=static
+ [104859] FUNC 'smbalert_work' type_id=9695 linkage=static
+ [104860] FUNC 'smbus_alert' type_id=71367 linkage=static
+ [104861] FUNC 'smbus_do_alert' type_id=84827 linkage=static
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
new file mode 100644
index 0000000000..bd015ec984
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -0,0 +1,157 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+================
+bpftool-cgroup
+================
+-------------------------------------------------------------------------------
+tool for inspection and simple manipulation of eBPF progs
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **cgroup** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } }
+
+ *COMMANDS* :=
+ { **show** | **list** | **tree** | **attach** | **detach** | **help** }
+
+CGROUP COMMANDS
+===============
+
+| **bpftool** **cgroup** { **show** | **list** } *CGROUP* [**effective**]
+| **bpftool** **cgroup tree** [*CGROUP_ROOT*] [**effective**]
+| **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
+| **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
+| **bpftool** **cgroup help**
+|
+| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+| *ATTACH_TYPE* := { **cgroup_inet_ingress** | **cgroup_inet_egress** |
+| **cgroup_inet_sock_create** | **cgroup_sock_ops** |
+| **cgroup_device** | **cgroup_inet4_bind** | **cgroup_inet6_bind** |
+| **cgroup_inet4_post_bind** | **cgroup_inet6_post_bind** |
+| **cgroup_inet4_connect** | **cgroup_inet6_connect** |
+| **cgroup_inet4_getpeername** | **cgroup_inet6_getpeername** |
+| **cgroup_inet4_getsockname** | **cgroup_inet6_getsockname** |
+| **cgroup_udp4_sendmsg** | **cgroup_udp6_sendmsg** |
+| **cgroup_udp4_recvmsg** | **cgroup_udp6_recvmsg** |
+| **cgroup_sysctl** | **cgroup_getsockopt** | **cgroup_setsockopt** |
+| **cgroup_inet_sock_release** }
+| *ATTACH_FLAGS* := { **multi** | **override** }
+
+DESCRIPTION
+===========
+ **bpftool cgroup { show | list }** *CGROUP* [**effective**]
+ List all programs attached to the cgroup *CGROUP*.
+
+ Output will start with program ID followed by attach type,
+ attach flags and program name.
+
+	  If **effective** is specified, retrieve the effective programs
+	  that will execute for events within the cgroup. This includes
+	  inherited programs along with those attached directly.
+
+ **bpftool cgroup tree** [*CGROUP_ROOT*] [**effective**]
+ Iterate over all cgroups in *CGROUP_ROOT* and list all
+	  attached programs. If *CGROUP_ROOT* is not specified,
+	  bpftool uses the cgroup v2 mountpoint.
+
+ The output is similar to the output of cgroup show/list
+ commands: it starts with absolute cgroup path, followed by
+ program ID, attach type, attach flags and program name.
+
+	  If **effective** is specified, retrieve the effective programs
+	  that will execute for events within the cgroup. This includes
+	  inherited programs along with those attached directly.
+
+ **bpftool cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
+ Attach program *PROG* to the cgroup *CGROUP* with attach type
+ *ATTACH_TYPE* and optional *ATTACH_FLAGS*.
+
+	  *ATTACH_FLAGS* can be one of: **override**, meaning that if a
+	  sub-cgroup installs some bpf program, the program in this cgroup
+	  yields to the sub-cgroup program; or **multi**, meaning that if a
+	  sub-cgroup installs some bpf program, that cgroup program gets run
+	  in addition to the program in this cgroup.
+
+ Only one program is allowed to be attached to a cgroup with
+ no attach flags or the **override** flag. Attaching another
+	  program will release the old program and attach the new one.
+
+ Multiple programs are allowed to be attached to a cgroup with
+ **multi**. They are executed in FIFO order (those that were
+ attached first, run first).
+
+ Non-default *ATTACH_FLAGS* are supported by kernel version 4.14
+ and later.
+
+	  *ATTACH_TYPE* can be one of:
+ **ingress** ingress path of the inet socket (since 4.10);
+ **egress** egress path of the inet socket (since 4.10);
+ **sock_create** opening of an inet socket (since 4.10);
+ **sock_ops** various socket operations (since 4.12);
+ **device** device access (since 4.15);
+ **bind4** call to bind(2) for an inet4 socket (since 4.17);
+ **bind6** call to bind(2) for an inet6 socket (since 4.17);
+ **post_bind4** return from bind(2) for an inet4 socket (since 4.17);
+ **post_bind6** return from bind(2) for an inet6 socket (since 4.17);
+ **connect4** call to connect(2) for an inet4 socket (since 4.17);
+ **connect6** call to connect(2) for an inet6 socket (since 4.17);
+ **sendmsg4** call to sendto(2), sendmsg(2), sendmmsg(2) for an
+ unconnected udp4 socket (since 4.18);
+ **sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an
+ unconnected udp6 socket (since 4.18);
+ **recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
+ an unconnected udp4 socket (since 5.2);
+ **recvmsg6** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
+ an unconnected udp6 socket (since 5.2);
+ **sysctl** sysctl access (since 5.2);
+ **getsockopt** call to getsockopt (since 5.3);
+ **setsockopt** call to setsockopt (since 5.3);
+ **getpeername4** call to getpeername(2) for an inet4 socket (since 5.8);
+ **getpeername6** call to getpeername(2) for an inet6 socket (since 5.8);
+ **getsockname4** call to getsockname(2) for an inet4 socket (since 5.8);
+	  **getsockname6** call to getsockname(2) for an inet6 socket (since 5.8);
+	  **sock_release** closing a userspace inet socket (since 5.9).
+
+ **bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
+ Detach *PROG* from the cgroup *CGROUP* and attach type
+ *ATTACH_TYPE*.
+
+	**bpftool cgroup help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+ -f, --bpffs
+ Show file names of pinned programs.
+
+EXAMPLES
+========
+|
+| **# mount -t bpf none /sys/fs/bpf/**
+| **# mkdir /sys/fs/cgroup/test.slice**
+| **# bpftool prog load ./device_cgroup.o /sys/fs/bpf/prog**
+| **# bpftool cgroup attach /sys/fs/cgroup/test.slice/ device id 1 multi**
+
+**# bpftool cgroup list /sys/fs/cgroup/test.slice/**
+
+::
+
+ ID AttachType AttachFlags Name
+    1        device          multi           bpf_prog1
+
+|
+| **# bpftool cgroup detach /sys/fs/cgroup/test.slice/ device id 1**
+| **# bpftool cgroup list /sys/fs/cgroup/test.slice/**
+
+::
+
+ ID AttachType AttachFlags Name
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
new file mode 100644
index 0000000000..e44039f89b
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -0,0 +1,90 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+===============
+bpftool-feature
+===============
+-------------------------------------------------------------------------------
+tool for inspection of eBPF-related parameters for Linux kernel or net device
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **feature** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| }
+
+	*COMMANDS* := { **probe** | **list_builtins** | **help** }
+
+FEATURE COMMANDS
+================
+
+| **bpftool** **feature probe** [*COMPONENT*] [**full**] [**unprivileged**] [**macros** [**prefix** *PREFIX*]]
+| **bpftool** **feature list_builtins** *GROUP*
+| **bpftool** **feature help**
+|
+| *COMPONENT* := { **kernel** | **dev** *NAME* }
+| *GROUP* := { **prog_types** | **map_types** | **attach_types** | **link_types** | **helpers** }
+
+DESCRIPTION
+===========
+ **bpftool feature probe** [**kernel**] [**full**] [**macros** [**prefix** *PREFIX*]]
+ Probe the running kernel and dump a number of eBPF-related
+ parameters, such as availability of the **bpf**\ () system call,
+ JIT status, eBPF program types availability, eBPF helper
+ functions availability, and more.
+
+		  By default, bpftool **does not run probes** for the
+		  **bpf_probe_write_user**\ () and **bpf_trace_printk**\ ()
+		  helpers, which print warnings to kernel logs. To enable them
+		  and run all probes, the **full** keyword should be used.
+
+ If the **macros** keyword (but not the **-j** option) is
+ passed, a subset of the output is dumped as a list of
+ **#define** macros that are ready to be included in a C
+ header file, for example. If, additionally, **prefix** is
+ used to define a *PREFIX*, the provided string will be used
+ as a prefix to the names of the macros: this can be used to
+ avoid conflicts on macro names when including the output of
+ this command as a header file.
+
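+		  For example, a feature header with prefixed macro names
+		  could be generated as follows (the FOO_ prefix and the
+		  macro shown are illustrative only):
+
+		  ::
+
+		      # bpftool feature probe kernel macros prefix FOO_ > bpf_features.h
+		      # grep BPF_SYSCALL bpf_features.h
+		      #define FOO_HAVE_BPF_SYSCALL
+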
+ Keyword **kernel** can be omitted. If no probe target is
+ specified, probing the kernel is the default behaviour.
+
+ When the **unprivileged** keyword is used, bpftool will dump
+ only the features available to a user who does not have the
+ **CAP_SYS_ADMIN** capability set. The features available in
+ that case usually represent a small subset of the parameters
+ supported by the system. Unprivileged users MUST use the
+ **unprivileged** keyword: This is to avoid misdetection if
+ bpftool is inadvertently run as non-root, for example. This
+ keyword is unavailable if bpftool was compiled without
+ libcap.
+
+ **bpftool feature probe dev** *NAME* [**full**] [**macros** [**prefix** *PREFIX*]]
+ Probe network device for supported eBPF features and dump
+ results to the console.
+
+ The keywords **full**, **macros** and **prefix** have the
+ same role as when probing the kernel.
+
+ **bpftool feature list_builtins** *GROUP*
+ List items known to bpftool. These can be BPF program types
+ (**prog_types**), BPF map types (**map_types**), attach types
+ (**attach_types**), link types (**link_types**), or BPF helper
+ functions (**helpers**). The command does not probe the system, but
+ simply lists the elements that bpftool knows from compilation time,
+ as provided from libbpf (for all object types) or from the BPF UAPI
+ header (list of helpers). This can be used in scripts to iterate over
+ BPF types or helpers.
+
+ **bpftool feature help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
new file mode 100644
index 0000000000..5006e724d1
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -0,0 +1,446 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+================
+bpftool-gen
+================
+-------------------------------------------------------------------------------
+tool for BPF code-generation
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **gen** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| | { **-L** | **--use-loader** } }
+
+	*COMMAND* := { **object** | **skeleton** | **subskeleton** | **min_core_btf** | **help** }
+
+GEN COMMANDS
+=============
+
+| **bpftool** **gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...]
+| **bpftool** **gen skeleton** *FILE* [**name** *OBJECT_NAME*]
+| **bpftool** **gen subskeleton** *FILE* [**name** *OBJECT_NAME*]
+| **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...]
+| **bpftool** **gen help**
+
+DESCRIPTION
+===========
+ **bpftool gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...]
+ Statically link (combine) together one or more *INPUT_FILE*'s
+ into a single resulting *OUTPUT_FILE*. All the files involved
+ are BPF ELF object files.
+
+ The rules of BPF static linking are mostly the same as for
+ user-space object files, but in addition to combining data
+ and instruction sections, .BTF and .BTF.ext (if present in
+ any of the input files) data are combined together. .BTF
+ data is deduplicated, so all the common types across
+ *INPUT_FILE*'s will only be represented once in the resulting
+ BTF information.
+
+		  BPF static linking allows one to partition BPF source code
+		  into individually compiled files that are then linked into
+		  a single resulting BPF object file, which can be used to
+		  generate a BPF skeleton (with the **gen skeleton** command)
+		  or passed directly into **libbpf** (using the
+		  **bpf_object__open()** family of APIs).
+
+ **bpftool gen skeleton** *FILE*
+ Generate BPF skeleton C header file for a given *FILE*.
+
+ BPF skeleton is an alternative interface to existing libbpf
+ APIs for working with BPF objects. Skeleton code is intended
+ to significantly shorten and simplify code to load and work
+		  with BPF programs from the userspace side. Generated code is
+		  tailored to the specific input BPF object *FILE*, reflecting
+		  its structure by listing out available maps, programs,
+		  variables, etc. The skeleton eliminates the need to look up
+		  the mentioned components by name. Instead, if skeleton
+		  instantiation succeeds, they are populated in the skeleton
+		  structure as valid libbpf types (e.g., **struct bpf_map**
+		  pointer) and can be passed to existing generic libbpf APIs.
+
+		  In addition to simple and reliable access to maps and
+		  programs, the skeleton provides storage for BPF links
+		  (**struct bpf_link**) for each BPF program within the BPF
+		  object. When requested, supported BPF programs will be
+		  automatically attached and the resulting BPF links stored
+		  for further use by the user in pre-allocated fields in the
+		  skeleton struct. For BPF programs that can't be
+		  automatically attached by libbpf, the user can attach them
+		  manually and store the resulting BPF link in the
+		  per-program link field. All links set up this way will be
+		  automatically destroyed on BPF skeleton destruction. This
+		  eliminates the need for users to manage links manually;
+		  they can rely on libbpf support to detach programs and free
+		  up resources.
+
+		  Another facility provided by the BPF skeleton is an
+		  interface to global variables of all supported kinds:
+		  mutable, read-only, as well as extern ones. This interface
+		  allows presetting initial values of variables before the BPF
+		  object is loaded and verified by the kernel. For
+		  non-read-only variables, the same interface can be used to
+		  fetch values of global variables on the userspace side,
+		  even if they are modified by BPF code.
+
+		  During skeleton generation, the contents of the source BPF
+		  object *FILE* are embedded within the generated code, so the
+		  object file does not need to be kept around. This ensures
+		  the skeleton and the BPF object file match 1-to-1 and always
+		  stay in sync. Generated code is dual-licensed under LGPL-2.1
+		  and BSD-2-Clause licenses.
+
+		  It is a design goal and guarantee that skeleton interfaces
+		  are interoperable with generic libbpf APIs. The user should
+		  always be able to use the skeleton API to create and load a
+		  BPF object, and later use libbpf APIs to keep working with
+		  specific maps, programs, etc.
+
+		  As part of the skeleton, a few custom functions are
+		  generated. Each of them is prefixed with the object name.
+		  The object name can either be derived from the object file
+		  name, i.e., if the BPF object file name is **example.o**,
+		  the BPF object name will be **example**; or it can be
+		  specified explicitly through the **name** *OBJECT_NAME*
+		  parameter. The following custom functions are provided
+		  (assuming **example** as the object name):
+
+		  - **example__open** and **example__open_opts**.
+		    These functions are used to instantiate a skeleton. They
+		    correspond to libbpf's **bpf_object__open**\ () API. The
+		    **_opts** variant accepts extra **bpf_object_open_opts**
+		    options.
+
+		  - **example__load**.
+		    This function creates maps, loads and verifies BPF
+		    programs, and initializes global data maps. It corresponds
+		    to libbpf's **bpf_object__load**\ () API.
+
+ - **example__open_and_load** combines **example__open** and
+ **example__load** invocations in one commonly used
+ operation.
+
+		  - **example__attach** and **example__detach**.
+		    This pair of functions allows attaching and detaching,
+		    respectively, an already loaded BPF object. Only BPF
+		    programs of types supported by libbpf for auto-attachment
+		    will be auto-attached and their corresponding BPF links
+		    instantiated. For other BPF programs, the user can
+		    manually create a BPF link and assign it to the
+		    corresponding field in the skeleton struct.
+		    **example__detach** will detach both links created
+		    automatically and those populated by the user manually.
+
+		  - **example__destroy**.
+		    Detaches and unloads BPF programs, freeing up all the
+		    resources used by the skeleton and BPF object.
+
+		  If the BPF object has global variables, corresponding
+		  structs with a memory layout matching the global data
+		  section layout will be created. Currently supported ones
+		  are: *.data*, *.bss*, *.rodata*, and *.kconfig*
+		  structs/data sections. These data sections/structs can be
+		  used to set up initial values of variables, if set before
+		  **example__load**. Afterwards, if the target kernel
+		  supports memory-mapped BPF arrays, the same structs can be
+		  used to fetch and update (non-read-only) data from
+		  userspace, with the same simplicity as on the BPF side.
+
+ **bpftool gen subskeleton** *FILE*
+ Generate BPF subskeleton C header file for a given *FILE*.
+
+ Subskeletons are similar to skeletons, except they do not own
+ the corresponding maps, programs, or global variables. They
+ require that the object file used to generate them is already
+ loaded into a *bpf_object* by some other means.
+
+ This functionality is useful when a library is included into a
+ larger BPF program. A subskeleton for the library would have
+ access to all objects and globals defined in it, without
+ having to know about the larger program.
+
+ Consequently, there are only two functions defined
+ for subskeletons:
+
+ - **example__open(bpf_object\*)**
+ Instantiates a subskeleton from an already opened (but not
+ necessarily loaded) **bpf_object**.
+
+ - **example__destroy()**
+ Frees the storage for the subskeleton but *does not* unload
+ any BPF programs or maps.
+
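+		  A minimal usage sketch (assuming a subskeleton generated
+		  with object name **example** from an already opened
+		  *bpf_object* named *obj*):
+
+		  ::
+
+		      struct example *sub = example__open(obj);
+
+		      if (sub) {
+		              /* read globals, grab map/prog pointers, etc. */
+		              example__destroy(sub);
+		      }
+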
+ **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...]
+		  Generate a minimum BTF file as *OUTPUT*, derived from a
+		  given *INPUT* BTF file, containing all the BTF types needed
+		  to satisfy the CO-RE relocations of one or more given eBPF
+		  objects.
+
+ When kernels aren't compiled with CONFIG_DEBUG_INFO_BTF,
+ libbpf, when loading an eBPF object, has to rely on external
+ BTF files to be able to calculate CO-RE relocations.
+
+ Usually, an external BTF file is built from existing kernel
+ DWARF data using pahole. It contains all the types used by
+ its respective kernel image and, because of that, is big.
+
+		  The min_core_btf feature builds smaller BTF files,
+		  customized for one or multiple eBPF objects, so they can be
+		  distributed together with an eBPF CO-RE based application,
+		  making the application portable across different kernel
+		  versions.
+
+		  Check the examples below for more information on how to
+		  use it.
+
+ **bpftool gen help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+ -L, --use-loader
+ For skeletons, generate a "light" skeleton (also known as "loader"
+ skeleton). A light skeleton contains a loader eBPF program. It does
+ not use the majority of the libbpf infrastructure, and does not need
+ libelf.
+
+EXAMPLES
+========
+**$ cat example1.bpf.c**
+
+::
+
+ #include <stdbool.h>
+ #include <linux/ptrace.h>
+ #include <linux/bpf.h>
+ #include <bpf/bpf_helpers.h>
+
+ const volatile int param1 = 42;
+ bool global_flag = true;
+ struct { int x; } data = {};
+
+ SEC("raw_tp/sys_enter")
+ int handle_sys_enter(struct pt_regs *ctx)
+ {
+ static long my_static_var;
+ if (global_flag)
+ my_static_var++;
+ else
+ data.x += param1;
+ return 0;
+ }
+
+**$ cat example2.bpf.c**
+
+::
+
+ #include <linux/ptrace.h>
+ #include <linux/bpf.h>
+ #include <bpf/bpf_helpers.h>
+
+ struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 128);
+ __type(key, int);
+ __type(value, long);
+ } my_map SEC(".maps");
+
+ SEC("raw_tp/sys_exit")
+ int handle_sys_exit(struct pt_regs *ctx)
+ {
+ int zero = 0;
+ bpf_map_lookup_elem(&my_map, &zero);
+ return 0;
+ }
+
+This is an example BPF application with two BPF programs and a mix of BPF maps
+and global variables. The source code is split across two source files.
+
+**$ clang --target=bpf -g -c example1.bpf.c -o example1.bpf.o**
+
+**$ clang --target=bpf -g -c example2.bpf.c -o example2.bpf.o**
+
+**$ bpftool gen object example.bpf.o example1.bpf.o example2.bpf.o**
+
+This set of commands compiles *example1.bpf.c* and *example2.bpf.c*
+individually and then statically links respective object files into the final
+BPF ELF object file *example.bpf.o*.
+
+**$ bpftool gen skeleton example.bpf.o name example | tee example.skel.h**
+
+::
+
+ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+ /* THIS FILE IS AUTOGENERATED! */
+ #ifndef __EXAMPLE_SKEL_H__
+ #define __EXAMPLE_SKEL_H__
+
+ #include <stdlib.h>
+ #include <bpf/libbpf.h>
+
+ struct example {
+ struct bpf_object_skeleton *skeleton;
+ struct bpf_object *obj;
+ struct {
+ struct bpf_map *rodata;
+ struct bpf_map *data;
+ struct bpf_map *bss;
+ struct bpf_map *my_map;
+ } maps;
+ struct {
+ struct bpf_program *handle_sys_enter;
+ struct bpf_program *handle_sys_exit;
+ } progs;
+ struct {
+ struct bpf_link *handle_sys_enter;
+ struct bpf_link *handle_sys_exit;
+ } links;
+ struct example__bss {
+ struct {
+ int x;
+ } data;
+ } *bss;
+ struct example__data {
+ _Bool global_flag;
+ long int handle_sys_enter_my_static_var;
+ } *data;
+ struct example__rodata {
+ int param1;
+ } *rodata;
+ };
+
+ static void example__destroy(struct example *obj);
+ static inline struct example *example__open_opts(
+ const struct bpf_object_open_opts *opts);
+ static inline struct example *example__open();
+ static inline int example__load(struct example *obj);
+ static inline struct example *example__open_and_load();
+ static inline int example__attach(struct example *obj);
+ static inline void example__detach(struct example *obj);
+
+ #endif /* __EXAMPLE_SKEL_H__ */
+
+**$ cat example.c**
+
+::
+
+ #include "example.skel.h"
+
+ int main()
+ {
+ struct example *skel;
+ int err = 0;
+
+ skel = example__open();
+ if (!skel)
+ goto cleanup;
+
+ skel->rodata->param1 = 128;
+
+ err = example__load(skel);
+ if (err)
+ goto cleanup;
+
+ err = example__attach(skel);
+ if (err)
+ goto cleanup;
+
+ /* all libbpf APIs are usable */
+ printf("my_map name: %s\n", bpf_map__name(skel->maps.my_map));
+ printf("sys_enter prog FD: %d\n",
+ bpf_program__fd(skel->progs.handle_sys_enter));
+
+ /* detach and re-attach sys_exit program */
+ bpf_link__destroy(skel->links.handle_sys_exit);
+ skel->links.handle_sys_exit =
+ bpf_program__attach(skel->progs.handle_sys_exit);
+
+ printf("my_static_var: %ld\n",
+ skel->bss->handle_sys_enter_my_static_var);
+
+ cleanup:
+ example__destroy(skel);
+ return err;
+ }
+
+**# ./example**
+
+::
+
+ my_map name: my_map
+ sys_enter prog FD: 8
+ my_static_var: 7
+
+This is a stripped-down version of the skeleton generated for the above example code.
+
+min_core_btf
+------------
+
+**$ bpftool btf dump file 5.4.0-example.btf format raw**
+
+::
+
+ [1] INT 'long unsigned int' size=8 bits_offset=0 nr_bits=64 encoding=(none)
+ [2] CONST '(anon)' type_id=1
+ [3] VOLATILE '(anon)' type_id=1
+ [4] ARRAY '(anon)' type_id=1 index_type_id=21 nr_elems=2
+ [5] PTR '(anon)' type_id=8
+ [6] CONST '(anon)' type_id=5
+ [7] INT 'char' size=1 bits_offset=0 nr_bits=8 encoding=(none)
+ [8] CONST '(anon)' type_id=7
+ [9] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)
+ <long output>
+
+**$ bpftool btf dump file one.bpf.o format raw**
+
+::
+
+ [1] PTR '(anon)' type_id=2
+ [2] STRUCT 'trace_event_raw_sys_enter' size=64 vlen=4
+ 'ent' type_id=3 bits_offset=0
+ 'id' type_id=7 bits_offset=64
+ 'args' type_id=9 bits_offset=128
+ '__data' type_id=12 bits_offset=512
+ [3] STRUCT 'trace_entry' size=8 vlen=4
+ 'type' type_id=4 bits_offset=0
+ 'flags' type_id=5 bits_offset=16
+ 'preempt_count' type_id=5 bits_offset=24
+ <long output>
+
+**$ bpftool gen min_core_btf 5.4.0-example.btf 5.4.0-smaller.btf one.bpf.o**
+
+**$ bpftool btf dump file 5.4.0-smaller.btf format raw**
+
+::
+
+ [1] TYPEDEF 'pid_t' type_id=6
+ [2] STRUCT 'trace_event_raw_sys_enter' size=64 vlen=1
+ 'args' type_id=4 bits_offset=128
+ [3] STRUCT 'task_struct' size=9216 vlen=2
+ 'pid' type_id=1 bits_offset=17920
+ 'real_parent' type_id=7 bits_offset=18048
+ [4] ARRAY '(anon)' type_id=5 index_type_id=8 nr_elems=6
+ [5] INT 'long unsigned int' size=8 bits_offset=0 nr_bits=64 encoding=(none)
+ [6] TYPEDEF '__kernel_pid_t' type_id=8
+ [7] PTR '(anon)' type_id=3
+ [8] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
+ <end>
+
+Now, the "5.4.0-smaller.btf" file may be used by libbpf as an external BTF file
+when loading the "one.bpf.o" object into the "5.4.0-example" kernel. Note that
+the generated BTF file won't allow other eBPF objects to be loaded, just the
+ones given to min_core_btf.
+
+::
+
+ LIBBPF_OPTS(bpf_object_open_opts, opts, .btf_custom_path = "5.4.0-smaller.btf");
+ struct bpf_object *obj;
+
+ obj = bpf_object__open_file("one.bpf.o", &opts);
+
+ ...
diff --git a/tools/bpf/bpftool/Documentation/bpftool-iter.rst b/tools/bpf/bpftool/Documentation/bpftool-iter.rst
new file mode 100644
index 0000000000..84839d4886
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-iter.rst
@@ -0,0 +1,76 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+============
+bpftool-iter
+============
+-------------------------------------------------------------------------------
+tool to create BPF iterators
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **iter** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| }
+
+ *COMMANDS* := { **pin** | **help** }
+
+ITER COMMANDS
+===================
+
+| **bpftool** **iter pin** *OBJ* *PATH* [**map** *MAP*]
+| **bpftool** **iter help**
+|
+| *OBJ* := /a/file/of/bpf_iter_target.o
+| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
+
+DESCRIPTION
+===========
+ **bpftool iter pin** *OBJ* *PATH* [**map** *MAP*]
+		  A bpf iterator combines kernel iteration over particular
+		  kernel data (e.g., tasks, bpf_maps, etc.) with a bpf
+		  program called for each kernel data object (e.g., one
+		  task, one bpf_map, etc.). User space can *read* the
+		  kernel iterator output through the *read()* syscall.
+
+		  The *pin* command creates a bpf iterator from *OBJ*,
+		  and pins it to *PATH*. The *PATH* should be located
+		  in a *bpffs* mount. It must not contain a dot
+		  character ('.'), which is reserved for future extensions
+		  of *bpffs*.
+
+		  A map element bpf iterator requires an additional parameter
+		  *MAP* so the bpf program can iterate over the elements of
+		  that map. This lets a bpf program in the kernel run for
+		  each map element and do checking, filtering, aggregation,
+		  etc. without copying data to user space.
+
+		  The user can then *cat PATH* to see the bpf iterator output.
+
+ **bpftool iter help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+EXAMPLES
+========
+**# bpftool iter pin bpf_iter_netlink.o /sys/fs/bpf/my_netlink**
+
+::
+
+ Create a file-based bpf iterator from bpf_iter_netlink.o and pin it
+ to /sys/fs/bpf/my_netlink
+
+**# bpftool iter pin bpf_iter_hashmap.o /sys/fs/bpf/my_hashmap map id 20**
+
+::
+
+ Create a file-based bpf iterator from bpf_iter_hashmap.o and map with
+ id 20, and pin it to /sys/fs/bpf/my_hashmap
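+
+**# cat /sys/fs/bpf/my_netlink**
+
+::
+
+   Read the pinned iterator: each read() triggers the bpf program, and
+   the output is whatever the program emits (e.g., via bpf_seq_printf(),
+   so the exact format is program-defined; illustrative).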
diff --git a/tools/bpf/bpftool/Documentation/bpftool-link.rst b/tools/bpf/bpftool/Documentation/bpftool-link.rst
new file mode 100644
index 0000000000..52a4eee4af
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-link.rst
@@ -0,0 +1,112 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+================
+bpftool-link
+================
+-------------------------------------------------------------------------------
+tool for inspection and simple manipulation of eBPF links
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **link** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } }
+
+	*COMMANDS* := { **show** | **list** | **pin** | **detach** | **help** }
+
+LINK COMMANDS
+=============
+
+| **bpftool** **link { show | list }** [*LINK*]
+| **bpftool** **link pin** *LINK* *FILE*
+| **bpftool** **link detach** *LINK*
+| **bpftool** **link help**
+|
+| *LINK* := { **id** *LINK_ID* | **pinned** *FILE* }
+
+
+DESCRIPTION
+===========
+ **bpftool link { show | list }** [*LINK*]
+	  Show information about active links. If *LINK* is
+	  specified, show information only about the given link;
+	  otherwise list all links currently active on the system.
+
+	  Output will start with the link ID followed by the link type
+	  and zero or more named attributes, some of which depend on
+	  the type of link.
+
+ Since Linux 5.8 bpftool is able to discover information about
+ processes that hold open file descriptors (FDs) against BPF
+ links. On such kernels bpftool will automatically emit this
+ information as well.
+
+ **bpftool link pin** *LINK* *FILE*
+ Pin link *LINK* as *FILE*.
+
+ Note: *FILE* must be located in *bpffs* mount. It must not
+ contain a dot character ('.'), which is reserved for future
+ extensions of *bpffs*.
+
+ **bpftool link detach** *LINK*
+	  Force-detach link *LINK*. The BPF link and its underlying BPF
+	  program will stay valid, but they will be detached from the
+	  respective BPF hook and the BPF link will transition into
+	  a defunct state until the last open file descriptor for that
+	  link is closed.
+
+ **bpftool link help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+ -f, --bpffs
+ When showing BPF links, show file names of pinned
+ links.
+
+ -n, --nomount
+ Do not automatically attempt to mount any virtual file system
+ (such as tracefs or BPF virtual file system) when necessary.
+
+EXAMPLES
+========
+**# bpftool link show**
+
+::
+
+ 10: cgroup prog 25
+ cgroup_id 614 attach_type egress
+ pids test_progs(223)
+
+**# bpftool --json --pretty link show**
+
+::
+
+ [{
+ "type": "cgroup",
+ "prog_id": 25,
+ "cgroup_id": 614,
+ "attach_type": "egress",
+ "pids": [{
+ "pid": 223,
+ "comm": "test_progs"
+ }
+ ]
+ }
+ ]
+
+|
+| **# bpftool link pin id 10 /sys/fs/bpf/link**
+| **# ls -l /sys/fs/bpf/**
+
+::
+
+ -rw------- 1 root root 0 Apr 23 21:39 link
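+
+The pinned link could later be force-detached with (illustrative):
+
+**# bpftool link detach pinned /sys/fs/bpf/link**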
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
new file mode 100644
index 0000000000..3b7ba037af
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -0,0 +1,277 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+================
+bpftool-map
+================
+-------------------------------------------------------------------------------
+tool for inspection and simple manipulation of eBPF maps
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **map** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } }
+
+ *COMMANDS* :=
+	*COMMANDS* :=
+	{ **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** |
+	**delete** | **pin** | **event_pipe** | **peek** | **push** | **pop** | **enqueue** |
+	**dequeue** | **freeze** | **help** }
+
+MAP COMMANDS
+=============
+
+| **bpftool** **map** { **show** | **list** } [*MAP*]
+| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \
+| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] \
+| [**offload_dev** *NAME*]
+| **bpftool** **map dump** *MAP*
+| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
+| **bpftool** **map lookup** *MAP* [**key** *DATA*]
+| **bpftool** **map getnext** *MAP* [**key** *DATA*]
+| **bpftool** **map delete** *MAP* **key** *DATA*
+| **bpftool** **map pin** *MAP* *FILE*
+| **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*]
+| **bpftool** **map peek** *MAP*
+| **bpftool** **map push** *MAP* **value** *VALUE*
+| **bpftool** **map pop** *MAP*
+| **bpftool** **map enqueue** *MAP* **value** *VALUE*
+| **bpftool** **map dequeue** *MAP*
+| **bpftool** **map freeze** *MAP*
+| **bpftool** **map help**
+|
+| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* | **name** *MAP_NAME* }
+| *DATA* := { [**hex**] *BYTES* }
+| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* }
+| *VALUE* := { *DATA* | *MAP* | *PROG* }
+| *UPDATE_FLAGS* := { **any** | **exist** | **noexist** }
+| *TYPE* := { **hash** | **array** | **prog_array** | **perf_event_array** | **percpu_hash**
+| | **percpu_array** | **stack_trace** | **cgroup_array** | **lru_hash**
+| | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps**
+| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
+| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
+| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage**
+| | **task_storage** | **bloom_filter** | **user_ringbuf** | **cgrp_storage** }
+
+DESCRIPTION
+===========
+ **bpftool map { show | list }** [*MAP*]
+	  Show information about loaded maps. If *MAP* is specified,
+	  show information only about the given maps; otherwise list all
+	  maps currently loaded on the system. In case of **name**,
+	  *MAP* may match several maps, which will all be shown.
+
+ Output will start with map ID followed by map type and
+ zero or more named attributes (depending on kernel version).
+
+ Since Linux 5.8 bpftool is able to discover information about
+ processes that hold open file descriptors (FDs) against BPF
+ maps. On such kernels bpftool will automatically emit this
+ information as well.
+
+ **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] [**offload_dev** *NAME*]
+ Create a new map with given parameters and pin it to *bpffs*
+ as *FILE*.
+
+ *FLAGS* should be an integer which is the combination of
+ desired flags, e.g. 1024 for **BPF_F_MMAPABLE** (see bpf.h
+ UAPI header for existing flags).
+
+ To create maps of type array-of-maps or hash-of-maps, the
+ **inner_map** keyword must be used to pass an inner map. The
+ kernel needs it to collect metadata related to the inner maps
+ that the new map will work with.
+
+ Keyword **offload_dev** expects a network interface name,
+ and is used to request hardware offload for the map.
+
+ **bpftool map dump** *MAP*
+ Dump all entries in a given *MAP*. In case of **name**,
+ *MAP* may match several maps which will all be dumped.
+
+ **bpftool map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
+ Update map entry for a given *KEY*.
+
+	  *UPDATE_FLAGS* can be one of: **any** update existing entry
+	  or add if it doesn't exist; **exist** update only if entry
+	  already exists; **noexist** update only if entry doesn't exist.
+
+ If the **hex** keyword is provided in front of the bytes
+ sequence, the bytes are parsed as hexadecimal values, even if
+ no "0x" prefix is added. If the keyword is not provided, then
+ the bytes are parsed as decimal values, unless a "0x" prefix
+ (for hexadecimal) or a "0" prefix (for octal) is provided.
+
+ **bpftool map lookup** *MAP* [**key** *DATA*]
+	  Look up **key** in the map.
+
+ **bpftool map getnext** *MAP* [**key** *DATA*]
+ Get next key. If *key* is not specified, get first key.
+
+ **bpftool map delete** *MAP* **key** *DATA*
+ Remove entry from the map.
+
+ **bpftool map pin** *MAP* *FILE*
+ Pin map *MAP* as *FILE*.
+
+ Note: *FILE* must be located in *bpffs* mount. It must not
+ contain a dot character ('.'), which is reserved for future
+ extensions of *bpffs*.
+
+ **bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*]
+ Read events from a **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map.
+
+ Install perf rings into a perf event array map and dump
+ output of any **bpf_perf_event_output**\ () call in the kernel.
+ By default read the number of CPUs on the system and
+ install perf ring for each CPU in the corresponding index
+ in the array.
+
+ If **cpu** and **index** are specified, install perf ring
+ for given **cpu** at **index** in the array (single ring).
+
+ Note that installing a perf ring into an array will silently
+ replace any existing ring. Any other application will stop
+ receiving events if it installed its rings earlier.
+
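+	  For example (illustrative), to install and read a single perf
+	  ring serving CPU 0 at index 0 of map 10:
+
+	  ::
+
+	      # bpftool map event_pipe id 10 cpu 0 index 0
+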
+ **bpftool map peek** *MAP*
+ Peek next value in the queue or stack.
+
+ **bpftool map push** *MAP* **value** *VALUE*
+ Push *VALUE* onto the stack.
+
+ **bpftool map pop** *MAP*
+ Pop and print value from the stack.
+
+ **bpftool map enqueue** *MAP* **value** *VALUE*
+ Enqueue *VALUE* into the queue.
+
+ **bpftool map dequeue** *MAP*
+ Dequeue and print value from the queue.
+
+ **bpftool map freeze** *MAP*
+		  Freeze the map as read-only from user space. Entries from a
+		  frozen map can no longer be updated or deleted with the
+		  **bpf**\ () system call. This operation is not reversible,
+ and the map remains immutable from user space until its
+ destruction. However, read and write permissions for BPF
+ programs to the map remain unchanged.
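+
+		  A short sketch (the pin path is illustrative):
+
+		  ::
+
+		      # bpftool map freeze pinned /sys/fs/bpf/my_map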
+
+ **bpftool map help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+ -f, --bpffs
+ Show file names of pinned maps.
+
+ -n, --nomount
+ Do not automatically attempt to mount any virtual file system
+ (such as tracefs or BPF virtual file system) when necessary.
+
+EXAMPLES
+========
+**# bpftool map show**
+
+::
+
+ 10: hash name some_map flags 0x0
+ key 4B value 8B max_entries 2048 memlock 167936B
+ pids systemd(1)
+
+The following three commands are equivalent:
+
+|
+| **# bpftool map update id 10 key hex 20 c4 b7 00 value hex 0f ff ff ab 01 02 03 4c**
+| **# bpftool map update id 10 key 0x20 0xc4 0xb7 0x00 value 0x0f 0xff 0xff 0xab 0x01 0x02 0x03 0x4c**
+| **# bpftool map update id 10 key 32 196 183 0 value 15 255 255 171 1 2 3 76**
+
+**# bpftool map lookup id 10 key 0 1 2 3**
+
+::
+
+ key: 00 01 02 03 value: 00 01 02 03 04 05 06 07
+
+
+**# bpftool map dump id 10**
+
+::
+
+ key: 00 01 02 03 value: 00 01 02 03 04 05 06 07
+ key: 0d 00 07 00 value: 02 00 00 00 01 02 03 04
+ Found 2 elements
+
+**# bpftool map getnext id 10 key 0 1 2 3**
+
+::
+
+ key:
+ 00 01 02 03
+ next key:
+ 0d 00 07 00
+
+|
+| **# mount -t bpf none /sys/fs/bpf/**
+| **# bpftool map pin id 10 /sys/fs/bpf/map**
+| **# bpftool map del pinned /sys/fs/bpf/map key 13 00 07 00**
+
+Note that map update can also be used in order to change the program references
+held by a program array map. This can be used, for example, to change the
+programs used for tail-call jumps at runtime, without having to reload the
+entry-point program. Below is an example of this use case: we load a program
+defining a prog array map, and with a main function that contains a tail call
+to other programs that can be used either to "process" packets or to "debug"
+processing. Note that the prog array map MUST be pinned into the BPF virtual
+file system for the map update to work successfully, as the kernel flushes prog
+array maps when they have no more references from user space (and the update
+would be lost as soon as bpftool exits).
+
+|
+| **# bpftool prog loadall tail_calls.o /sys/fs/bpf/foo type xdp**
+| **# bpftool prog --bpffs**
+
+::
+
+ 545: xdp name main_func tag 674b4b5597193dc3 gpl
+ loaded_at 2018-12-12T15:02:58+0000 uid 0
+ xlated 240B jited 257B memlock 4096B map_ids 294
+ pinned /sys/fs/bpf/foo/xdp
+ 546: xdp name bpf_func_process tag e369a529024751fc gpl
+ loaded_at 2018-12-12T15:02:58+0000 uid 0
+ xlated 200B jited 164B memlock 4096B
+ pinned /sys/fs/bpf/foo/process
+ 547: xdp name bpf_func_debug tag 0b597868bc7f0976 gpl
+ loaded_at 2018-12-12T15:02:58+0000 uid 0
+ xlated 200B jited 164B memlock 4096B
+ pinned /sys/fs/bpf/foo/debug
+
+**# bpftool map**
+
+::
+
+ 294: prog_array name jmp_table flags 0x0
+ key 4B value 4B max_entries 1 memlock 4096B
+ owner_prog_type xdp owner jited
+
+|
+| **# bpftool map pin id 294 /sys/fs/bpf/bar**
+| **# bpftool map dump pinned /sys/fs/bpf/bar**
+
+::
+
+ Found 0 elements
+
+|
+| **# bpftool map update pinned /sys/fs/bpf/bar key 0 0 0 0 value pinned /sys/fs/bpf/foo/debug**
+| **# bpftool map dump pinned /sys/fs/bpf/bar**
+
+::
+
+ key: 00 00 00 00 value: 22 02 00 00
+ Found 1 element
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
new file mode 100644
index 0000000000..5e2abd3de5
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -0,0 +1,182 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+================
+bpftool-net
+================
+-------------------------------------------------------------------------------
+tool for inspection of networking related bpf prog attachments
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **net** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| }
+
+ *COMMANDS* :=
+ { **show** | **list** | **attach** | **detach** | **help** }
+
+NET COMMANDS
+============
+
+| **bpftool** **net** { **show** | **list** } [ **dev** *NAME* ]
+| **bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ]
+| **bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME*
+| **bpftool** **net help**
+|
+| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
+| *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** }
+
+DESCRIPTION
+===========
+ **bpftool net { show | list }** [ **dev** *NAME* ]
+ List bpf program attachments in the kernel networking subsystem.
+
+		  Currently, device driver xdp attachments, tcx and old-style tc
+		  classifier/action attachments, flow_dissector attachments, as
+		  well as netfilter attachments are implemented, i.e., for the
+		  program types **BPF_PROG_TYPE_XDP**, **BPF_PROG_TYPE_SCHED_CLS**,
+		  **BPF_PROG_TYPE_SCHED_ACT**, **BPF_PROG_TYPE_FLOW_DISSECTOR**
+		  and **BPF_PROG_TYPE_NETFILTER**.
+
+ For programs attached to a particular cgroup, e.g.,
+ **BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**,
+ **BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ users can use **bpftool cgroup** to dump cgroup attachments.
+ For sk_{filter, skb, msg, reuseport} and lwt/seg6
+ bpf programs, users should consult other tools, e.g., iproute2.
+
+		  The current output will start with all xdp program attachments, followed by
+		  all tcx, then tc class/qdisc bpf program attachments, then flow_dissector
+		  and finally netfilter programs. Both xdp programs and tcx/tc programs are
+		  ordered based on ifindex number. If multiple bpf programs are attached
+		  to the same networking device through **tc**, the order will be first
+		  all bpf programs attached to tcx, then tc classes, then all bpf programs
+		  attached to non-clsact qdiscs, and finally all bpf programs attached
+		  to the root and clsact qdiscs.
+
+ **bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ]
+ Attach bpf program *PROG* to network interface *NAME* with
+		  Attach bpf program *PROG* to network interface *NAME* with
+		  type specified by *ATTACH_TYPE*. A previously attached bpf
+		  program can be replaced by using the **overwrite** option.
+ Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
+
+		  *ATTACH_TYPE* can be one of:
+		  **xdp** - try native XDP and fall back to generic XDP if the NIC driver does not support it;
+		  **xdpgeneric** - generic XDP; runs at the generic XDP hook, once the packet has already entered the receive path as an skb;
+		  **xdpdrv** - native XDP; runs at the earliest point in the driver's receive path;
+		  **xdpoffload** - offload XDP; runs directly on the NIC for each received packet.
+
+ **bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME*
+		  Detach the bpf program attached to network interface *NAME*
+		  with type specified by *ATTACH_TYPE*. To detach a bpf program,
+		  the same *ATTACH_TYPE* previously used to attach it must be
+		  specified.
+		  Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
+
+ **bpftool net help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+EXAMPLES
+========
+
+| **# bpftool net**
+
+::
+
+ xdp:
+ eth0(2) driver id 198
+
+ tc:
+ eth0(2) htb name prefix_matcher.o:[cls_prefix_matcher_htb] id 111727 act []
+ eth0(2) clsact/ingress fbflow_icmp id 130246 act []
+ eth0(2) clsact/egress prefix_matcher.o:[cls_prefix_matcher_clsact] id 111726
+ eth0(2) clsact/egress cls_fg_dscp id 108619 act []
+ eth0(2) clsact/egress fbflow_egress id 130245
+
+|
+| **# bpftool -jp net**
+
+::
+
+ [{
+ "xdp": [{
+ "devname": "eth0",
+ "ifindex": 2,
+ "mode": "driver",
+ "id": 198
+ }
+ ],
+ "tc": [{
+ "devname": "eth0",
+ "ifindex": 2,
+ "kind": "htb",
+ "name": "prefix_matcher.o:[cls_prefix_matcher_htb]",
+ "id": 111727,
+ "act": []
+ },{
+ "devname": "eth0",
+ "ifindex": 2,
+ "kind": "clsact/ingress",
+ "name": "fbflow_icmp",
+ "id": 130246,
+ "act": []
+ },{
+ "devname": "eth0",
+ "ifindex": 2,
+ "kind": "clsact/egress",
+ "name": "prefix_matcher.o:[cls_prefix_matcher_clsact]",
+                "id": 111726
+ },{
+ "devname": "eth0",
+ "ifindex": 2,
+ "kind": "clsact/egress",
+ "name": "cls_fg_dscp",
+ "id": 108619,
+ "act": []
+ },{
+ "devname": "eth0",
+ "ifindex": 2,
+ "kind": "clsact/egress",
+ "name": "fbflow_egress",
+                "id": 130245
+ }
+ ]
+ }
+ ]
+
+|
+| **# bpftool net attach xdpdrv id 16 dev enp6s0np0**
+| **# bpftool net**
+
+::
+
+ xdp:
+ enp6s0np0(4) driver id 16
+
+|
+| **# bpftool net attach xdpdrv id 16 dev enp6s0np0**
+| **# bpftool net attach xdpdrv id 20 dev enp6s0np0 overwrite**
+| **# bpftool net**
+
+::
+
+ xdp:
+ enp6s0np0(4) driver id 20
+
+|
+| **# bpftool net attach xdpdrv id 16 dev enp6s0np0**
+| **# bpftool net detach xdpdrv dev enp6s0np0**
+| **# bpftool net**
+
+::
+
+ xdp:
diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
new file mode 100644
index 0000000000..5fea633a82
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
@@ -0,0 +1,69 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+================
+bpftool-perf
+================
+-------------------------------------------------------------------------------
+tool for inspection of perf related bpf prog attachments
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **perf** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| }
+
+ *COMMANDS* :=
+ { **show** | **list** | **help** }
+
+PERF COMMANDS
+=============
+
+| **bpftool** **perf** { **show** | **list** }
+| **bpftool** **perf help**
+
+DESCRIPTION
+===========
+ **bpftool perf { show | list }**
+		  List all raw_tracepoint, tracepoint, kprobe and uprobe
+		  attachments in the system.
+
+ Output will start with process id and file descriptor in that process,
+ followed by bpf program id, attachment information, and attachment point.
+ The attachment point for raw_tracepoint/tracepoint is the trace probe name.
+ The attachment point for k[ret]probe is either symbol name and offset,
+ or a kernel virtual address.
+ The attachment point for u[ret]probe is the file name and the file offset.
+
+ **bpftool perf help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+EXAMPLES
+========
+
+| **# bpftool perf**
+
+::
+
+ pid 21711 fd 5: prog_id 5 kprobe func __x64_sys_write offset 0
+ pid 21765 fd 5: prog_id 7 kretprobe func __x64_sys_nanosleep offset 0
+ pid 21767 fd 5: prog_id 8 tracepoint sys_enter_nanosleep
+ pid 21800 fd 5: prog_id 9 uprobe filename /home/yhs/a.out offset 1159
+
+|
+| **# bpftool -j perf**
+
+::
+
+ [{"pid":21711,"fd":5,"prog_id":5,"fd_type":"kprobe","func":"__x64_sys_write","offset":0}, \
+ {"pid":21765,"fd":5,"prog_id":7,"fd_type":"kretprobe","func":"__x64_sys_nanosleep","offset":0}, \
+ {"pid":21767,"fd":5,"prog_id":8,"fd_type":"tracepoint","tracepoint":"sys_enter_nanosleep"}, \
+ {"pid":21800,"fd":5,"prog_id":9,"fd_type":"uprobe","filename":"/home/yhs/a.out","offset":1159}]
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
new file mode 100644
index 0000000000..dcae81bd27
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -0,0 +1,375 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+================
+bpftool-prog
+================
+-------------------------------------------------------------------------------
+tool for inspection and simple manipulation of eBPF progs
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **prog** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| |
+ { **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } |
+ { **-L** | **--use-loader** } }
+
+ *COMMANDS* :=
+ { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** |
+ **loadall** | **help** }
+
+PROG COMMANDS
+=============
+
+| **bpftool** **prog** { **show** | **list** } [*PROG*]
+| **bpftool** **prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }]
+| **bpftool** **prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }]
+| **bpftool** **prog pin** *PROG* *FILE*
+| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** { **idx** *IDX* | **name** *NAME* } *MAP*] [{ **offload_dev** | **xdpmeta_dev** } *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
+| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
+| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
+| **bpftool** **prog tracelog**
+| **bpftool** **prog run** *PROG* **data_in** *FILE* [**data_out** *FILE* [**data_size_out** *L*]] [**ctx_in** *FILE* [**ctx_out** *FILE* [**ctx_size_out** *M*]]] [**repeat** *N*]
+| **bpftool** **prog profile** *PROG* [**duration** *DURATION*] *METRICs*
+| **bpftool** **prog help**
+|
+| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
+| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* }
+| *TYPE* := {
+| **socket** | **kprobe** | **kretprobe** | **classifier** | **action** |
+| **tracepoint** | **raw_tracepoint** | **xdp** | **perf_event** | **cgroup/skb** |
+| **cgroup/sock** | **cgroup/dev** | **lwt_in** | **lwt_out** | **lwt_xmit** |
+| **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** |
+| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
+| **cgroup/connect4** | **cgroup/connect6** | **cgroup/getpeername4** | **cgroup/getpeername6** |
+| **cgroup/getsockname4** | **cgroup/getsockname6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
+| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** |
+| **cgroup/getsockopt** | **cgroup/setsockopt** | **cgroup/sock_release** |
+| **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup**
+| }
+| *ATTACH_TYPE* := {
+| **sk_msg_verdict** | **sk_skb_verdict** | **sk_skb_stream_verdict** |
+| **sk_skb_stream_parser** | **flow_dissector**
+| }
+| *METRICs* := {
+| **cycles** | **instructions** | **l1d_loads** | **llc_misses** |
+| **itlb_misses** | **dtlb_misses**
+| }
+
+
+DESCRIPTION
+===========
+ **bpftool prog { show | list }** [*PROG*]
+ Show information about loaded programs. If *PROG* is
+ specified show information only about given programs,
+ otherwise list all programs currently loaded on the system.
+ In case of **tag** or **name**, *PROG* may match several
+ programs which will all be shown.
+
+ Output will start with program ID followed by program type and
+ zero or more named attributes (depending on kernel version).
+
+ Since Linux 5.1 the kernel can collect statistics on BPF
+ programs (such as the total time spent running the program,
+ and the number of times it was run). If available, bpftool
+ shows such statistics. However, the kernel does not collect
+		  them by default, as it slightly impacts performance on each
+ program run. Activation or deactivation of the feature is
+ performed via the **kernel.bpf_stats_enabled** sysctl knob.
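+
+		  For example, statistics collection can be switched on at
+		  runtime with:
+
+		  ::
+
+		      # sysctl -w kernel.bpf_stats_enabled=1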
+
+ Since Linux 5.8 bpftool is able to discover information about
+ processes that hold open file descriptors (FDs) against BPF
+ programs. On such kernels bpftool will automatically emit this
+ information as well.
+
+ **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }]
+ Dump eBPF instructions of the programs from the kernel. By
+ default, eBPF will be disassembled and printed to standard
+ output in human-readable format. In this case, **opcodes**
+ controls if raw opcodes should be printed as well.
+
+ In case of **tag** or **name**, *PROG* may match several
+ programs which will all be dumped. However, if **file** or
+ **visual** is specified, *PROG* must match a single program.
+
+ If **file** is specified, the binary image will instead be
+ written to *FILE*.
+
+ If **visual** is specified, control flow graph (CFG) will be
+ built instead, and eBPF instructions will be presented with
+ CFG in DOT format, on standard output.
+
+ If the programs have line_info available, the source line will
+ be displayed. If **linum** is specified, the filename, line
+ number and line column will also be displayed.
+
+ **bpftool prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }]
+ Dump jited image (host machine code) of the program.
+
+		  If *FILE* is specified, the image will be written to that file;
+		  otherwise it will be disassembled and printed to stdout.
+ *PROG* must match a single program when **file** is specified.
+
+ **opcodes** controls if raw opcodes will be printed.
+
+ If the prog has line_info available, the source line will
+ be displayed. If **linum** is specified, the filename, line
+ number and line column will also be displayed.
+
+ **bpftool prog pin** *PROG* *FILE*
+ Pin program *PROG* as *FILE*.
+
+		  Note: *FILE* must be located in a *bpffs* mount. It must not
+ contain a dot character ('.'), which is reserved for future
+ extensions of *bpffs*.
+
+ **bpftool prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** { **idx** *IDX* | **name** *NAME* } *MAP*] [{ **offload_dev** | **xdpmeta_dev** } *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
+ Load bpf program(s) from binary *OBJ* and pin as *PATH*.
+ **bpftool prog load** pins only the first program from the
+ *OBJ* as *PATH*. **bpftool prog loadall** pins all programs
+		  from the *OBJ* under the *PATH* directory.
+		  **type** is optional; if not specified, the program type will
+		  be inferred from section names.
+		  By default, bpftool will create new maps as declared in the ELF
+		  object being loaded. The **map** parameter allows for the reuse
+		  of existing maps. It can be specified multiple times, each
+		  time for a different map. *IDX* refers to the index of the map
+		  to be replaced in the ELF file, counting from 0, while *NAME*
+		  allows replacing a map by name. *MAP* specifies the map to
+		  use, referring to it by **id** or through a **pinned** file.
+		  If **offload_dev** *NAME* is specified, the program will be
+		  loaded onto the given networking device (offload).
+		  If **xdpmeta_dev** *NAME* is specified, the program will become
+		  device-bound without offloading, which facilitates access
+		  to XDP metadata.
+		  The optional **pinmaps** argument can be provided to pin all
+		  maps under the *MAP_DIR* directory.
+
+		  If **autoattach** is specified, the program will be attached
+		  before pinning. In that case, only the link (representing the
+		  program attached to its hook) is pinned, not the program as
+		  such, so the path won't show up in **bpftool prog show -f**,
+		  only in **bpftool link show -f**. Also, this only works
+		  when bpftool (libbpf) is able to infer all necessary
+		  information from the object file; in particular, it is not
+		  supported for all program types. If a program does not
+		  support autoattach, bpftool falls back to regular pinning
+		  for that program instead.
+
+		  Note: *PATH* must be located in a *bpffs* mount. It must not
+ contain a dot character ('.'), which is reserved for future
+ extensions of *bpffs*.
+
+ **bpftool prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
+ Attach bpf program *PROG* (with type specified by
+ *ATTACH_TYPE*). Most *ATTACH_TYPEs* require a *MAP*
+		  parameter, with the exception of *flow_dissector*, which is
+		  attached to the current network namespace.
+
+ **bpftool prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
+ Detach bpf program *PROG* (with type specified by
+ *ATTACH_TYPE*). Most *ATTACH_TYPEs* require a *MAP*
+		  parameter, with the exception of *flow_dissector*, which is
+		  detached from the current network namespace.
+
+ **bpftool prog tracelog**
+ Dump the trace pipe of the system to the console (stdout).
+ Hit <Ctrl+C> to stop printing. BPF programs can write to this
+ trace pipe at runtime with the **bpf_trace_printk**\ () helper.
+ This should be used only for debugging purposes. For
+ streaming data from BPF programs to user space, one can use
+ perf events (see also **bpftool-map**\ (8)).
+
+ **bpftool prog run** *PROG* **data_in** *FILE* [**data_out** *FILE* [**data_size_out** *L*]] [**ctx_in** *FILE* [**ctx_out** *FILE* [**ctx_size_out** *M*]]] [**repeat** *N*]
+ Run BPF program *PROG* in the kernel testing infrastructure
+ for BPF, meaning that the program works on the data and
+ context provided by the user, and not on actual packets or
+ monitored functions etc. Return value and duration for the
+ test run are printed out to the console.
+
+ Input data is read from the *FILE* passed with **data_in**.
+ If this *FILE* is "**-**", input data is read from standard
+ input. Input context, if any, is read from *FILE* passed with
+ **ctx_in**. Again, "**-**" can be used to read from standard
+ input, but only if standard input is not already in use for
+ input data. If a *FILE* is passed with **data_out**, output
+ data is written to that file. Similarly, output context is
+ written to the *FILE* passed with **ctx_out**. For both
+ output flows, "**-**" can be used to print to the standard
+ output (as plain text, or JSON if relevant option was
+ passed). If output keywords are omitted, output data and
+ context are discarded. Keywords **data_size_out** and
+ **ctx_size_out** are used to pass the size (in bytes) for the
+ output buffers to the kernel, although the default of 32 kB
+ should be more than enough for most cases.
+
+ Keyword **repeat** is used to indicate the number of
+ consecutive runs to perform. Note that output data and
+ context printed to files correspond to the last of those
+ runs. The duration printed out at the end of the runs is an
+ average over all runs performed by the command.
+
+ Not all program types support test run. Among those which do,
+ not all of them can take the **ctx_in**/**ctx_out**
+ arguments. bpftool does not perform checks on program types.
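+
+		  As a sketch (the pin path and file names are illustrative),
+		  running a program ten times on input data read from a file,
+		  and printing the output data to the console:
+
+		  ::
+
+		      # bpftool prog run pinned /sys/fs/bpf/sample_prog \
+		            data_in input.bin data_out - repeat 10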
+
+ **bpftool prog profile** *PROG* [**duration** *DURATION*] *METRICs*
+ Profile *METRICs* for bpf program *PROG* for *DURATION*
+ seconds or until user hits <Ctrl+C>. *DURATION* is optional.
+ If *DURATION* is not specified, the profiling will run up to
+ **UINT_MAX** seconds.
+
+ **bpftool prog help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+ -f, --bpffs
+ When showing BPF programs, show file names of pinned
+ programs.
+
+ -m, --mapcompat
+ Allow loading maps with unknown map definitions.
+
+ -n, --nomount
+ Do not automatically attempt to mount any virtual file system
+ (such as tracefs or BPF virtual file system) when necessary.
+
+ -L, --use-loader
+ Load program as a "loader" program. This is useful to debug
+ the generation of such programs. When this option is in
+ use, bpftool attempts to load the programs from the object
+ file into the kernel, but does not pin them (therefore, the
+ *PATH* must not be provided).
+
+ When combined with the **-d**\ \|\ **--debug** option,
+ additional debug messages are generated, and the execution
+ of the loader program will use the **bpf_trace_printk**\ ()
+ helper to log each step of loading BTF, creating the maps,
+ and loading the programs (see **bpftool prog tracelog** as
+ a way to dump those messages).
+
+EXAMPLES
+========
+**# bpftool prog show**
+
+::
+
+ 10: xdp name some_prog tag 005a3d2123620c8b gpl run_time_ns 81632 run_cnt 10
+ loaded_at 2017-09-29T20:11:00+0000 uid 0
+ xlated 528B jited 370B memlock 4096B map_ids 10
+ pids systemd(1)
+
+**# bpftool --json --pretty prog show**
+
+::
+
+ [{
+ "id": 10,
+ "type": "xdp",
+ "tag": "005a3d2123620c8b",
+ "gpl_compatible": true,
+ "run_time_ns": 81632,
+ "run_cnt": 10,
+ "loaded_at": 1506715860,
+ "uid": 0,
+ "bytes_xlated": 528,
+ "jited": true,
+ "bytes_jited": 370,
+ "bytes_memlock": 4096,
+ "map_ids": [10
+ ],
+ "pids": [{
+ "pid": 1,
+ "comm": "systemd"
+ }
+ ]
+ }
+ ]
+
+|
+| **# bpftool prog dump xlated id 10 file /tmp/t**
+| **$ ls -l /tmp/t**
+
+::
+
+ -rw------- 1 root root 560 Jul 22 01:42 /tmp/t
+
+**# bpftool prog dump jited tag 005a3d2123620c8b**
+
+::
+
+ 0: push %rbp
+ 1: mov %rsp,%rbp
+ 2: sub $0x228,%rsp
+ 3: sub $0x28,%rbp
+ 4: mov %rbx,0x0(%rbp)
+
+|
+| **# mount -t bpf none /sys/fs/bpf/**
+| **# bpftool prog pin id 10 /sys/fs/bpf/prog**
+| **# bpftool prog load ./my_prog.o /sys/fs/bpf/prog2**
+| **# ls -l /sys/fs/bpf/**
+
+::
+
+ -rw------- 1 root root 0 Jul 22 01:43 prog
+ -rw------- 1 root root 0 Jul 22 01:44 prog2
+
+**# bpftool prog dump jited pinned /sys/fs/bpf/prog opcodes**
+
+::
+
+ 0: push %rbp
+ 55
+ 1: mov %rsp,%rbp
+ 48 89 e5
+ 4: sub $0x228,%rsp
+ 48 81 ec 28 02 00 00
+ b: sub $0x28,%rbp
+ 48 83 ed 28
+ f: mov %rbx,0x0(%rbp)
+ 48 89 5d 00
+
+|
+| **# bpftool prog load xdp1_kern.o /sys/fs/bpf/xdp1 type xdp map name rxcnt id 7**
+| **# bpftool prog show pinned /sys/fs/bpf/xdp1**
+
+::
+
+ 9: xdp name xdp_prog1 tag 539ec6ce11b52f98 gpl
+ loaded_at 2018-06-25T16:17:31-0700 uid 0
+ xlated 488B jited 336B memlock 4096B map_ids 7
+
+**# rm /sys/fs/bpf/xdp1**
+
+|
+| **# bpftool prog profile id 337 duration 10 cycles instructions llc_misses**
+
+::
+
+ 51397 run_cnt
+ 40176203 cycles (83.05%)
+ 42518139 instructions # 1.06 insns per cycle (83.39%)
+ 123 llc_misses # 2.89 LLC misses per million insns (83.15%)
+
+|
+| Output below is for the trace logs.
+| Run in separate terminals:
+| **# bpftool prog tracelog**
+| **# bpftool prog load -L -d file.o**
+
+::
+
+ bpftool-620059 [004] d... 2634685.517903: bpf_trace_printk: btf_load size 665 r=5
+ bpftool-620059 [004] d... 2634685.517912: bpf_trace_printk: map_create sample_map idx 0 type 2 value_size 4 value_btf_id 0 r=6
+ bpftool-620059 [004] d... 2634685.517997: bpf_trace_printk: prog_load sample insn_cnt 13 r=7
+ bpftool-620059 [004] d... 2634685.517999: bpf_trace_printk: close(5) = 0
diff --git a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
new file mode 100644
index 0000000000..8022b5321d
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
@@ -0,0 +1,92 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+==================
+bpftool-struct_ops
+==================
+-------------------------------------------------------------------------------
+tool to register/unregister/introspect BPF struct_ops
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **struct_ops** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| }
+
+ *COMMANDS* :=
+ { **show** | **list** | **dump** | **register** | **unregister** | **help** }
+
+STRUCT_OPS COMMANDS
+===================
+
+| **bpftool** **struct_ops { show | list }** [*STRUCT_OPS_MAP*]
+| **bpftool** **struct_ops dump** [*STRUCT_OPS_MAP*]
+| **bpftool** **struct_ops register** *OBJ* [*LINK_DIR*]
+| **bpftool** **struct_ops unregister** *STRUCT_OPS_MAP*
+| **bpftool** **struct_ops help**
+|
+| *STRUCT_OPS_MAP* := { **id** *STRUCT_OPS_MAP_ID* | **name** *STRUCT_OPS_MAP_NAME* }
+| *OBJ* := /a/file/of/bpf_struct_ops.o
+
+
+DESCRIPTION
+===========
+ **bpftool struct_ops { show | list }** [*STRUCT_OPS_MAP*]
+ Show brief information about the struct_ops in the system.
+ If *STRUCT_OPS_MAP* is specified, it shows information only
+ for the given struct_ops. Otherwise, it lists all struct_ops
+ currently existing in the system.
+
+		  Output will start with the struct_ops map ID, followed by its
+		  map name and the kernel type of its struct_ops.
+
+ **bpftool struct_ops dump** [*STRUCT_OPS_MAP*]
+		  Dump detailed information about the struct_ops in the system.
+ If *STRUCT_OPS_MAP* is specified, it dumps information only
+ for the given struct_ops. Otherwise, it dumps all struct_ops
+ currently existing in the system.
+
+ **bpftool struct_ops register** *OBJ* [*LINK_DIR*]
+		  Register bpf struct_ops from *OBJ*. All struct_ops under
+		  the ELF sections ".struct_ops" and ".struct_ops.link" will
+		  be registered with their kernel subsystem. For each
+		  struct_ops in the ".struct_ops.link" section, a link
+		  will be created. You can give *LINK_DIR* to provide a
+		  directory path where these links will be pinned, each under
+		  the same name as its corresponding map.
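+
+		  For example (the object file and directory are illustrative),
+		  registering struct_ops and pinning the resulting links under a
+		  bpffs directory:
+
+		  ::
+
+		      # bpftool struct_ops register bpf_cubic.o /sys/fs/bpf/links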
+
+ **bpftool struct_ops unregister** *STRUCT_OPS_MAP*
+ Unregister the *STRUCT_OPS_MAP* from the kernel subsystem.
+
+ **bpftool struct_ops help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+EXAMPLES
+========
+**# bpftool struct_ops show**
+
+::
+
+ 100: dctcp tcp_congestion_ops
+ 105: cubic tcp_congestion_ops
+
+**# bpftool struct_ops unregister id 105**
+
+::
+
+ Unregistered tcp_congestion_ops cubic id 105
+
+**# bpftool struct_ops register bpf_cubic.o**
+
+::
+
+ Registered tcp_congestion_ops cubic id 110
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
new file mode 100644
index 0000000000..6965c94dfd
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -0,0 +1,70 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+================
+BPFTOOL
+================
+-------------------------------------------------------------------------------
+tool for inspection and simple manipulation of eBPF programs and maps
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] *OBJECT* { *COMMAND* | **help** }
+
+ **bpftool** **batch file** *FILE*
+
+ **bpftool** **version**
+
+ *OBJECT* := { **map** | **program** | **link** | **cgroup** | **perf** | **net** | **feature** |
+ **btf** | **gen** | **struct_ops** | **iter** }
+
+ *OPTIONS* := { { **-V** | **--version** } | |COMMON_OPTIONS| }
+
+ *MAP-COMMANDS* :=
+ { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** |
+ **delete** | **pin** | **event_pipe** | **help** }
+
+ *PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin** |
+ **load** | **attach** | **detach** | **help** }
+
+ *LINK-COMMANDS* := { **show** | **list** | **pin** | **detach** | **help** }
+
+ *CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }
+
+ *PERF-COMMANDS* := { **show** | **list** | **help** }
+
+ *NET-COMMANDS* := { **show** | **list** | **help** }
+
+ *FEATURE-COMMANDS* := { **probe** | **help** }
+
+ *BTF-COMMANDS* := { **show** | **list** | **dump** | **help** }
+
+ *GEN-COMMANDS* := { **object** | **skeleton** | **min_core_btf** | **help** }
+
+ *STRUCT-OPS-COMMANDS* := { **show** | **list** | **dump** | **register** | **unregister** | **help** }
+
+ *ITER-COMMANDS* := { **pin** | **help** }
+
+DESCRIPTION
+===========
+ *bpftool* allows for inspection and simple modification of BPF objects
+ on the system.
+
+	Note that the format of the output of all commands is not guaranteed
+	to be stable and should not be depended upon.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+ -m, --mapcompat
+ Allow loading maps with unknown map definitions.
+
+ -n, --nomount
+ Do not automatically attempt to mount any virtual file system
+ (such as tracefs or BPF virtual file system) when necessary.
diff --git a/tools/bpf/bpftool/Documentation/common_options.rst b/tools/bpf/bpftool/Documentation/common_options.rst
new file mode 100644
index 0000000000..30df7a707f
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/common_options.rst
@@ -0,0 +1,25 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+-h, --help
+ Print short help message (similar to **bpftool help**).
+
+-V, --version
+ Print bpftool's version number (similar to **bpftool version**), the
+ number of the libbpf version in use, and optional features that were
+ included when bpftool was compiled. Optional features include linking
+ against LLVM or libbfd to provide the disassembler for JIT-ted
+ programs (**bpftool prog dump jited**) and usage of BPF skeletons
+ (some features like **bpftool prog profile** or showing pids
+	  associated with BPF objects may rely on it).
+
+-j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+-p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+-d, --debug
+ Print all logs available, even debug-level information. This includes
+ logs from libbpf as well as from the verifier, when attempting to
+ load programs.
diff --git a/tools/bpf/bpftool/Documentation/substitutions.rst b/tools/bpf/bpftool/Documentation/substitutions.rst
new file mode 100644
index 0000000000..827e3ffb17
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/substitutions.rst
@@ -0,0 +1,3 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+.. |COMMON_OPTIONS| replace:: { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** }
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
new file mode 100644
index 0000000000..e9154ace80
--- /dev/null
+++ b/tools/bpf/bpftool/Makefile
@@ -0,0 +1,299 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+include ../../scripts/Makefile.include
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+ifeq ($(V),1)
+ Q =
+else
+ Q = @
+endif
+
+BPF_DIR = $(srctree)/tools/lib/bpf
+
+ifneq ($(OUTPUT),)
+ _OUTPUT := $(OUTPUT)
+else
+ _OUTPUT := $(CURDIR)/
+endif
+BOOTSTRAP_OUTPUT := $(_OUTPUT)bootstrap/
+
+LIBBPF_OUTPUT := $(_OUTPUT)libbpf/
+LIBBPF_DESTDIR := $(LIBBPF_OUTPUT)
+LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include
+LIBBPF_HDRS_DIR := $(LIBBPF_INCLUDE)/bpf
+LIBBPF := $(LIBBPF_OUTPUT)libbpf.a
+
+LIBBPF_BOOTSTRAP_OUTPUT := $(BOOTSTRAP_OUTPUT)libbpf/
+LIBBPF_BOOTSTRAP_DESTDIR := $(LIBBPF_BOOTSTRAP_OUTPUT)
+LIBBPF_BOOTSTRAP_INCLUDE := $(LIBBPF_BOOTSTRAP_DESTDIR)include
+LIBBPF_BOOTSTRAP_HDRS_DIR := $(LIBBPF_BOOTSTRAP_INCLUDE)/bpf
+LIBBPF_BOOTSTRAP := $(LIBBPF_BOOTSTRAP_OUTPUT)libbpf.a
+
+# We need to copy hashmap.h, nlattr.h, relo_core.h and libbpf_internal.h
+# which are not otherwise exported by libbpf, but still required by bpftool.
+LIBBPF_INTERNAL_HDRS := $(addprefix $(LIBBPF_HDRS_DIR)/,hashmap.h nlattr.h relo_core.h libbpf_internal.h)
+LIBBPF_BOOTSTRAP_INTERNAL_HDRS := $(addprefix $(LIBBPF_BOOTSTRAP_HDRS_DIR)/,hashmap.h relo_core.h libbpf_internal.h)
+
+$(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT) $(LIBBPF_HDRS_DIR) $(LIBBPF_BOOTSTRAP_HDRS_DIR):
+ $(QUIET_MKDIR)mkdir -p $@
+
+$(LIBBPF): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_OUTPUT)
+ $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) \
+ DESTDIR=$(LIBBPF_DESTDIR:/=) prefix= $(LIBBPF) install_headers
+
+$(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_DIR)
+ $(call QUIET_INSTALL, $@)
+ $(Q)install -m 644 -t $(LIBBPF_HDRS_DIR) $<
+
+$(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT)
+ $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \
+ DESTDIR=$(LIBBPF_BOOTSTRAP_DESTDIR:/=) prefix= \
+ ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" $@ install_headers
+
+$(LIBBPF_BOOTSTRAP_INTERNAL_HDRS): $(LIBBPF_BOOTSTRAP_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_BOOTSTRAP_HDRS_DIR)
+ $(call QUIET_INSTALL, $@)
+ $(Q)install -m 644 -t $(LIBBPF_BOOTSTRAP_HDRS_DIR) $<
+
+$(LIBBPF)-clean: FORCE | $(LIBBPF_OUTPUT)
+ $(call QUIET_CLEAN, libbpf)
+ $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) clean >/dev/null
+
+$(LIBBPF_BOOTSTRAP)-clean: FORCE | $(LIBBPF_BOOTSTRAP_OUTPUT)
+ $(call QUIET_CLEAN, libbpf-bootstrap)
+ $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) clean >/dev/null
+
+prefix ?= /usr/local
+bash_compdir ?= /usr/share/bash-completion/completions
+
+CFLAGS += -O2
+CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers
+CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS))
+CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
+ -I$(or $(OUTPUT),.) \
+ -I$(LIBBPF_INCLUDE) \
+ -I$(srctree)/kernel/bpf/ \
+ -I$(srctree)/tools/include \
+ -I$(srctree)/tools/include/uapi
+ifneq ($(BPFTOOL_VERSION),)
+CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
+endif
+ifneq ($(EXTRA_CFLAGS),)
+CFLAGS += $(EXTRA_CFLAGS)
+endif
+ifneq ($(EXTRA_LDFLAGS),)
+LDFLAGS += $(EXTRA_LDFLAGS)
+endif
+
+INSTALL ?= install
+RM ?= rm -f
+
+FEATURE_USER = .bpftool
+
+FEATURE_TESTS := clang-bpf-co-re
+FEATURE_TESTS += llvm
+FEATURE_TESTS += libcap
+FEATURE_TESTS += libbfd
+FEATURE_TESTS += libbfd-liberty
+FEATURE_TESTS += libbfd-liberty-z
+FEATURE_TESTS += disassembler-four-args
+FEATURE_TESTS += disassembler-init-styled
+
+FEATURE_DISPLAY := clang-bpf-co-re
+FEATURE_DISPLAY += llvm
+FEATURE_DISPLAY += libcap
+FEATURE_DISPLAY += libbfd
+FEATURE_DISPLAY += libbfd-liberty
+FEATURE_DISPLAY += libbfd-liberty-z
+
+check_feat := 1
+NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
+ifdef MAKECMDGOALS
+ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
+ check_feat := 0
+endif
+endif
+
+ifeq ($(check_feat),1)
+ifeq ($(FEATURES_DUMP),)
+include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
+endif
+
+LIBS = $(LIBBPF) -lelf -lz
+LIBS_BOOTSTRAP = $(LIBBPF_BOOTSTRAP) -lelf -lz
+ifeq ($(feature-libcap), 1)
+CFLAGS += -DUSE_LIBCAP
+LIBS += -lcap
+endif
+
+include $(wildcard $(OUTPUT)*.d)
+
+all: $(OUTPUT)bpftool
+
+SRCS := $(wildcard *.c)
+
+ifeq ($(feature-llvm),1)
+ # If LLVM is available, use it for JIT disassembly
+ CFLAGS += -DHAVE_LLVM_SUPPORT
+ LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets
+  CFLAGS += $(shell $(LLVM_CONFIG) --cflags)
+ LIBS += $(shell $(LLVM_CONFIG) --libs $(LLVM_CONFIG_LIB_COMPONENTS))
+ ifeq ($(shell $(LLVM_CONFIG) --shared-mode),static)
+ LIBS += $(shell $(LLVM_CONFIG) --system-libs $(LLVM_CONFIG_LIB_COMPONENTS))
+ LIBS += -lstdc++
+ endif
+ LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags)
+else
+ # Fall back on libbfd
+ ifeq ($(feature-libbfd),1)
+ LIBS += -lbfd -ldl -lopcodes
+ else ifeq ($(feature-libbfd-liberty),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty
+ else ifeq ($(feature-libbfd-liberty-z),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty -lz
+ endif
+
+ # If one of the above feature combinations is set, we support libbfd
+ ifneq ($(filter -lbfd,$(LIBS)),)
+ CFLAGS += -DHAVE_LIBBFD_SUPPORT
+
+ # Libbfd interface changed over time, figure out what we need
+ ifeq ($(feature-disassembler-four-args), 1)
+ CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+ endif
+ ifeq ($(feature-disassembler-init-styled), 1)
+ CFLAGS += -DDISASM_INIT_STYLED
+ endif
+ endif
+endif
+ifeq ($(filter -DHAVE_LLVM_SUPPORT -DHAVE_LIBBFD_SUPPORT,$(CFLAGS)),)
+ # No support for JIT disassembly
+ SRCS := $(filter-out jit_disasm.c,$(SRCS))
+endif
+
+HOST_CFLAGS = $(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),\
+ $(subst $(CLANG_CROSS_FLAGS),,$(CFLAGS)))
+
+BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool
+
+BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o disasm.o)
+$(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP)
+
+OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
+$(OBJS): $(LIBBPF) $(LIBBPF_INTERNAL_HDRS)
+
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../../vmlinux \
+ /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
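+# VMLINUX_BTF and VMLINUX_H can also be set on the command line, e.g.
+# (illustrative path):
+#   make VMLINUX_BTF=/sys/kernel/btf/vmlinux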
+
+bootstrap: $(BPFTOOL_BOOTSTRAP)
+
+ifneq ($(VMLINUX_BTF)$(VMLINUX_H),)
+ifeq ($(feature-clang-bpf-co-re),1)
+
+BUILD_BPF_SKELS := 1
+
+$(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP)
+ifeq ($(VMLINUX_H),)
+ $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) btf dump file $< format c > $@
+else
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
+
+$(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
+ $(QUIET_CLANG)$(CLANG) \
+ -I$(or $(OUTPUT),.) \
+ -I$(srctree)/tools/include/uapi/ \
+ -I$(LIBBPF_BOOTSTRAP_INCLUDE) \
+ -g -O2 -Wall -fno-stack-protector \
+ --target=bpf -c $< -o $@
+ $(Q)$(LLVM_STRIP) -g $@
+
+$(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP)
+ $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton $< > $@
+
+$(OUTPUT)prog.o: $(OUTPUT)profiler.skel.h
+
+$(OUTPUT)pids.o: $(OUTPUT)pid_iter.skel.h
+
+endif
+endif
+
+CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS)
+
+$(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
+ $(QUIET_CC)$(HOSTCC) $(HOST_CFLAGS) -c -MMD $< -o $@
+
+$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@
+
+$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
+ $(QUIET_LINK)$(HOSTCC) $(HOST_CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@
+
+$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@
+
+$(BOOTSTRAP_OUTPUT)%.o: %.c $(LIBBPF_BOOTSTRAP_INTERNAL_HDRS) | $(BOOTSTRAP_OUTPUT)
+ $(QUIET_CC)$(HOSTCC) $(HOST_CFLAGS) -c -MMD $< -o $@
+
+$(OUTPUT)%.o: %.c
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@
+
+feature-detect-clean:
+ $(call QUIET_CLEAN, feature-detect)
+ $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
+
+clean: $(LIBBPF)-clean $(LIBBPF_BOOTSTRAP)-clean feature-detect-clean
+ $(call QUIET_CLEAN, bpftool)
+ $(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
+ $(Q)$(RM) -- $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h
+ $(Q)$(RM) -r -- $(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT)
+ $(call QUIET_CLEAN, core-gen)
+ $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool
+ $(Q)$(RM) -r -- $(OUTPUT)feature/
+
+install-bin: $(OUTPUT)bpftool
+ $(call QUIET_INSTALL, bpftool)
+ $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/sbin
+ $(Q)$(INSTALL) $(OUTPUT)bpftool $(DESTDIR)$(prefix)/sbin/bpftool
+
+install: install-bin
+ $(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(bash_compdir)
+ $(Q)$(INSTALL) -m 0644 bash-completion/bpftool $(DESTDIR)$(bash_compdir)
+
+uninstall:
+ $(call QUIET_UNINST, bpftool)
+ $(Q)$(RM) -- $(DESTDIR)$(prefix)/sbin/bpftool
+ $(Q)$(RM) -- $(DESTDIR)$(bash_compdir)/bpftool
+
+doc:
+ $(call descend,Documentation)
+
+doc-clean:
+ $(call descend,Documentation,clean)
+
+doc-install:
+ $(call descend,Documentation,install)
+
+doc-uninstall:
+ $(call descend,Documentation,uninstall)
+
+FORCE:
+
+.SECONDARY:
+.PHONY: all FORCE bootstrap clean install-bin install uninstall
+.PHONY: doc doc-clean doc-install doc-uninstall
+.DEFAULT_GOAL := all
+
+# Delete partially updated (corrupted) files on error
+.DELETE_ON_ERROR:
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
new file mode 100644
index 0000000000..085bf18f36
--- /dev/null
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -0,0 +1,1211 @@
+# bpftool(8) bash completion -*- shell-script -*-
+#
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2017-2018 Netronome Systems, Inc.
+#
+# Author: Quentin Monnet <quentin.monnet@netronome.com>
+
+# Takes a list of words in argument; each one of them is added to COMPREPLY if
+# it is not already present on the command line. Returns no value.
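+# Example (illustrative): if "key" is already present on the command line,
+# "_bpftool_once_attr 'key value'" suggests only "value".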
+_bpftool_once_attr()
+{
+ local w idx found
+ for w in $*; do
+ found=0
+ for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
+ if [[ $w == ${words[idx]} ]]; then
+ found=1
+ break
+ fi
+ done
+ [[ $found -eq 0 ]] && \
+ COMPREPLY+=( $( compgen -W "$w" -- "$cur" ) )
+ done
+}
+
+# Takes a list of words as argument; if any of those words is present on the
+# command line, return 0. Otherwise, return 1.
+_bpftool_search_list()
+{
+ local w idx
+ for w in $*; do
+ for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
+ [[ $w == ${words[idx]} ]] && return 0
+ done
+ done
+ return 1
+}
+
+# Takes a list of words in argument; adds them all to COMPREPLY if none of them
+# is already present on the command line. Returns no value.
+_bpftool_one_of_list()
+{
+ _bpftool_search_list $* && return 1
+ COMPREPLY+=( $( compgen -W "$*" -- "$cur" ) )
+}
+
+_bpftool_get_map_ids()
+{
+ COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
+ command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
+}
+
+# Takes map type and adds matching map ids to the list of suggestions.
+_bpftool_get_map_ids_for_type()
+{
+ local type="$1"
+ COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
+ command grep -C2 "$type" | \
+ command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
+}
+
+_bpftool_get_map_names()
+{
+ COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
+ command sed -n 's/.*"name": \(.*\),$/\1/p' )" -- "$cur" ) )
+}
+
+# Takes map type and adds matching map names to the list of suggestions.
+_bpftool_get_map_names_for_type()
+{
+ local type="$1"
+ COMPREPLY+=( $( compgen -W "$( bpftool -jp map 2>&1 | \
+ command grep -C2 "$type" | \
+ command sed -n 's/.*"name": \(.*\),$/\1/p' )" -- "$cur" ) )
+}
+
+_bpftool_get_prog_ids()
+{
+ COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
+ command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
+}
+
+_bpftool_get_prog_tags()
+{
+ COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
+ command sed -n 's/.*"tag": "\(.*\)",$/\1/p' )" -- "$cur" ) )
+}
+
+_bpftool_get_prog_names()
+{
+ COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \
+ command sed -n 's/.*"name": "\(.*\)",$/\1/p' )" -- "$cur" ) )
+}
+
+_bpftool_get_btf_ids()
+{
+ COMPREPLY+=( $( compgen -W "$( bpftool -jp btf 2>&1 | \
+ command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
+}
+
+_bpftool_get_link_ids()
+{
+ COMPREPLY+=( $( compgen -W "$( bpftool -jp link 2>&1 | \
+ command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
+}
+
+_bpftool_get_obj_map_names()
+{
+ local obj
+
+ obj=$1
+
+ maps=$(objdump -j maps -t $obj 2>/dev/null | \
+ command awk '/g . maps/ {print $NF}')
+
+ COMPREPLY+=( $( compgen -W "$maps" -- "$cur" ) )
+}
+
+_bpftool_get_obj_map_idxs()
+{
+ local obj
+
+ obj=$1
+
+ nmaps=$(objdump -j maps -t $obj 2>/dev/null | grep -c 'g . maps')
+
+ COMPREPLY+=( $( compgen -W "$(seq 0 $((nmaps - 1)))" -- "$cur" ) )
+}
+
+_sysfs_get_netdevs()
+{
+ COMPREPLY+=( $( compgen -W "$( ls /sys/class/net 2>/dev/null )" -- \
+ "$cur" ) )
+}
+
+# Retrieve type of the map that we are operating on.
+_bpftool_map_guess_map_type()
+{
+ local keyword ref
+ for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
+ case "${words[$((idx-2))]}" in
+ lookup|update)
+ keyword=${words[$((idx-1))]}
+ ref=${words[$((idx))]}
+ ;;
+ push)
+ printf "stack"
+ return 0
+ ;;
+ enqueue)
+ printf "queue"
+ return 0
+ ;;
+ esac
+ done
+ [[ -z $ref ]] && return 0
+
+ local type
+ type=$(bpftool -jp map show $keyword $ref | \
+ command sed -n 's/.*"type": "\(.*\)",$/\1/p')
+ [[ -n $type ]] && printf $type
+}
+
+_bpftool_map_update_get_id()
+{
+ local command="$1"
+
+ # Is it the map to update, or a map to insert into the map to update?
+ # Search for "value" keyword.
+ local idx value
+ for (( idx=7; idx < ${#words[@]}-1; idx++ )); do
+ if [[ ${words[idx]} == "value" ]]; then
+ value=1
+ break
+ fi
+ done
+ if [[ $value -eq 0 ]]; then
+ case "$command" in
+ push)
+ _bpftool_get_map_ids_for_type stack
+ ;;
+ enqueue)
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_ids
+ ;;
+ esac
+ return 0
+ fi
+
+ # Id to complete is for a value. It can be either prog id or map id. This
+ # depends on the type of the map to update.
+ local type=$(_bpftool_map_guess_map_type)
+ case $type in
+ array_of_maps|hash_of_maps)
+ _bpftool_get_map_ids
+ return 0
+ ;;
+ prog_array)
+ _bpftool_get_prog_ids
+ return 0
+ ;;
+ *)
+ return 0
+ ;;
+ esac
+}
+
+_bpftool_map_update_get_name()
+{
+ local command="$1"
+
+ # Is it the map to update, or a map to insert into the map to update?
+ # Search for "value" keyword.
+ local idx value
+ for (( idx=7; idx < ${#words[@]}-1; idx++ )); do
+ if [[ ${words[idx]} == "value" ]]; then
+ value=1
+ break
+ fi
+ done
+ if [[ $value -eq 0 ]]; then
+ case "$command" in
+ push)
+ _bpftool_get_map_names_for_type stack
+ ;;
+ enqueue)
+ _bpftool_get_map_names_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_names
+ ;;
+ esac
+ return 0
+ fi
+
+ # Name to complete is for a value. It can be either prog name or map name. This
+ # depends on the type of the map to update.
+ local type=$(_bpftool_map_guess_map_type)
+ case $type in
+ array_of_maps|hash_of_maps)
+ _bpftool_get_map_names
+ return 0
+ ;;
+ prog_array)
+ _bpftool_get_prog_names
+ return 0
+ ;;
+ *)
+ return 0
+ ;;
+ esac
+}
+
+_bpftool()
+{
+ local cur prev words objword json=0
+ _init_completion || return
+
+ # Deal with options
+ if [[ ${words[cword]} == -* ]]; then
+ local c='--version --json --pretty --bpffs --mapcompat --debug \
+ --use-loader --base-btf'
+ COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
+ return 0
+ fi
+ if _bpftool_search_list -j --json -p --pretty; then
+ json=1
+ fi
+
+ # Deal with simplest keywords
+ case $prev in
+ help|hex)
+ return 0
+ ;;
+ tag)
+ _bpftool_get_prog_tags
+ return 0
+ ;;
+ dev|offload_dev|xdpmeta_dev)
+ _sysfs_get_netdevs
+ return 0
+ ;;
+ file|pinned|-B|--base-btf)
+ _filedir
+ return 0
+ ;;
+ batch)
+ COMPREPLY=( $( compgen -W 'file' -- "$cur" ) )
+ return 0
+ ;;
+ esac
+
+ # Remove all options so completions don't have to deal with them.
+ local i
+ for (( i=1; i < ${#words[@]}; )); do
+ if [[ ${words[i]::1} == - ]] &&
+ [[ ${words[i]} != "-B" ]] && [[ ${words[i]} != "--base-btf" ]]; then
+ words=( "${words[@]:0:i}" "${words[@]:i+1}" )
+ [[ $i -le $cword ]] && cword=$(( cword - 1 ))
+ else
+            i=$(( i + 1 ))
+ fi
+ done
+ cur=${words[cword]}
+ prev=${words[cword - 1]}
+ pprev=${words[cword - 2]}
+
+ local object=${words[1]} command=${words[2]}
+
+ if [[ -z $object || $cword -eq 1 ]]; then
+ case $cur in
+ *)
+ COMPREPLY=( $( compgen -W "$( bpftool help 2>&1 | \
+ command sed \
+ -e '/OBJECT := /!d' \
+ -e 's/.*{//' \
+ -e 's/}.*//' \
+ -e 's/|//g' )" -- "$cur" ) )
+ COMPREPLY+=( $( compgen -W 'batch help' -- "$cur" ) )
+ return 0
+ ;;
+ esac
+ fi
+
+ [[ $command == help ]] && return 0
+
+ # Completion depends on object and command in use
+ case $object in
+ prog)
+ # Complete id and name, only for subcommands that use prog (but no
+ # map) ids/names.
+ case $command in
+ show|list|dump|pin)
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ return 0
+ ;;
+ name)
+ _bpftool_get_prog_names
+ return 0
+ ;;
+ esac
+ ;;
+ esac
+
+ local PROG_TYPE='id pinned tag name'
+ local MAP_TYPE='id pinned name'
+ local METRIC_TYPE='cycles instructions l1d_loads llc_misses \
+ itlb_misses dtlb_misses'
+ case $command in
+ show|list)
+ [[ $prev != "$command" ]] && return 0
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ dump)
+ case $prev in
+ $command)
+ COMPREPLY+=( $( compgen -W "xlated jited" -- \
+ "$cur" ) )
+ return 0
+ ;;
+ xlated|jited)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
+ "$cur" ) )
+ return 0
+ ;;
+ *)
+ # "file" is not compatible with other keywords here
+ if _bpftool_search_list 'file'; then
+ return 0
+ fi
+ if ! _bpftool_search_list 'linum opcodes visual'; then
+ _bpftool_once_attr 'file'
+ fi
+ _bpftool_once_attr 'linum opcodes'
+ if _bpftool_search_list 'xlated' && [[ "$json" == 0 ]]; then
+ _bpftool_once_attr 'visual'
+ fi
+ return 0
+ ;;
+ esac
+ ;;
+ pin)
+ if [[ $prev == "$command" ]]; then
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ else
+ _filedir
+ fi
+ return 0
+ ;;
+ attach|detach)
+ case $cword in
+ 3)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ 4)
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ ;;
+ name)
+ _bpftool_get_prog_names
+ ;;
+ pinned)
+ _filedir
+ ;;
+ esac
+ return 0
+ ;;
+ 5)
+ local BPFTOOL_PROG_ATTACH_TYPES='sk_msg_verdict \
+ sk_skb_verdict sk_skb_stream_verdict sk_skb_stream_parser \
+ flow_dissector'
+ COMPREPLY=( $( compgen -W "$BPFTOOL_PROG_ATTACH_TYPES" -- "$cur" ) )
+ return 0
+ ;;
+ 6)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ 7)
+ case $prev in
+ id)
+ _bpftool_get_map_ids
+ ;;
+ name)
+ _bpftool_get_map_names
+ ;;
+ pinned)
+ _filedir
+ ;;
+ esac
+ return 0
+ ;;
+ esac
+ ;;
+ load|loadall)
+ local obj
+
+ # Propose "load/loadall" to complete "bpftool prog load",
+ # or bash tries to complete "load" as a filename below.
+ if [[ ${#words[@]} -eq 3 ]]; then
+ COMPREPLY=( $( compgen -W "load loadall" -- "$cur" ) )
+ return 0
+ fi
+
+ if [[ ${#words[@]} -lt 6 ]]; then
+ _filedir
+ return 0
+ fi
+
+ obj=${words[3]}
+
+ if [[ ${words[-4]} == "map" ]]; then
+ COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
+ return 0
+ fi
+ if [[ ${words[-3]} == "map" ]]; then
+ if [[ ${words[-2]} == "idx" ]]; then
+ _bpftool_get_obj_map_idxs $obj
+ elif [[ ${words[-2]} == "name" ]]; then
+ _bpftool_get_obj_map_names $obj
+ fi
+ return 0
+ fi
+ if [[ ${words[-2]} == "map" ]]; then
+ COMPREPLY=( $( compgen -W "idx name" -- "$cur" ) )
+ return 0
+ fi
+
+ case $prev in
+ type)
+ local BPFTOOL_PROG_LOAD_TYPES='socket kprobe \
+ kretprobe classifier flow_dissector \
+ action tracepoint raw_tracepoint \
+ xdp perf_event cgroup/skb cgroup/sock \
+ cgroup/dev lwt_in lwt_out lwt_xmit \
+ lwt_seg6local sockops sk_skb sk_msg \
+ lirc_mode2 cgroup/bind4 cgroup/bind6 \
+ cgroup/connect4 cgroup/connect6 \
+ cgroup/getpeername4 cgroup/getpeername6 \
+ cgroup/getsockname4 cgroup/getsockname6 \
+ cgroup/sendmsg4 cgroup/sendmsg6 \
+ cgroup/recvmsg4 cgroup/recvmsg6 \
+ cgroup/post_bind4 cgroup/post_bind6 \
+ cgroup/sysctl cgroup/getsockopt \
+ cgroup/setsockopt cgroup/sock_release struct_ops \
+ fentry fexit freplace sk_lookup'
+ COMPREPLY=( $( compgen -W "$BPFTOOL_PROG_LOAD_TYPES" -- "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_get_map_ids
+ return 0
+ ;;
+ name)
+ _bpftool_get_map_names
+ return 0
+ ;;
+ pinned|pinmaps)
+ _filedir
+ return 0
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "map" -- "$cur" ) )
+ _bpftool_once_attr 'type pinmaps autoattach'
+ _bpftool_one_of_list 'offload_dev xdpmeta_dev'
+ return 0
+ ;;
+ esac
+ ;;
+ tracelog)
+ return 0
+ ;;
+ profile)
+ case $cword in
+ 3)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ 4)
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ ;;
+ name)
+ _bpftool_get_prog_names
+ ;;
+ pinned)
+ _filedir
+ ;;
+ esac
+ return 0
+ ;;
+ 5)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE duration" -- "$cur" ) )
+ return 0
+ ;;
+ 6)
+ case $prev in
+ duration)
+ return 0
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ esac
+ return 0
+ ;;
+ *)
+ COMPREPLY=( $( compgen -W "$METRIC_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ esac
+ ;;
+ run)
+ if [[ ${#words[@]} -eq 4 ]]; then
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ return 0
+ fi
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ return 0
+ ;;
+ name)
+ _bpftool_get_prog_names
+ return 0
+ ;;
+ data_in|data_out|ctx_in|ctx_out)
+ _filedir
+ return 0
+ ;;
+ repeat|data_size_out|ctx_size_out)
+ return 0
+ ;;
+ *)
+ _bpftool_once_attr 'data_in data_out data_size_out \
+ ctx_in ctx_out ctx_size_out repeat'
+ return 0
+ ;;
+ esac
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'dump help pin attach detach \
+ load loadall show list tracelog run profile' -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ struct_ops)
+ local STRUCT_OPS_TYPE='id name'
+ case $command in
+ show|list|dump|unregister)
+ case $prev in
+ $command)
+ COMPREPLY=( $( compgen -W "$STRUCT_OPS_TYPE" -- "$cur" ) )
+ ;;
+ id)
+ _bpftool_get_map_ids_for_type struct_ops
+ ;;
+ name)
+ _bpftool_get_map_names_for_type struct_ops
+ ;;
+ esac
+ return 0
+ ;;
+ register)
+ _filedir
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'register unregister show list dump help' \
+ -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ iter)
+ case $command in
+ pin)
+ case $prev in
+ $command)
+ _filedir
+ ;;
+ id)
+ _bpftool_get_map_ids
+ ;;
+ name)
+ _bpftool_get_map_names
+ ;;
+ pinned)
+ _filedir
+ ;;
+ *)
+ _bpftool_one_of_list $MAP_TYPE
+ ;;
+ esac
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'pin help' \
+ -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ map)
+ local MAP_TYPE='id pinned name'
+ case $command in
+ show|list|dump|peek|pop|dequeue|freeze)
+ case $prev in
+ $command)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ id)
+ case "$command" in
+ peek)
+ _bpftool_get_map_ids_for_type stack
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ pop)
+ _bpftool_get_map_ids_for_type stack
+ ;;
+ dequeue)
+ _bpftool_get_map_ids_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_ids
+ ;;
+ esac
+ return 0
+ ;;
+ name)
+ case "$command" in
+ peek)
+ _bpftool_get_map_names_for_type stack
+ _bpftool_get_map_names_for_type queue
+ ;;
+ pop)
+ _bpftool_get_map_names_for_type stack
+ ;;
+ dequeue)
+ _bpftool_get_map_names_for_type queue
+ ;;
+ *)
+ _bpftool_get_map_names
+ ;;
+ esac
+ return 0
+ ;;
+ *)
+ return 0
+ ;;
+ esac
+ ;;
+ create)
+ case $prev in
+ $command)
+ _filedir
+ return 0
+ ;;
+ type)
+ local BPFTOOL_MAP_CREATE_TYPES="$(bpftool feature list_builtins map_types 2>/dev/null | \
+ grep -v '^unspec$')"
+ COMPREPLY=( $( compgen -W "$BPFTOOL_MAP_CREATE_TYPES" -- "$cur" ) )
+ return 0
+ ;;
+ key|value|flags|entries)
+ return 0
+ ;;
+ inner_map)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_get_map_ids
+ ;;
+ name)
+ case $pprev in
+ inner_map)
+ _bpftool_get_map_names
+ ;;
+ *)
+ return 0
+ ;;
+ esac
+ ;;
+ *)
+ _bpftool_once_attr 'type key value entries name flags offload_dev'
+ if _bpftool_search_list 'array_of_maps' 'hash_of_maps'; then
+ _bpftool_once_attr 'inner_map'
+ fi
+ return 0
+ ;;
+ esac
+ ;;
+ lookup|getnext|delete)
+ case $prev in
+ $command)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_get_map_ids
+ return 0
+ ;;
+ name)
+ _bpftool_get_map_names
+ return 0
+ ;;
+ key)
+ COMPREPLY+=( $( compgen -W 'hex' -- "$cur" ) )
+ ;;
+ *)
+ case $(_bpftool_map_guess_map_type) in
+ queue|stack)
+ return 0
+ ;;
+ esac
+
+ _bpftool_once_attr 'key'
+ return 0
+ ;;
+ esac
+ ;;
+ update|push|enqueue)
+ case $prev in
+ $command)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_map_update_get_id $command
+ return 0
+ ;;
+ name)
+ _bpftool_map_update_get_name $command
+ return 0
+ ;;
+ key)
+ COMPREPLY+=( $( compgen -W 'hex' -- "$cur" ) )
+ ;;
+ value)
+ # We can have bytes, or references to a prog or a
+ # map, depending on the type of the map to update.
+ case "$(_bpftool_map_guess_map_type)" in
+ array_of_maps|hash_of_maps)
+ local MAP_TYPE='id pinned name'
+ COMPREPLY+=( $( compgen -W "$MAP_TYPE" \
+ -- "$cur" ) )
+ return 0
+ ;;
+ prog_array)
+ local PROG_TYPE='id pinned tag name'
+ COMPREPLY+=( $( compgen -W "$PROG_TYPE" \
+ -- "$cur" ) )
+ return 0
+ ;;
+ *)
+ COMPREPLY+=( $( compgen -W 'hex' \
+ -- "$cur" ) )
+ return 0
+ ;;
+ esac
+ return 0
+ ;;
+ *)
+ case $(_bpftool_map_guess_map_type) in
+ queue|stack)
+ _bpftool_once_attr 'value'
+ return 0
+ ;;
+ esac
+
+ _bpftool_once_attr 'key'
+ local UPDATE_FLAGS='any exist noexist'
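+ # e.g. a fully completed command may end up looking like
+ # (map ID and bytes chosen for illustration):
+ #   bpftool map update id 7 key hex 01 value hex 02 any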
+ for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
+ if [[ ${words[idx]} == 'value' ]]; then
+ # 'value' is present, but is not the last
+ # word, i.e. we can now have UPDATE_FLAGS.
+ _bpftool_one_of_list "$UPDATE_FLAGS"
+ return 0
+ fi
+ done
+ for (( idx=3; idx < ${#words[@]}-1; idx++ )); do
+ if [[ ${words[idx]} == 'key' ]]; then
+ # 'key' is present, but is not the last
+ # word, i.e. we can now have 'value'.
+ _bpftool_once_attr 'value'
+ return 0
+ fi
+ done
+
+ return 0
+ ;;
+ esac
+ ;;
+ pin)
+ case $prev in
+ $command)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ ;;
+ id)
+ _bpftool_get_map_ids
+ ;;
+ name)
+ _bpftool_get_map_names
+ ;;
+ esac
+ return 0
+ ;;
+ event_pipe)
+ case $prev in
+ $command)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_get_map_ids_for_type perf_event_array
+ return 0
+ ;;
+ name)
+ _bpftool_get_map_names_for_type perf_event_array
+ return 0
+ ;;
+ cpu)
+ return 0
+ ;;
+ index)
+ return 0
+ ;;
+ *)
+ _bpftool_once_attr 'cpu index'
+ return 0
+ ;;
+ esac
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'delete dump getnext help \
+ lookup pin event_pipe show list update create \
+ peek push enqueue pop dequeue freeze' -- \
+ "$cur" ) )
+ ;;
+ esac
+ ;;
+ btf)
+ local PROG_TYPE='id pinned tag name'
+ local MAP_TYPE='id pinned name'
+ case $command in
+ dump)
+ case $prev in
+ $command)
+ COMPREPLY+=( $( compgen -W "id map prog file" -- \
+ "$cur" ) )
+ return 0
+ ;;
+ prog)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ map)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ id)
+ case $pprev in
+ prog)
+ _bpftool_get_prog_ids
+ ;;
+ map)
+ _bpftool_get_map_ids
+ ;;
+ $command)
+ _bpftool_get_btf_ids
+ ;;
+ esac
+ return 0
+ ;;
+ name)
+ case $pprev in
+ prog)
+ _bpftool_get_prog_names
+ ;;
+ map)
+ _bpftool_get_map_names
+ ;;
+ esac
+ return 0
+ ;;
+ format)
+ COMPREPLY=( $( compgen -W "c raw" -- "$cur" ) )
+ ;;
+ *)
+ # emit extra options
+ case ${words[3]} in
+ id|file)
+ _bpftool_once_attr 'format'
+ ;;
+ map|prog)
+ if [[ ${words[3]} == "map" ]] && [[ $cword == 6 ]]; then
+ COMPREPLY+=( $( compgen -W "key value kv all" -- "$cur" ) )
+ fi
+ _bpftool_once_attr 'format'
+ ;;
+ *)
+ ;;
+ esac
+ return 0
+ ;;
+ esac
+ ;;
+ show|list)
+ case $prev in
+ $command)
+ COMPREPLY+=( $( compgen -W "id" -- "$cur" ) )
+ ;;
+ id)
+ _bpftool_get_btf_ids
+ ;;
+ esac
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'dump help show list' \
+ -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ gen)
+ case $command in
+ object)
+ _filedir
+ return 0
+ ;;
+ skeleton)
+ case $prev in
+ $command)
+ _filedir
+ return 0
+ ;;
+ *)
+ _bpftool_once_attr 'name'
+ return 0
+ ;;
+ esac
+ ;;
+ subskeleton)
+ case $prev in
+ $command)
+ _filedir
+ return 0
+ ;;
+ *)
+ _bpftool_once_attr 'name'
+ return 0
+ ;;
+ esac
+ ;;
+ min_core_btf)
+ _filedir
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'object skeleton subskeleton help min_core_btf' -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ cgroup)
+ case $command in
+ show|list|tree)
+ case $cword in
+ 3)
+ _filedir
+ ;;
+ 4)
+ COMPREPLY=( $( compgen -W 'effective' -- "$cur" ) )
+ ;;
+ esac
+ return 0
+ ;;
+ attach|detach)
+ local BPFTOOL_CGROUP_ATTACH_TYPES="$(bpftool feature list_builtins attach_types 2>/dev/null | \
+ grep '^cgroup_')"
+ local ATTACH_FLAGS='multi override'
+ local PROG_TYPE='id pinned tag name'
+ # Check for $prev = $command first
+ if [ $prev = $command ]; then
+ _filedir
+ return 0
+ # Then check for the attach type. This is done outside of
+ # the "case $prev in" to avoid writing the whole list of
+ # attach types again as a pattern to match (where we could
+ # not reuse our variable).
+ elif [[ $BPFTOOL_CGROUP_ATTACH_TYPES =~ $prev ]]; then
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
+ "$cur" ) )
+ return 0
+ fi
+ # case/esac for the other cases
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ return 0
+ ;;
+ *)
+ if ! _bpftool_search_list "$BPFTOOL_CGROUP_ATTACH_TYPES"; then
+ COMPREPLY=( $( compgen -W \
+ "$BPFTOOL_CGROUP_ATTACH_TYPES" -- "$cur" ) )
+ elif [[ "$command" == "attach" ]]; then
+ # We have an attach type on the command line,
+ # but it is not the previous word, or
+ # "id|pinned|tag|name" (we already checked for
+ # that). This should only leave the case where
+ # we need attach flags for the "attach" command.
+ _bpftool_one_of_list "$ATTACH_FLAGS"
+ fi
+ return 0
+ ;;
+ esac
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help attach detach \
+ show list tree' -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ perf)
+ case $command in
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help \
+ show list' -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ net)
+ local PROG_TYPE='id pinned tag name'
+ local ATTACH_TYPES='xdp xdpgeneric xdpdrv xdpoffload'
+ case $command in
+ show|list)
+ [[ $prev != "$command" ]] && return 0
+ COMPREPLY=( $( compgen -W 'dev' -- "$cur" ) )
+ return 0
+ ;;
+ attach)
+ case $cword in
+ 3)
+ COMPREPLY=( $( compgen -W "$ATTACH_TYPES" -- "$cur" ) )
+ return 0
+ ;;
+ 4)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ 5)
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ ;;
+ name)
+ _bpftool_get_prog_names
+ ;;
+ pinned)
+ _filedir
+ ;;
+ esac
+ return 0
+ ;;
+ 6)
+ COMPREPLY=( $( compgen -W 'dev' -- "$cur" ) )
+ return 0
+ ;;
+ 8)
+ _bpftool_once_attr 'overwrite'
+ return 0
+ ;;
+ esac
+ ;;
+ detach)
+ case $cword in
+ 3)
+ COMPREPLY=( $( compgen -W "$ATTACH_TYPES" -- "$cur" ) )
+ return 0
+ ;;
+ 4)
+ COMPREPLY=( $( compgen -W 'dev' -- "$cur" ) )
+ return 0
+ ;;
+ esac
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help \
+ show list attach detach' -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ feature)
+ case $command in
+ probe)
+ [[ $prev == "prefix" ]] && return 0
+ if _bpftool_search_list 'macros'; then
+ _bpftool_once_attr 'prefix'
+ else
+ COMPREPLY+=( $( compgen -W 'macros' -- "$cur" ) )
+ fi
+ _bpftool_one_of_list 'kernel dev'
+ _bpftool_once_attr 'full unprivileged'
+ return 0
+ ;;
+ list_builtins)
+ [[ $prev != "$command" ]] && return 0
+ COMPREPLY=( $( compgen -W 'prog_types map_types \
+ attach_types link_types helpers' -- "$cur" ) )
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help list_builtins probe' -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ link)
+ case $command in
+ show|list|pin|detach)
+ case $prev in
+ id)
+ _bpftool_get_link_ids
+ return 0
+ ;;
+ esac
+ ;;
+ esac
+
+ local LINK_TYPE='id pinned'
+ case $command in
+ show|list)
+ [[ $prev != "$command" ]] && return 0
+ COMPREPLY=( $( compgen -W "$LINK_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ pin|detach)
+ if [[ $prev == "$command" ]]; then
+ COMPREPLY=( $( compgen -W "$LINK_TYPE" -- "$cur" ) )
+ else
+ _filedir
+ fi
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help pin show list' -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ esac
+} &&
+complete -F _bpftool bpftool
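+
+# A quick way to try out changes to this script without installing it
+# system-wide is to source it into the current shell (path shown for
+# illustration) and then trigger completion:
+#   . tools/bpf/bpftool/bash-completion/bpftool
+#   bpftool pro<TAB>    # completes to "prog"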
+
+# ex: ts=4 sw=4 et filetype=sh
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
new file mode 100644
index 0000000000..91fcb75bab
--- /dev/null
+++ b/tools/bpf/bpftool/btf.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Facebook */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/err.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/btf.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/hashmap.h>
+#include <bpf/libbpf.h>
+
+#include "json_writer.h"
+#include "main.h"
+
+static const char * const btf_kind_str[NR_BTF_KINDS] = {
+ [BTF_KIND_UNKN] = "UNKNOWN",
+ [BTF_KIND_INT] = "INT",
+ [BTF_KIND_PTR] = "PTR",
+ [BTF_KIND_ARRAY] = "ARRAY",
+ [BTF_KIND_STRUCT] = "STRUCT",
+ [BTF_KIND_UNION] = "UNION",
+ [BTF_KIND_ENUM] = "ENUM",
+ [BTF_KIND_FWD] = "FWD",
+ [BTF_KIND_TYPEDEF] = "TYPEDEF",
+ [BTF_KIND_VOLATILE] = "VOLATILE",
+ [BTF_KIND_CONST] = "CONST",
+ [BTF_KIND_RESTRICT] = "RESTRICT",
+ [BTF_KIND_FUNC] = "FUNC",
+ [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
+ [BTF_KIND_VAR] = "VAR",
+ [BTF_KIND_DATASEC] = "DATASEC",
+ [BTF_KIND_FLOAT] = "FLOAT",
+ [BTF_KIND_DECL_TAG] = "DECL_TAG",
+ [BTF_KIND_TYPE_TAG] = "TYPE_TAG",
+ [BTF_KIND_ENUM64] = "ENUM64",
+};
+
+static const char *btf_int_enc_str(__u8 encoding)
+{
+ switch (encoding) {
+ case 0:
+ return "(none)";
+ case BTF_INT_SIGNED:
+ return "SIGNED";
+ case BTF_INT_CHAR:
+ return "CHAR";
+ case BTF_INT_BOOL:
+ return "BOOL";
+ default:
+ return "UNKN";
+ }
+}
+
+static const char *btf_var_linkage_str(__u32 linkage)
+{
+ switch (linkage) {
+ case BTF_VAR_STATIC:
+ return "static";
+ case BTF_VAR_GLOBAL_ALLOCATED:
+ return "global";
+ case BTF_VAR_GLOBAL_EXTERN:
+ return "extern";
+ default:
+ return "(unknown)";
+ }
+}
+
+static const char *btf_func_linkage_str(const struct btf_type *t)
+{
+ switch (btf_vlen(t)) {
+ case BTF_FUNC_STATIC:
+ return "static";
+ case BTF_FUNC_GLOBAL:
+ return "global";
+ case BTF_FUNC_EXTERN:
+ return "extern";
+ default:
+ return "(unknown)";
+ }
+}
+
+static const char *btf_str(const struct btf *btf, __u32 off)
+{
+ if (!off)
+ return "(anon)";
+ return btf__name_by_offset(btf, off) ? : "(invalid)";
+}
+
+static int btf_kind_safe(int kind)
+{
+ return kind <= BTF_KIND_MAX ? kind : BTF_KIND_UNKN;
+}
+
+static int dump_btf_type(const struct btf *btf, __u32 id,
+ const struct btf_type *t)
+{
+ json_writer_t *w = json_wtr;
+ int kind = btf_kind(t);
+
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_uint_field(w, "id", id);
+ jsonw_string_field(w, "kind", btf_kind_str[btf_kind_safe(kind)]);
+ jsonw_string_field(w, "name", btf_str(btf, t->name_off));
+ } else {
+ printf("[%u] %s '%s'", id, btf_kind_str[btf_kind_safe(kind)],
+ btf_str(btf, t->name_off));
+ }
+
+ switch (kind) {
+ case BTF_KIND_INT: {
+ __u32 v = *(__u32 *)(t + 1);
+ const char *enc;
+
+ enc = btf_int_enc_str(BTF_INT_ENCODING(v));
+
+ if (json_output) {
+ jsonw_uint_field(w, "size", t->size);
+ jsonw_uint_field(w, "bits_offset", BTF_INT_OFFSET(v));
+ jsonw_uint_field(w, "nr_bits", BTF_INT_BITS(v));
+ jsonw_string_field(w, "encoding", enc);
+ } else {
+ printf(" size=%u bits_offset=%u nr_bits=%u encoding=%s",
+ t->size, BTF_INT_OFFSET(v), BTF_INT_BITS(v),
+ enc);
+ }
+ break;
+ }
+ case BTF_KIND_PTR:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_TYPE_TAG:
+ if (json_output)
+ jsonw_uint_field(w, "type_id", t->type);
+ else
+ printf(" type_id=%u", t->type);
+ break;
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *arr = (const void *)(t + 1);
+
+ if (json_output) {
+ jsonw_uint_field(w, "type_id", arr->type);
+ jsonw_uint_field(w, "index_type_id", arr->index_type);
+ jsonw_uint_field(w, "nr_elems", arr->nelems);
+ } else {
+ printf(" type_id=%u index_type_id=%u nr_elems=%u",
+ arr->type, arr->index_type, arr->nelems);
+ }
+ break;
+ }
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ const struct btf_member *m = (const void *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ int i;
+
+ if (json_output) {
+ jsonw_uint_field(w, "size", t->size);
+ jsonw_uint_field(w, "vlen", vlen);
+ jsonw_name(w, "members");
+ jsonw_start_array(w);
+ } else {
+ printf(" size=%u vlen=%u", t->size, vlen);
+ }
+ for (i = 0; i < vlen; i++, m++) {
+ const char *name = btf_str(btf, m->name_off);
+ __u32 bit_off, bit_sz;
+
+ if (BTF_INFO_KFLAG(t->info)) {
+ bit_off = BTF_MEMBER_BIT_OFFSET(m->offset);
+ bit_sz = BTF_MEMBER_BITFIELD_SIZE(m->offset);
+ } else {
+ bit_off = m->offset;
+ bit_sz = 0;
+ }
+
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_string_field(w, "name", name);
+ jsonw_uint_field(w, "type_id", m->type);
+ jsonw_uint_field(w, "bits_offset", bit_off);
+ if (bit_sz) {
+ jsonw_uint_field(w, "bitfield_size",
+ bit_sz);
+ }
+ jsonw_end_object(w);
+ } else {
+ printf("\n\t'%s' type_id=%u bits_offset=%u",
+ name, m->type, bit_off);
+ if (bit_sz)
+ printf(" bitfield_size=%u", bit_sz);
+ }
+ }
+ if (json_output)
+ jsonw_end_array(w);
+ break;
+ }
+ case BTF_KIND_ENUM: {
+ const struct btf_enum *v = (const void *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ const char *encoding;
+ int i;
+
+ encoding = btf_kflag(t) ? "SIGNED" : "UNSIGNED";
+ if (json_output) {
+ jsonw_string_field(w, "encoding", encoding);
+ jsonw_uint_field(w, "size", t->size);
+ jsonw_uint_field(w, "vlen", vlen);
+ jsonw_name(w, "values");
+ jsonw_start_array(w);
+ } else {
+ printf(" encoding=%s size=%u vlen=%u", encoding, t->size, vlen);
+ }
+ for (i = 0; i < vlen; i++, v++) {
+ const char *name = btf_str(btf, v->name_off);
+
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_string_field(w, "name", name);
+ if (btf_kflag(t))
+ jsonw_int_field(w, "val", v->val);
+ else
+ jsonw_uint_field(w, "val", v->val);
+ jsonw_end_object(w);
+ } else {
+ if (btf_kflag(t))
+ printf("\n\t'%s' val=%d", name, v->val);
+ else
+ printf("\n\t'%s' val=%u", name, v->val);
+ }
+ }
+ if (json_output)
+ jsonw_end_array(w);
+ break;
+ }
+ case BTF_KIND_ENUM64: {
+ const struct btf_enum64 *v = btf_enum64(t);
+ __u16 vlen = btf_vlen(t);
+ const char *encoding;
+ int i;
+
+ encoding = btf_kflag(t) ? "SIGNED" : "UNSIGNED";
+ if (json_output) {
+ jsonw_string_field(w, "encoding", encoding);
+ jsonw_uint_field(w, "size", t->size);
+ jsonw_uint_field(w, "vlen", vlen);
+ jsonw_name(w, "values");
+ jsonw_start_array(w);
+ } else {
+ printf(" encoding=%s size=%u vlen=%u", encoding, t->size, vlen);
+ }
+ for (i = 0; i < vlen; i++, v++) {
+ const char *name = btf_str(btf, v->name_off);
+ __u64 val = ((__u64)v->val_hi32 << 32) | v->val_lo32;
+
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_string_field(w, "name", name);
+ if (btf_kflag(t))
+ jsonw_int_field(w, "val", val);
+ else
+ jsonw_uint_field(w, "val", val);
+ jsonw_end_object(w);
+ } else {
+ if (btf_kflag(t))
+ printf("\n\t'%s' val=%lldLL", name,
+ (unsigned long long)val);
+ else
+ printf("\n\t'%s' val=%lluULL", name,
+ (unsigned long long)val);
+ }
+ }
+ if (json_output)
+ jsonw_end_array(w);
+ break;
+ }
+ case BTF_KIND_FWD: {
+ const char *fwd_kind = BTF_INFO_KFLAG(t->info) ? "union"
+ : "struct";
+
+ if (json_output)
+ jsonw_string_field(w, "fwd_kind", fwd_kind);
+ else
+ printf(" fwd_kind=%s", fwd_kind);
+ break;
+ }
+ case BTF_KIND_FUNC: {
+ const char *linkage = btf_func_linkage_str(t);
+
+ if (json_output) {
+ jsonw_uint_field(w, "type_id", t->type);
+ jsonw_string_field(w, "linkage", linkage);
+ } else {
+ printf(" type_id=%u linkage=%s", t->type, linkage);
+ }
+ break;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *p = (const void *)(t + 1);
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ int i;
+
+ if (json_output) {
+ jsonw_uint_field(w, "ret_type_id", t->type);
+ jsonw_uint_field(w, "vlen", vlen);
+ jsonw_name(w, "params");
+ jsonw_start_array(w);
+ } else {
+ printf(" ret_type_id=%u vlen=%u", t->type, vlen);
+ }
+ for (i = 0; i < vlen; i++, p++) {
+ const char *name = btf_str(btf, p->name_off);
+
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_string_field(w, "name", name);
+ jsonw_uint_field(w, "type_id", p->type);
+ jsonw_end_object(w);
+ } else {
+ printf("\n\t'%s' type_id=%u", name, p->type);
+ }
+ }
+ if (json_output)
+ jsonw_end_array(w);
+ break;
+ }
+ case BTF_KIND_VAR: {
+ const struct btf_var *v = (const void *)(t + 1);
+ const char *linkage;
+
+ linkage = btf_var_linkage_str(v->linkage);
+
+ if (json_output) {
+ jsonw_uint_field(w, "type_id", t->type);
+ jsonw_string_field(w, "linkage", linkage);
+ } else {
+ printf(" type_id=%u, linkage=%s", t->type, linkage);
+ }
+ break;
+ }
+ case BTF_KIND_DATASEC: {
+ const struct btf_var_secinfo *v = (const void *)(t + 1);
+ const struct btf_type *vt;
+ __u16 vlen = BTF_INFO_VLEN(t->info);
+ int i;
+
+ if (json_output) {
+ jsonw_uint_field(w, "size", t->size);
+ jsonw_uint_field(w, "vlen", vlen);
+ jsonw_name(w, "vars");
+ jsonw_start_array(w);
+ } else {
+ printf(" size=%u vlen=%u", t->size, vlen);
+ }
+ for (i = 0; i < vlen; i++, v++) {
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_uint_field(w, "type_id", v->type);
+ jsonw_uint_field(w, "offset", v->offset);
+ jsonw_uint_field(w, "size", v->size);
+ jsonw_end_object(w);
+ } else {
+ printf("\n\ttype_id=%u offset=%u size=%u",
+ v->type, v->offset, v->size);
+
+ if (v->type < btf__type_cnt(btf)) {
+ vt = btf__type_by_id(btf, v->type);
+ printf(" (%s '%s')",
+ btf_kind_str[btf_kind_safe(btf_kind(vt))],
+ btf_str(btf, vt->name_off));
+ }
+ }
+ }
+ if (json_output)
+ jsonw_end_array(w);
+ break;
+ }
+ case BTF_KIND_FLOAT: {
+ if (json_output)
+ jsonw_uint_field(w, "size", t->size);
+ else
+ printf(" size=%u", t->size);
+ break;
+ }
+ case BTF_KIND_DECL_TAG: {
+ const struct btf_decl_tag *tag = (const void *)(t + 1);
+
+ if (json_output) {
+ jsonw_uint_field(w, "type_id", t->type);
+ jsonw_int_field(w, "component_idx", tag->component_idx);
+ } else {
+ printf(" type_id=%u component_idx=%d", t->type, tag->component_idx);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+
+ return 0;
+}
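+
+/* For reference, the plain (non-JSON) output built above looks like the
+ * following (type IDs and names are illustrative):
+ *
+ *   [1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
+ *   [2] STRUCT 'pair' size=8 vlen=2
+ *           'lo' type_id=1 bits_offset=0
+ *           'hi' type_id=1 bits_offset=32
+ */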
+
+static int dump_btf_raw(const struct btf *btf,
+ __u32 *root_type_ids, int root_type_cnt)
+{
+ const struct btf_type *t;
+ int i;
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_name(json_wtr, "types");
+ jsonw_start_array(json_wtr);
+ }
+
+ if (root_type_cnt) {
+ for (i = 0; i < root_type_cnt; i++) {
+ t = btf__type_by_id(btf, root_type_ids[i]);
+ dump_btf_type(btf, root_type_ids[i], t);
+ }
+ } else {
+ const struct btf *base;
+ int cnt = btf__type_cnt(btf);
+ int start_id = 1;
+
+ base = btf__base_btf(btf);
+ if (base)
+ start_id = btf__type_cnt(base);
+
+ for (i = start_id; i < cnt; i++) {
+ t = btf__type_by_id(btf, i);
+ dump_btf_type(btf, i, t);
+ }
+ }
+
+ if (json_output) {
+ jsonw_end_array(json_wtr);
+ jsonw_end_object(json_wtr);
+ }
+ return 0;
+}
+
+static void __printf(2, 0) btf_dump_printf(void *ctx,
+ const char *fmt, va_list args)
+{
+ vfprintf(stdout, fmt, args);
+}
+
+static int dump_btf_c(const struct btf *btf,
+ __u32 *root_type_ids, int root_type_cnt)
+{
+ struct btf_dump *d;
+ int err = 0, i;
+
+ d = btf_dump__new(btf, btf_dump_printf, NULL, NULL);
+ if (!d)
+ return -errno;
+
+ printf("#ifndef __VMLINUX_H__\n");
+ printf("#define __VMLINUX_H__\n");
+ printf("\n");
+ printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
+ printf("#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)\n");
+ printf("#endif\n\n");
+
+ if (root_type_cnt) {
+ for (i = 0; i < root_type_cnt; i++) {
+ err = btf_dump__dump_type(d, root_type_ids[i]);
+ if (err)
+ goto done;
+ }
+ } else {
+ int cnt = btf__type_cnt(btf);
+
+ for (i = 1; i < cnt; i++) {
+ err = btf_dump__dump_type(d, i);
+ if (err)
+ goto done;
+ }
+ }
+
+ printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n");
+ printf("#pragma clang attribute pop\n");
+ printf("#endif\n");
+ printf("\n");
+ printf("#endif /* __VMLINUX_H__ */\n");
+
+done:
+ btf_dump__free(d);
+ return err;
+}
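+
+/* A typical use of the C-format dump is generating a vmlinux.h header for
+ * BPF CO-RE programs:
+ *
+ *   $ bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h
+ */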
+
+static const char sysfs_vmlinux[] = "/sys/kernel/btf/vmlinux";
+
+static struct btf *get_vmlinux_btf_from_sysfs(void)
+{
+ struct btf *base;
+
+ base = btf__parse(sysfs_vmlinux, NULL);
+ if (!base)
+ p_err("failed to parse vmlinux BTF at '%s': %d\n",
+ sysfs_vmlinux, -errno);
+
+ return base;
+}
+
+#define BTF_NAME_BUFF_LEN 64
+
+static bool btf_is_kernel_module(__u32 btf_id)
+{
+ struct bpf_btf_info btf_info = {};
+ char btf_name[BTF_NAME_BUFF_LEN];
+ int btf_fd;
+ __u32 len;
+ int err;
+
+ btf_fd = bpf_btf_get_fd_by_id(btf_id);
+ if (btf_fd < 0) {
+ p_err("can't get BTF object by id (%u): %s", btf_id, strerror(errno));
+ return false;
+ }
+
+ len = sizeof(btf_info);
+ btf_info.name = ptr_to_u64(btf_name);
+ btf_info.name_len = sizeof(btf_name);
+ err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
+ close(btf_fd);
+ if (err) {
+ p_err("can't get BTF (ID %u) object info: %s", btf_id, strerror(errno));
+ return false;
+ }
+
+ return btf_info.kernel_btf && strncmp(btf_name, "vmlinux", sizeof(btf_name)) != 0;
+}
+
+static int do_dump(int argc, char **argv)
+{
+ struct btf *btf = NULL, *base = NULL;
+ __u32 root_type_ids[2];
+ int root_type_cnt = 0;
+ bool dump_c = false;
+ __u32 btf_id = -1;
+ const char *src;
+ int fd = -1;
+ int err = 0;
+
+ if (!REQ_ARGS(2)) {
+ usage();
+ return -1;
+ }
+ src = GET_ARG();
+ if (is_prefix(src, "map")) {
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+
+ if (!REQ_ARGS(2)) {
+ usage();
+ return -1;
+ }
+
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ if (fd < 0)
+ return -1;
+
+ btf_id = info.btf_id;
+ if (argc && is_prefix(*argv, "key")) {
+ root_type_ids[root_type_cnt++] = info.btf_key_type_id;
+ NEXT_ARG();
+ } else if (argc && is_prefix(*argv, "value")) {
+ root_type_ids[root_type_cnt++] = info.btf_value_type_id;
+ NEXT_ARG();
+ } else if (argc && is_prefix(*argv, "all")) {
+ NEXT_ARG();
+ } else if (argc && is_prefix(*argv, "kv")) {
+ root_type_ids[root_type_cnt++] = info.btf_key_type_id;
+ root_type_ids[root_type_cnt++] = info.btf_value_type_id;
+ NEXT_ARG();
+ } else {
+ root_type_ids[root_type_cnt++] = info.btf_key_type_id;
+ root_type_ids[root_type_cnt++] = info.btf_value_type_id;
+ }
+ } else if (is_prefix(src, "prog")) {
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+
+ if (!REQ_ARGS(2)) {
+ usage();
+ return -1;
+ }
+
+ fd = prog_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return -1;
+
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get prog info: %s", strerror(errno));
+ goto done;
+ }
+
+ btf_id = info.btf_id;
+ } else if (is_prefix(src, "id")) {
+ char *endptr;
+
+ btf_id = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as ID", *argv);
+ return -1;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(src, "file")) {
+ const char sysfs_prefix[] = "/sys/kernel/btf/";
+
+ if (!base_btf &&
+ strncmp(*argv, sysfs_prefix, sizeof(sysfs_prefix) - 1) == 0 &&
+ strcmp(*argv, sysfs_vmlinux) != 0)
+ base = get_vmlinux_btf_from_sysfs();
+
+ btf = btf__parse_split(*argv, base ?: base_btf);
+ if (!btf) {
+ err = -errno;
+ p_err("failed to load BTF from %s: %s",
+ *argv, strerror(errno));
+ goto done;
+ }
+ NEXT_ARG();
+ } else {
+ err = -1;
+ p_err("unrecognized BTF source specifier: '%s'", src);
+ goto done;
+ }
+
+ while (argc) {
+ if (is_prefix(*argv, "format")) {
+ NEXT_ARG();
+ if (argc < 1) {
+ p_err("expecting value for 'format' option\n");
+ err = -EINVAL;
+ goto done;
+ }
+ if (strcmp(*argv, "c") == 0) {
+ dump_c = true;
+ } else if (strcmp(*argv, "raw") == 0) {
+ dump_c = false;
+ } else {
+ p_err("unrecognized format specifier: '%s', possible values: raw, c",
+ *argv);
+ err = -EINVAL;
+ goto done;
+ }
+ NEXT_ARG();
+ } else {
+ p_err("unrecognized option: '%s'", *argv);
+ err = -EINVAL;
+ goto done;
+ }
+ }
+
+ if (!btf) {
+ if (!base_btf && btf_is_kernel_module(btf_id)) {
+ p_info("Warning: valid base BTF was not specified with -B option, falling back to standard base BTF (%s)",
+ sysfs_vmlinux);
+ base_btf = get_vmlinux_btf_from_sysfs();
+ }
+
+ btf = btf__load_from_kernel_by_id_split(btf_id, base_btf);
+ if (!btf) {
+ err = -errno;
+ p_err("get btf by id (%u): %s", btf_id, strerror(errno));
+ goto done;
+ }
+ }
+
+ if (dump_c) {
+ if (json_output) {
+ p_err("JSON output for C-syntax dump is not supported");
+ err = -ENOTSUP;
+ goto done;
+ }
+ err = dump_btf_c(btf, root_type_ids, root_type_cnt);
+ } else {
+ err = dump_btf_raw(btf, root_type_ids, root_type_cnt);
+ }
+
+done:
+ close(fd);
+ btf__free(btf);
+ btf__free(base);
+ return err;
+}
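+
+/* Example invocations handled by do_dump() above (IDs are illustrative):
+ *
+ *   $ bpftool btf dump id 1                   # raw dump of BTF object 1
+ *   $ bpftool btf dump map id 64 key          # only the map's key type
+ *   $ bpftool btf dump prog id 12 format c    # C-syntax dump for a program
+ */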
+
+static int btf_parse_fd(int *argc, char ***argv)
+{
+ unsigned int id;
+ char *endptr;
+ int fd;
+
+ if (!is_prefix(*argv[0], "id")) {
+ p_err("expected 'id', got: '%s'?", **argv);
+ return -1;
+ }
+ NEXT_ARGP();
+
+ id = strtoul(**argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as ID", **argv);
+ return -1;
+ }
+ NEXT_ARGP();
+
+ fd = bpf_btf_get_fd_by_id(id);
+ if (fd < 0)
+ p_err("can't get BTF object by id (%u): %s",
+ id, strerror(errno));
+
+ return fd;
+}
+
+static int
+build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type,
+ void *info, __u32 *len)
+{
+ static const char * const names[] = {
+ [BPF_OBJ_UNKNOWN] = "unknown",
+ [BPF_OBJ_PROG] = "prog",
+ [BPF_OBJ_MAP] = "map",
+ };
+ __u32 btf_id, id = 0;
+ int err;
+ int fd;
+
+ while (true) {
+ switch (type) {
+ case BPF_OBJ_PROG:
+ err = bpf_prog_get_next_id(id, &id);
+ break;
+ case BPF_OBJ_MAP:
+ err = bpf_map_get_next_id(id, &id);
+ break;
+ default:
+ err = -1;
+ p_err("unexpected object type: %d", type);
+ goto err_free;
+ }
+ if (err) {
+ if (errno == ENOENT) {
+ err = 0;
+ break;
+ }
+ p_err("can't get next %s: %s%s", names[type],
+ strerror(errno),
+ errno == EINVAL ? " -- kernel too old?" : "");
+ goto err_free;
+ }
+
+ switch (type) {
+ case BPF_OBJ_PROG:
+ fd = bpf_prog_get_fd_by_id(id);
+ break;
+ case BPF_OBJ_MAP:
+ fd = bpf_map_get_fd_by_id(id);
+ break;
+ default:
+ err = -1;
+ p_err("unexpected object type: %d", type);
+ goto err_free;
+ }
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
+ p_err("can't get %s by id (%u): %s", names[type], id,
+ strerror(errno));
+ err = -1;
+ goto err_free;
+ }
+
+ memset(info, 0, *len);
+ if (type == BPF_OBJ_PROG)
+ err = bpf_prog_get_info_by_fd(fd, info, len);
+ else
+ err = bpf_map_get_info_by_fd(fd, info, len);
+ close(fd);
+ if (err) {
+ p_err("can't get %s info: %s", names[type],
+ strerror(errno));
+ goto err_free;
+ }
+
+ switch (type) {
+ case BPF_OBJ_PROG:
+ btf_id = ((struct bpf_prog_info *)info)->btf_id;
+ break;
+ case BPF_OBJ_MAP:
+ btf_id = ((struct bpf_map_info *)info)->btf_id;
+ break;
+ default:
+ err = -1;
+ p_err("unexpected object type: %d", type);
+ goto err_free;
+ }
+ if (!btf_id)
+ continue;
+
+ err = hashmap__append(tab, btf_id, id);
+ if (err) {
+ p_err("failed to append entry to hashmap for BTF ID %u, object ID %u: %s",
+ btf_id, id, strerror(-err));
+ goto err_free;
+ }
+ }
+
+ return 0;
+
+err_free:
+ hashmap__free(tab);
+ return err;
+}
+
+static int
+build_btf_tables(struct hashmap *btf_prog_table,
+ struct hashmap *btf_map_table)
+{
+ struct bpf_prog_info prog_info;
+ __u32 prog_len = sizeof(prog_info);
+ struct bpf_map_info map_info;
+ __u32 map_len = sizeof(map_info);
+ int err = 0;
+
+ err = build_btf_type_table(btf_prog_table, BPF_OBJ_PROG, &prog_info,
+ &prog_len);
+ if (err)
+ return err;
+
+ err = build_btf_type_table(btf_map_table, BPF_OBJ_MAP, &map_info,
+ &map_len);
+ if (err) {
+ hashmap__free(btf_prog_table);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+show_btf_plain(struct bpf_btf_info *info, int fd,
+ struct hashmap *btf_prog_table,
+ struct hashmap *btf_map_table)
+{
+ struct hashmap_entry *entry;
+ const char *name = u64_to_ptr(info->name);
+ int n;
+
+ printf("%u: ", info->id);
+ if (info->kernel_btf)
+ printf("name [%s] ", name);
+ else if (name && name[0])
+ printf("name %s ", name);
+ else
+ printf("name <anon> ");
+ printf("size %uB", info->btf_size);
+
+ n = 0;
+ hashmap__for_each_key_entry(btf_prog_table, entry, info->id) {
+ printf("%s%lu", n++ == 0 ? " prog_ids " : ",", entry->value);
+ }
+
+ n = 0;
+ hashmap__for_each_key_entry(btf_map_table, entry, info->id) {
+ printf("%s%lu", n++ == 0 ? " map_ids " : ",", entry->value);
+ }
+
+ emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
+
+ printf("\n");
+}
+
+static void
+show_btf_json(struct bpf_btf_info *info, int fd,
+ struct hashmap *btf_prog_table,
+ struct hashmap *btf_map_table)
+{
+ struct hashmap_entry *entry;
+ const char *name = u64_to_ptr(info->name);
+
+ jsonw_start_object(json_wtr); /* btf object */
+ jsonw_uint_field(json_wtr, "id", info->id);
+ jsonw_uint_field(json_wtr, "size", info->btf_size);
+
+ jsonw_name(json_wtr, "prog_ids");
+ jsonw_start_array(json_wtr); /* prog_ids */
+ hashmap__for_each_key_entry(btf_prog_table, entry, info->id) {
+ jsonw_uint(json_wtr, entry->value);
+ }
+ jsonw_end_array(json_wtr); /* prog_ids */
+
+ jsonw_name(json_wtr, "map_ids");
+ jsonw_start_array(json_wtr); /* map_ids */
+ hashmap__for_each_key_entry(btf_map_table, entry, info->id) {
+ jsonw_uint(json_wtr, entry->value);
+ }
+ jsonw_end_array(json_wtr); /* map_ids */
+
+ emit_obj_refs_json(refs_table, info->id, json_wtr); /* pids */
+
+ jsonw_bool_field(json_wtr, "kernel", info->kernel_btf);
+
+ if (name && name[0])
+ jsonw_string_field(json_wtr, "name", name);
+
+ jsonw_end_object(json_wtr); /* btf object */
+}
+
+static int
+show_btf(int fd, struct hashmap *btf_prog_table,
+ struct hashmap *btf_map_table)
+{
+ struct bpf_btf_info info;
+ __u32 len = sizeof(info);
+ char name[64];
+ int err;
+
+ memset(&info, 0, sizeof(info));
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get BTF object info: %s", strerror(errno));
+ return -1;
+ }
+ /* if the kernel supports emitting the BTF object name, pass a name pointer */
+ if (info.name_len) {
+ memset(&info, 0, sizeof(info));
+ info.name_len = sizeof(name);
+ info.name = ptr_to_u64(name);
+ len = sizeof(info);
+
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get BTF object info: %s", strerror(errno));
+ return -1;
+ }
+ }
+
+ if (json_output)
+ show_btf_json(&info, fd, btf_prog_table, btf_map_table);
+ else
+ show_btf_plain(&info, fd, btf_prog_table, btf_map_table);
+
+ return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+ struct hashmap *btf_prog_table;
+ struct hashmap *btf_map_table;
+ int err, fd = -1;
+ __u32 id = 0;
+
+ if (argc == 2) {
+ fd = btf_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return -1;
+ }
+
+ if (argc) {
+ if (fd >= 0)
+ close(fd);
+ return BAD_ARG();
+ }
+
+ btf_prog_table = hashmap__new(hash_fn_for_key_as_id,
+ equal_fn_for_key_as_id, NULL);
+ btf_map_table = hashmap__new(hash_fn_for_key_as_id,
+ equal_fn_for_key_as_id, NULL);
+ if (IS_ERR(btf_prog_table) || IS_ERR(btf_map_table)) {
+ hashmap__free(btf_prog_table);
+ hashmap__free(btf_map_table);
+ if (fd >= 0)
+ close(fd);
+ p_err("failed to create hashmap for object references");
+ return -1;
+ }
+ err = build_btf_tables(btf_prog_table, btf_map_table);
+ if (err) {
+ if (fd >= 0)
+ close(fd);
+ return err;
+ }
+ build_obj_refs_table(&refs_table, BPF_OBJ_BTF);
+
+ if (fd >= 0) {
+ err = show_btf(fd, btf_prog_table, btf_map_table);
+ close(fd);
+ goto exit_free;
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr); /* root array */
+
+ while (true) {
+ err = bpf_btf_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT) {
+ err = 0;
+ break;
+ }
+ p_err("can't get next BTF object: %s%s",
+ strerror(errno),
+ errno == EINVAL ? " -- kernel too old?" : "");
+ err = -1;
+ break;
+ }
+
+ fd = bpf_btf_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
+ p_err("can't get BTF object by id (%u): %s",
+ id, strerror(errno));
+ err = -1;
+ break;
+ }
+
+ err = show_btf(fd, btf_prog_table, btf_map_table);
+ close(fd);
+ if (err)
+ break;
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr); /* root array */
+
+exit_free:
+ hashmap__free(btf_prog_table);
+ hashmap__free(btf_map_table);
+ delete_obj_refs_table(refs_table);
+
+ return err;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %1$s %2$s { show | list } [id BTF_ID]\n"
+ " %1$s %2$s dump BTF_SRC [format FORMAT]\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n"
+ " FORMAT := { raw | c }\n"
+ " " HELP_SPEC_MAP "\n"
+ " " HELP_SPEC_PROGRAM "\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-B|--base-btf} }\n"
+ "",
+ bin_name, "btf");
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "help", do_help },
+ { "dump", do_dump },
+ { 0 }
+};
+
+int do_btf(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
new file mode 100644
index 0000000000..1b7f697146
--- /dev/null
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -0,0 +1,906 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (c) 2018 Facebook */
+
+#include <ctype.h>
+#include <stdio.h> /* for (FILE *) used by json_writer */
+#include <string.h>
+#include <unistd.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/btf.h>
+#include <linux/err.h>
+#include <bpf/btf.h>
+#include <bpf/bpf.h>
+
+#include "json_writer.h"
+#include "main.h"
+
+#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
+#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
+#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
+#define BITS_ROUNDUP_BYTES(bits) \
+ (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
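+/* e.g. BITS_ROUNDDOWN_BYTES(13) == 1 and BITS_ROUNDUP_BYTES(13) == 2:
+ * 13 bits cover one full byte plus 5 leftover bits.
+ */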
+
+static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
+ __u8 bit_offset, const void *data);
+
+static int btf_dump_func(const struct btf *btf, char *func_sig,
+ const struct btf_type *func_proto,
+ const struct btf_type *func, int pos, int size);
+
+static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
+ const struct btf_type *func_proto,
+ __u32 prog_id)
+{
+ const struct btf_type *func_type;
+ int prog_fd = -1, func_sig_len;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ const char *prog_name = NULL;
+ struct btf *prog_btf = NULL;
+ struct bpf_func_info finfo;
+ __u32 finfo_rec_size;
+ char prog_str[1024];
+ int err;
+
+ /* Get the ptr's func_proto */
+ func_sig_len = btf_dump_func(d->btf, prog_str, func_proto, NULL, 0,
+ sizeof(prog_str));
+ if (func_sig_len == -1)
+ return -1;
+
+ if (!prog_id)
+ goto print;
+
+ /* Get the bpf_prog's name. Obtain from func_info. */
+ prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (prog_fd < 0)
+ goto print;
+
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err)
+ goto print;
+
+ if (!info.btf_id || !info.nr_func_info)
+ goto print;
+
+ finfo_rec_size = info.func_info_rec_size;
+ memset(&info, 0, sizeof(info));
+ info.nr_func_info = 1;
+ info.func_info_rec_size = finfo_rec_size;
+ info.func_info = ptr_to_u64(&finfo);
+
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err)
+ goto print;
+
+ prog_btf = btf__load_from_kernel_by_id(info.btf_id);
+ if (!prog_btf)
+ goto print;
+ func_type = btf__type_by_id(prog_btf, finfo.type_id);
+ if (!func_type || !btf_is_func(func_type))
+ goto print;
+
+ prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
+
+print:
+ if (!prog_id)
+ snprintf(&prog_str[func_sig_len],
+ sizeof(prog_str) - func_sig_len, " 0");
+ else if (prog_name)
+ snprintf(&prog_str[func_sig_len],
+ sizeof(prog_str) - func_sig_len,
+ " %s/prog_id:%u", prog_name, prog_id);
+ else
+ snprintf(&prog_str[func_sig_len],
+ sizeof(prog_str) - func_sig_len,
+ " <unknown_prog_name>/prog_id:%u", prog_id);
+
+ prog_str[sizeof(prog_str) - 1] = '\0';
+ jsonw_string(d->jw, prog_str);
+ btf__free(prog_btf);
+ if (prog_fd >= 0)
+ close(prog_fd);
+ return 0;
+}
+
+static void btf_dumper_ptr(const struct btf_dumper *d,
+ const struct btf_type *t,
+ const void *data)
+{
+ unsigned long value = *(unsigned long *)data;
+ const struct btf_type *ptr_type;
+ __s32 ptr_type_id;
+
+ if (!d->prog_id_as_func_ptr || value > UINT32_MAX)
+ goto print_ptr_value;
+
+ ptr_type_id = btf__resolve_type(d->btf, t->type);
+ if (ptr_type_id < 0)
+ goto print_ptr_value;
+ ptr_type = btf__type_by_id(d->btf, ptr_type_id);
+ if (!ptr_type || !btf_is_func_proto(ptr_type))
+ goto print_ptr_value;
+
+ if (!dump_prog_id_as_func_ptr(d, ptr_type, value))
+ return;
+
+print_ptr_value:
+ if (d->is_plain_text)
+ jsonw_printf(d->jw, "%p", (void *)value);
+ else
+ jsonw_printf(d->jw, "%lu", value);
+}
+
+static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
+ __u8 bit_offset, const void *data)
+{
+ int actual_type_id;
+
+ actual_type_id = btf__resolve_type(d->btf, type_id);
+ if (actual_type_id < 0)
+ return actual_type_id;
+
+ return btf_dumper_do_type(d, actual_type_id, bit_offset, data);
+}
+
+static int btf_dumper_enum(const struct btf_dumper *d,
+ const struct btf_type *t,
+ const void *data)
+{
+ const struct btf_enum *enums = btf_enum(t);
+ __s64 value;
+ __u16 i;
+
+ switch (t->size) {
+ case 8:
+ value = *(__s64 *)data;
+ break;
+ case 4:
+ value = *(__s32 *)data;
+ break;
+ case 2:
+ value = *(__s16 *)data;
+ break;
+ case 1:
+ value = *(__s8 *)data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < btf_vlen(t); i++) {
+ if (value == enums[i].val) {
+ jsonw_string(d->jw,
+ btf__name_by_offset(d->btf,
+ enums[i].name_off));
+ return 0;
+ }
+ }
+
+ jsonw_int(d->jw, value);
+ return 0;
+}
+
+static int btf_dumper_enum64(const struct btf_dumper *d,
+ const struct btf_type *t,
+ const void *data)
+{
+ const struct btf_enum64 *enums = btf_enum64(t);
+ __u32 val_lo32, val_hi32;
+ __u64 value;
+ __u16 i;
+
+ value = *(__u64 *)data;
+ val_lo32 = (__u32)value;
+ val_hi32 = value >> 32;
+
+ for (i = 0; i < btf_vlen(t); i++) {
+ if (val_lo32 == enums[i].val_lo32 && val_hi32 == enums[i].val_hi32) {
+ jsonw_string(d->jw,
+ btf__name_by_offset(d->btf,
+ enums[i].name_off));
+ return 0;
+ }
+ }
+
+ jsonw_int(d->jw, value);
+ return 0;
+}
+
+static bool is_str_array(const struct btf *btf, const struct btf_array *arr,
+ const char *s)
+{
+ const struct btf_type *elem_type;
+ const char *end_s;
+
+ if (!arr->nelems)
+ return false;
+
+ elem_type = btf__type_by_id(btf, arr->type);
+ /* Do not skip typedefs here: a typedef to char does not
+ * count as a string.
+ */
+ while (elem_type && btf_is_mod(elem_type))
+ elem_type = btf__type_by_id(btf, elem_type->type);
+
+ if (!elem_type || !btf_is_int(elem_type) || elem_type->size != 1)
+ return false;
+
+ if (btf_int_encoding(elem_type) != BTF_INT_CHAR &&
+ strcmp("char", btf__name_by_offset(btf, elem_type->name_off)))
+ return false;
+
+ end_s = s + arr->nelems;
+ while (s < end_s) {
+ if (!*s)
+ return true;
+ if (*s <= 0x1f || *s >= 0x7f)
+ return false;
+ s++;
+ }
+
+ /* '\0' is not found */
+ return false;
+}
+
+static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ const struct btf_type *t = btf__type_by_id(d->btf, type_id);
+ struct btf_array *arr = (struct btf_array *)(t + 1);
+ long long elem_size;
+ int ret = 0;
+ __u32 i;
+
+ if (is_str_array(d->btf, arr, data)) {
+ jsonw_string(d->jw, data);
+ return 0;
+ }
+
+ elem_size = btf__resolve_size(d->btf, arr->type);
+ if (elem_size < 0)
+ return elem_size;
+
+ jsonw_start_array(d->jw);
+ for (i = 0; i < arr->nelems; i++) {
+ ret = btf_dumper_do_type(d, arr->type, 0,
+ data + i * elem_size);
+ if (ret)
+ break;
+ }
+
+ jsonw_end_array(d->jw);
+ return ret;
+}
+
+static void btf_int128_print(json_writer_t *jw, const void *data,
+ bool is_plain_text)
+{
+ /* data points to a __int128 number.
+ * Suppose
+ * int128_num = *(__int128 *)data;
+ * The formulas below show what upper_num and lower_num represent:
+ * upper_num = int128_num >> 64;
+ * lower_num = int128_num & 0xffffffffFFFFFFFFULL;
+ */
+ __u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = *(__u64 *)data;
+ lower_num = *(__u64 *)(data + 8);
+#else
+ upper_num = *(__u64 *)(data + 8);
+ lower_num = *(__u64 *)data;
+#endif
+
+ if (is_plain_text) {
+ if (upper_num == 0)
+ jsonw_printf(jw, "0x%llx", lower_num);
+ else
+ jsonw_printf(jw, "0x%llx%016llx", upper_num, lower_num);
+ } else {
+ if (upper_num == 0)
+ jsonw_printf(jw, "\"0x%llx\"", lower_num);
+ else
+ jsonw_printf(jw, "\"0x%llx%016llx\"", upper_num, lower_num);
+ }
+}
+
+static void btf_int128_shift(__u64 *print_num, __u16 left_shift_bits,
+ __u16 right_shift_bits)
+{
+ __u64 upper_num, lower_num;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ upper_num = print_num[0];
+ lower_num = print_num[1];
+#else
+ upper_num = print_num[1];
+ lower_num = print_num[0];
+#endif
+
+ /* shake out the unneeded bits with shift/or operations */
+ if (left_shift_bits >= 64) {
+ upper_num = lower_num << (left_shift_bits - 64);
+ lower_num = 0;
+ } else {
+ upper_num = (upper_num << left_shift_bits) |
+ (lower_num >> (64 - left_shift_bits));
+ lower_num = lower_num << left_shift_bits;
+ }
+
+ if (right_shift_bits >= 64) {
+ lower_num = upper_num >> (right_shift_bits - 64);
+ upper_num = 0;
+ } else {
+ lower_num = (lower_num >> right_shift_bits) |
+ (upper_num << (64 - right_shift_bits));
+ upper_num = upper_num >> right_shift_bits;
+ }
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ print_num[0] = upper_num;
+ print_num[1] = lower_num;
+#else
+ print_num[0] = lower_num;
+ print_num[1] = upper_num;
+#endif
+}
+
+static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
+ const void *data, json_writer_t *jw,
+ bool is_plain_text)
+{
+ int left_shift_bits, right_shift_bits;
+ __u64 print_num[2] = {};
+ int bytes_to_copy;
+ int bits_to_copy;
+
+ bits_to_copy = bit_offset + nr_bits;
+ bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
+
+ memcpy(print_num, data, bytes_to_copy);
+#if defined(__BIG_ENDIAN_BITFIELD)
+ left_shift_bits = bit_offset;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ left_shift_bits = 128 - bits_to_copy;
+#else
+#error neither big nor little endian
+#endif
+ right_shift_bits = 128 - nr_bits;
+
+ btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
+ btf_int128_print(jw, print_num, is_plain_text);
+}
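+
+/* Worked example (little-endian): to extract a 3-bit field at bit_offset 2,
+ * bits_to_copy = 5, left_shift_bits = 128 - 5 = 123 and
+ * right_shift_bits = 128 - 3 = 125; the left shift drops everything above
+ * the field, the right shift then drops everything below it.
+ */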
+
+static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
+ const void *data, json_writer_t *jw,
+ bool is_plain_text)
+{
+ int nr_bits = BTF_INT_BITS(int_type);
+ int total_bits_offset;
+
+ /* bit_offset is at most 7.
+ * BTF_INT_OFFSET() cannot exceed 128 bits.
+ */
+ total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
+ data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
+ bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
+ btf_dumper_bitfield(nr_bits, bit_offset, data, jw,
+ is_plain_text);
+}
+
+static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
+ const void *data, json_writer_t *jw,
+ bool is_plain_text)
+{
+ __u32 *int_type;
+ __u32 nr_bits;
+
+ int_type = (__u32 *)(t + 1);
+ nr_bits = BTF_INT_BITS(*int_type);
+ /* if this is bit field */
+ if (bit_offset || BTF_INT_OFFSET(*int_type) ||
+ BITS_PER_BYTE_MASKED(nr_bits)) {
+ btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+ is_plain_text);
+ return 0;
+ }
+
+ if (nr_bits == 128) {
+ btf_int128_print(jw, data, is_plain_text);
+ return 0;
+ }
+
+ switch (BTF_INT_ENCODING(*int_type)) {
+ case 0:
+ if (BTF_INT_BITS(*int_type) == 64)
+ jsonw_printf(jw, "%llu", *(__u64 *)data);
+ else if (BTF_INT_BITS(*int_type) == 32)
+ jsonw_printf(jw, "%u", *(__u32 *)data);
+ else if (BTF_INT_BITS(*int_type) == 16)
+ jsonw_printf(jw, "%hu", *(__u16 *)data);
+ else if (BTF_INT_BITS(*int_type) == 8)
+ jsonw_printf(jw, "%hhu", *(__u8 *)data);
+ else
+ btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+ is_plain_text);
+ break;
+ case BTF_INT_SIGNED:
+ if (BTF_INT_BITS(*int_type) == 64)
+ jsonw_printf(jw, "%lld", *(long long *)data);
+ else if (BTF_INT_BITS(*int_type) == 32)
+ jsonw_printf(jw, "%d", *(int *)data);
+ else if (BTF_INT_BITS(*int_type) == 16)
+ jsonw_printf(jw, "%hd", *(short *)data);
+ else if (BTF_INT_BITS(*int_type) == 8)
+ jsonw_printf(jw, "%hhd", *(char *)data);
+ else
+ btf_dumper_int_bits(*int_type, bit_offset, data, jw,
+ is_plain_text);
+ break;
+ case BTF_INT_CHAR:
+ if (isprint(*(char *)data))
+ jsonw_printf(jw, "\"%c\"", *(char *)data);
+ else
+ if (is_plain_text)
+ jsonw_printf(jw, "0x%hhx", *(char *)data);
+ else
+ jsonw_printf(jw, "\"\\u00%02hhx\"",
+ *(char *)data);
+ break;
+ case BTF_INT_BOOL:
+ jsonw_bool(jw, *(bool *)data);
+ break;
+ default:
+ /* shouldn't happen */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ const struct btf_type *t;
+ struct btf_member *m;
+ const void *data_off;
+ int kind_flag;
+ int ret = 0;
+ int i, vlen;
+
+ t = btf__type_by_id(d->btf, type_id);
+ if (!t)
+ return -EINVAL;
+
+ kind_flag = BTF_INFO_KFLAG(t->info);
+ vlen = BTF_INFO_VLEN(t->info);
+ jsonw_start_object(d->jw);
+ m = (struct btf_member *)(t + 1);
+
+ for (i = 0; i < vlen; i++) {
+ __u32 bit_offset = m[i].offset;
+ __u32 bitfield_size = 0;
+
+ if (kind_flag) {
+ bitfield_size = BTF_MEMBER_BITFIELD_SIZE(bit_offset);
+ bit_offset = BTF_MEMBER_BIT_OFFSET(bit_offset);
+ }
+
+ jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
+ data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
+ if (bitfield_size) {
+ btf_dumper_bitfield(bitfield_size,
+ BITS_PER_BYTE_MASKED(bit_offset),
+ data_off, d->jw, d->is_plain_text);
+ } else {
+ ret = btf_dumper_do_type(d, m[i].type,
+ BITS_PER_BYTE_MASKED(bit_offset),
+ data_off);
+ if (ret)
+ break;
+ }
+ }
+
+ jsonw_end_object(d->jw);
+
+ return ret;
+}
+
+static int btf_dumper_var(const struct btf_dumper *d, __u32 type_id,
+ __u8 bit_offset, const void *data)
+{
+ const struct btf_type *t = btf__type_by_id(d->btf, type_id);
+ int ret;
+
+ jsonw_start_object(d->jw);
+ jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
+ ret = btf_dumper_do_type(d, t->type, bit_offset, data);
+ jsonw_end_object(d->jw);
+
+ return ret;
+}
+
+static int btf_dumper_datasec(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ struct btf_var_secinfo *vsi;
+ const struct btf_type *t;
+ int ret = 0, i, vlen;
+
+ t = btf__type_by_id(d->btf, type_id);
+ if (!t)
+ return -EINVAL;
+
+ vlen = BTF_INFO_VLEN(t->info);
+ vsi = (struct btf_var_secinfo *)(t + 1);
+
+ jsonw_start_object(d->jw);
+ jsonw_name(d->jw, btf__name_by_offset(d->btf, t->name_off));
+ jsonw_start_array(d->jw);
+ for (i = 0; i < vlen; i++) {
+ ret = btf_dumper_do_type(d, vsi[i].type, 0, data + vsi[i].offset);
+ if (ret)
+ break;
+ }
+ jsonw_end_array(d->jw);
+ jsonw_end_object(d->jw);
+
+ return ret;
+}
+
+static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
+ __u8 bit_offset, const void *data)
+{
+ const struct btf_type *t = btf__type_by_id(d->btf, type_id);
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT:
+ return btf_dumper_int(t, bit_offset, data, d->jw,
+ d->is_plain_text);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ return btf_dumper_struct(d, type_id, data);
+ case BTF_KIND_ARRAY:
+ return btf_dumper_array(d, type_id, data);
+ case BTF_KIND_ENUM:
+ return btf_dumper_enum(d, t, data);
+ case BTF_KIND_ENUM64:
+ return btf_dumper_enum64(d, t, data);
+ case BTF_KIND_PTR:
+ btf_dumper_ptr(d, t, data);
+ return 0;
+ case BTF_KIND_UNKN:
+ jsonw_printf(d->jw, "(unknown)");
+ return 0;
+ case BTF_KIND_FWD:
+ /* map key or value can't be forward */
+ jsonw_printf(d->jw, "(fwd-kind-invalid)");
+ return -EINVAL;
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ return btf_dumper_modifier(d, type_id, bit_offset, data);
+ case BTF_KIND_VAR:
+ return btf_dumper_var(d, type_id, bit_offset, data);
+ case BTF_KIND_DATASEC:
+ return btf_dumper_datasec(d, type_id, data);
+ default:
+ jsonw_printf(d->jw, "(unsupported-kind");
+ return -EINVAL;
+ }
+}
+
+int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
+ const void *data)
+{
+ return btf_dumper_do_type(d, type_id, 0, data);
+}
+
+#define BTF_PRINT_ARG(...) \
+ do { \
+ pos += snprintf(func_sig + pos, size - pos, \
+ __VA_ARGS__); \
+ if (pos >= size) \
+ return -1; \
+ } while (0)
+#define BTF_PRINT_TYPE(type) \
+ do { \
+ pos = __btf_dumper_type_only(btf, type, func_sig, \
+ pos, size); \
+ if (pos == -1) \
+ return -1; \
+ } while (0)
+
+static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
+ char *func_sig, int pos, int size)
+{
+ const struct btf_type *proto_type;
+ const struct btf_array *array;
+ const struct btf_var *var;
+ const struct btf_type *t;
+
+ if (!type_id) {
+ BTF_PRINT_ARG("void ");
+ return pos;
+ }
+
+ t = btf__type_by_id(btf, type_id);
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FLOAT:
+ BTF_PRINT_ARG("%s ", btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_STRUCT:
+ BTF_PRINT_ARG("struct %s ",
+ btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_UNION:
+ BTF_PRINT_ARG("union %s ",
+ btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ BTF_PRINT_ARG("enum %s ",
+ btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_ARRAY:
+ array = (struct btf_array *)(t + 1);
+ BTF_PRINT_TYPE(array->type);
+ BTF_PRINT_ARG("[%d]", array->nelems);
+ break;
+ case BTF_KIND_PTR:
+ BTF_PRINT_TYPE(t->type);
+ BTF_PRINT_ARG("* ");
+ break;
+ case BTF_KIND_FWD:
+ BTF_PRINT_ARG("%s %s ",
+ BTF_INFO_KFLAG(t->info) ? "union" : "struct",
+ btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_VOLATILE:
+ BTF_PRINT_ARG("volatile ");
+ BTF_PRINT_TYPE(t->type);
+ break;
+ case BTF_KIND_CONST:
+ BTF_PRINT_ARG("const ");
+ BTF_PRINT_TYPE(t->type);
+ break;
+ case BTF_KIND_RESTRICT:
+ BTF_PRINT_ARG("restrict ");
+ BTF_PRINT_TYPE(t->type);
+ break;
+ case BTF_KIND_FUNC_PROTO:
+ pos = btf_dump_func(btf, func_sig, t, NULL, pos, size);
+ if (pos == -1)
+ return -1;
+ break;
+ case BTF_KIND_FUNC:
+ proto_type = btf__type_by_id(btf, t->type);
+ pos = btf_dump_func(btf, func_sig, proto_type, t, pos, size);
+ if (pos == -1)
+ return -1;
+ break;
+ case BTF_KIND_VAR:
+ var = (struct btf_var *)(t + 1);
+ if (var->linkage == BTF_VAR_STATIC)
+ BTF_PRINT_ARG("static ");
+ BTF_PRINT_TYPE(t->type);
+ BTF_PRINT_ARG(" %s",
+ btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_DATASEC:
+ BTF_PRINT_ARG("section (\"%s\") ",
+ btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_UNKN:
+ default:
+ return -1;
+ }
+
+ return pos;
+}
+
+static int btf_dump_func(const struct btf *btf, char *func_sig,
+ const struct btf_type *func_proto,
+ const struct btf_type *func, int pos, int size)
+{
+ int i, vlen;
+
+ BTF_PRINT_TYPE(func_proto->type);
+ if (func)
+ BTF_PRINT_ARG("%s(", btf__name_by_offset(btf, func->name_off));
+ else
+ BTF_PRINT_ARG("(");
+ vlen = BTF_INFO_VLEN(func_proto->info);
+ for (i = 0; i < vlen; i++) {
+ struct btf_param *arg = &((struct btf_param *)(func_proto + 1))[i];
+
+ if (i)
+ BTF_PRINT_ARG(", ");
+ if (arg->type) {
+ BTF_PRINT_TYPE(arg->type);
+ if (arg->name_off)
+ BTF_PRINT_ARG("%s",
+ btf__name_by_offset(btf, arg->name_off));
+ else if (pos && func_sig[pos - 1] == ' ')
+ /* Remove unnecessary space for
+ * FUNC_PROTO that does not have
+ * arg->name_off
+ */
+ func_sig[--pos] = '\0';
+ } else {
+ BTF_PRINT_ARG("...");
+ }
+ }
+ BTF_PRINT_ARG(")");
+
+ return pos;
+}
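+
+/* For a function like `static int foo(int a, char *b)` (names chosen for
+ * illustration), the signature assembled here reads roughly
+ * "int foo(int a, char * b)".
+ */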
+
+void btf_dumper_type_only(const struct btf *btf, __u32 type_id, char *func_sig,
+ int size)
+{
+ int err;
+
+ func_sig[0] = '\0';
+ if (!btf)
+ return;
+
+ err = __btf_dumper_type_only(btf, type_id, func_sig, 0, size);
+ if (err < 0)
+ func_sig[0] = '\0';
+}
+
+static const char *ltrim(const char *s)
+{
+ while (isspace(*s))
+ s++;
+
+ return s;
+}
+
+void btf_dump_linfo_plain(const struct btf *btf,
+ const struct bpf_line_info *linfo,
+ const char *prefix, bool linum)
+{
+ const char *line = btf__name_by_offset(btf, linfo->line_off);
+
+ if (!line)
+ return;
+ line = ltrim(line);
+
+ if (!prefix)
+ prefix = "";
+
+ if (linum) {
+ const char *file = btf__name_by_offset(btf, linfo->file_name_off);
+
+ /* Be more forgiving about the file name: the linum
+ * option is expected to provide more info than the
+ * already available source line.
+ */
+ if (!file)
+ file = "";
+
+ printf("%s%s [file:%s line_num:%u line_col:%u]\n",
+ prefix, line, file,
+ BPF_LINE_INFO_LINE_NUM(linfo->line_col),
+ BPF_LINE_INFO_LINE_COL(linfo->line_col));
+ } else {
+ printf("%s%s\n", prefix, line);
+ }
+}
+
+void btf_dump_linfo_json(const struct btf *btf,
+ const struct bpf_line_info *linfo, bool linum)
+{
+ const char *line = btf__name_by_offset(btf, linfo->line_off);
+
+ if (line)
+ jsonw_string_field(json_wtr, "src", ltrim(line));
+
+ if (linum) {
+ const char *file = btf__name_by_offset(btf, linfo->file_name_off);
+
+ if (file)
+ jsonw_string_field(json_wtr, "file", file);
+
+ if (BPF_LINE_INFO_LINE_NUM(linfo->line_col))
+ jsonw_int_field(json_wtr, "line_num",
+ BPF_LINE_INFO_LINE_NUM(linfo->line_col));
+
+ if (BPF_LINE_INFO_LINE_COL(linfo->line_col))
+ jsonw_int_field(json_wtr, "line_col",
+ BPF_LINE_INFO_LINE_COL(linfo->line_col));
+ }
+}
+
+static void dotlabel_puts(const char *s)
+{
+ for (; *s; ++s) {
+ switch (*s) {
+ case '\\':
+ case '"':
+ case '{':
+ case '}':
+ case '<':
+ case '>':
+ case '|':
+ case ' ':
+ putchar('\\');
+ fallthrough;
+ default:
+ putchar(*s);
+ }
+ }
+}
+
+static const char *shorten_path(const char *path)
+{
+ const unsigned int MAX_PATH_LEN = 32;
+ size_t len = strlen(path);
+ const char *shortpath;
+
+ if (len <= MAX_PATH_LEN)
+ return path;
+
+ /* Search for last '/' under the MAX_PATH_LEN limit */
+ shortpath = strchr(path + len - MAX_PATH_LEN, '/');
+ if (shortpath) {
+ if (shortpath < path + strlen("..."))
+ /* We removed a very short prefix, e.g. "/w", and we'll
+ * make the path longer by prefixing with the ellipsis.
+ * Not worth it, keep initial path.
+ */
+ return path;
+ return shortpath;
+ }
+
+ /* File base name length is > MAX_PATH_LEN, search for last '/' */
+ shortpath = strrchr(path, '/');
+ if (shortpath)
+ return shortpath;
+
+ return path;
+}
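+
+/* e.g. "/home/user/linux/tools/bpf/bpftool/btf_dumper.c" (47 characters)
+ * is shortened to "/tools/bpf/bpftool/btf_dumper.c"; the caller prefixes
+ * it with "..." when printing.
+ */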
+
+void btf_dump_linfo_dotlabel(const struct btf *btf,
+ const struct bpf_line_info *linfo, bool linum)
+{
+ const char *line = btf__name_by_offset(btf, linfo->line_off);
+
+ if (!line || !strlen(line))
+ return;
+ line = ltrim(line);
+
+ if (linum) {
+ const char *file = btf__name_by_offset(btf, linfo->file_name_off);
+ const char *shortfile;
+
+ /* Be more forgiving about the file name: the linum
+ * option is expected to provide more info than the
+ * already available source line.
+ */
+ if (!file)
+ shortfile = "";
+ else
+ shortfile = shorten_path(file);
+
+ printf("; [%s", shortfile > file ? "..." : "");
+ dotlabel_puts(shortfile);
+ printf(" line:%u col:%u]\\l\\\n",
+ BPF_LINE_INFO_LINE_NUM(linfo->line_col),
+ BPF_LINE_INFO_LINE_COL(linfo->line_col));
+ }
+
+ printf("; ");
+ dotlabel_puts(line);
+ printf("\\l\\\n");
+}
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
new file mode 100644
index 0000000000..eec437cca2
--- /dev/null
+++ b/tools/bpf/bpftool/cfg.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/list.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "cfg.h"
+#include "main.h"
+#include "xlated_dumper.h"
+
+struct cfg {
+ struct list_head funcs;
+ int func_num;
+};
+
+struct func_node {
+ struct list_head l;
+ struct list_head bbs;
+ struct bpf_insn *start;
+ struct bpf_insn *end;
+ int idx;
+ int bb_num;
+};
+
+struct bb_node {
+ struct list_head l;
+ struct list_head e_prevs;
+ struct list_head e_succs;
+ struct bpf_insn *head;
+ struct bpf_insn *tail;
+ int idx;
+};
+
+#define EDGE_FLAG_EMPTY 0x0
+#define EDGE_FLAG_FALLTHROUGH 0x1
+#define EDGE_FLAG_JUMP 0x2
+struct edge_node {
+ struct list_head l;
+ struct bb_node *src;
+ struct bb_node *dst;
+ int flags;
+};
+
+#define ENTRY_BLOCK_INDEX 0
+#define EXIT_BLOCK_INDEX 1
+#define NUM_FIXED_BLOCKS 2
+#define func_prev(func) list_prev_entry(func, l)
+#define func_next(func) list_next_entry(func, l)
+#define bb_prev(bb) list_prev_entry(bb, l)
+#define bb_next(bb) list_next_entry(bb, l)
+#define entry_bb(func) func_first_bb(func)
+#define exit_bb(func) func_last_bb(func)
+#define cfg_first_func(cfg) \
+ list_first_entry(&cfg->funcs, struct func_node, l)
+#define cfg_last_func(cfg) \
+ list_last_entry(&cfg->funcs, struct func_node, l)
+#define func_first_bb(func) \
+ list_first_entry(&func->bbs, struct bb_node, l)
+#define func_last_bb(func) \
+ list_last_entry(&func->bbs, struct bb_node, l)
+
+static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn)
+{
+ struct func_node *new_func, *func;
+
+ list_for_each_entry(func, &cfg->funcs, l) {
+ if (func->start == insn)
+ return func;
+ else if (func->start > insn)
+ break;
+ }
+
+ func = func_prev(func);
+ new_func = calloc(1, sizeof(*new_func));
+ if (!new_func) {
+ p_err("OOM when allocating FUNC node");
+ return NULL;
+ }
+ new_func->start = insn;
+ new_func->idx = cfg->func_num;
+ list_add(&new_func->l, &func->l);
+ cfg->func_num++;
+
+ return new_func;
+}
+
+static struct bb_node *func_append_bb(struct func_node *func,
+ struct bpf_insn *insn)
+{
+ struct bb_node *new_bb, *bb;
+
+ list_for_each_entry(bb, &func->bbs, l) {
+ if (bb->head == insn)
+ return bb;
+ else if (bb->head > insn)
+ break;
+ }
+
+ bb = bb_prev(bb);
+ new_bb = calloc(1, sizeof(*new_bb));
+ if (!new_bb) {
+ p_err("OOM when allocating BB node");
+ return NULL;
+ }
+ new_bb->head = insn;
+ INIT_LIST_HEAD(&new_bb->e_prevs);
+ INIT_LIST_HEAD(&new_bb->e_succs);
+ list_add(&new_bb->l, &bb->l);
+
+ return new_bb;
+}
+
+static struct bb_node *func_insert_dummy_bb(struct list_head *after)
+{
+ struct bb_node *bb;
+
+ bb = calloc(1, sizeof(*bb));
+ if (!bb) {
+ p_err("OOM when allocating BB node");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&bb->e_prevs);
+ INIT_LIST_HEAD(&bb->e_succs);
+ list_add(&bb->l, after);
+
+ return bb;
+}
+
+static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
+ struct bpf_insn *end)
+{
+ struct func_node *func, *last_func;
+
+ func = cfg_append_func(cfg, cur);
+ if (!func)
+ return true;
+
+ for (; cur < end; cur++) {
+ if (cur->code != (BPF_JMP | BPF_CALL))
+ continue;
+ if (cur->src_reg != BPF_PSEUDO_CALL)
+ continue;
+ func = cfg_append_func(cfg, cur + cur->off + 1);
+ if (!func)
+ return true;
+ }
+
+ last_func = cfg_last_func(cfg);
+ last_func->end = end - 1;
+ func = cfg_first_func(cfg);
+ list_for_each_entry_from(func, &last_func->l, l)
+ func->end = func_next(func)->start - 1;
+
+ return false;
+}
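+
+/* Worked example (hypothetical instruction stream): if insn 5 is a
+ * BPF-to-BPF call with off = 10 (BPF_PSEUDO_CALL), the callee starts
+ * at cur + off + 1 = insn 16, so the stream is split into a function
+ * covering insns [0..15] and another starting at insn 16.
+ */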
+
+static bool is_jmp_insn(__u8 code)
+{
+ return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
+}
+
+static bool func_partition_bb_head(struct func_node *func)
+{
+ struct bpf_insn *cur, *end;
+ struct bb_node *bb;
+
+ cur = func->start;
+ end = func->end;
+ INIT_LIST_HEAD(&func->bbs);
+ bb = func_append_bb(func, cur);
+ if (!bb)
+ return true;
+
+ for (; cur <= end; cur++) {
+ if (is_jmp_insn(cur->code)) {
+ __u8 opcode = BPF_OP(cur->code);
+
+ if (opcode == BPF_EXIT || opcode == BPF_CALL)
+ continue;
+
+ bb = func_append_bb(func, cur + cur->off + 1);
+ if (!bb)
+ return true;
+
+ if (opcode != BPF_JA) {
+ bb = func_append_bb(func, cur + 1);
+ if (!bb)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
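+
+/* Sketch of the splitting rule (hypothetical indices):
+ *   2: if r1 > 3 goto +2   <- conditional jump
+ *   3: r1 += 1             <- new BB head: fall-through (cur + 1)
+ *   5: exit                <- new BB head: jump target (cur + off + 1)
+ * An unconditional BPF_JA only creates the jump-target head, since it
+ * has no fall-through successor.
+ */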
+
+static void func_partition_bb_tail(struct func_node *func)
+{
+ unsigned int bb_idx = NUM_FIXED_BLOCKS;
+ struct bb_node *bb, *last;
+
+ last = func_last_bb(func);
+ last->tail = func->end;
+ bb = func_first_bb(func);
+ list_for_each_entry_from(bb, &last->l, l) {
+ bb->tail = bb_next(bb)->head - 1;
+ bb->idx = bb_idx++;
+ }
+
+ last->idx = bb_idx++;
+ func->bb_num = bb_idx;
+}
+
+static bool func_add_special_bb(struct func_node *func)
+{
+ struct bb_node *bb;
+
+ bb = func_insert_dummy_bb(&func->bbs);
+ if (!bb)
+ return true;
+ bb->idx = ENTRY_BLOCK_INDEX;
+
+ bb = func_insert_dummy_bb(&func_last_bb(func)->l);
+ if (!bb)
+ return true;
+ bb->idx = EXIT_BLOCK_INDEX;
+
+ return false;
+}
+
+static bool func_partition_bb(struct func_node *func)
+{
+ if (func_partition_bb_head(func))
+ return true;
+
+ func_partition_bb_tail(func);
+
+ return false;
+}
+
+static struct bb_node *func_search_bb_with_head(struct func_node *func,
+ struct bpf_insn *insn)
+{
+ struct bb_node *bb;
+
+ list_for_each_entry(bb, &func->bbs, l) {
+ if (bb->head == insn)
+ return bb;
+ }
+
+ return NULL;
+}
+
+static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst,
+ int flags)
+{
+ struct edge_node *e;
+
+ e = calloc(1, sizeof(*e));
+ if (!e) {
+ p_err("OOM when allocating edge node");
+ return NULL;
+ }
+
+ if (src)
+ e->src = src;
+ if (dst)
+ e->dst = dst;
+
+ e->flags |= flags;
+
+ return e;
+}
+
+static bool func_add_bb_edges(struct func_node *func)
+{
+ struct bpf_insn *insn;
+ struct edge_node *e;
+ struct bb_node *bb;
+
+ bb = entry_bb(func);
+ e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH);
+ if (!e)
+ return true;
+ list_add_tail(&e->l, &bb->e_succs);
+
+ bb = exit_bb(func);
+ e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH);
+ if (!e)
+ return true;
+ list_add_tail(&e->l, &bb->e_prevs);
+
+ bb = entry_bb(func);
+ bb = bb_next(bb);
+ list_for_each_entry_from(bb, &exit_bb(func)->l, l) {
+ e = new_edge(bb, NULL, EDGE_FLAG_EMPTY);
+ if (!e)
+ return true;
+ e->src = bb;
+
+ insn = bb->tail;
+ if (!is_jmp_insn(insn->code) ||
+ BPF_OP(insn->code) == BPF_EXIT) {
+ e->dst = bb_next(bb);
+ e->flags |= EDGE_FLAG_FALLTHROUGH;
+ list_add_tail(&e->l, &bb->e_succs);
+ continue;
+ } else if (BPF_OP(insn->code) == BPF_JA) {
+ e->dst = func_search_bb_with_head(func,
+ insn + insn->off + 1);
+ e->flags |= EDGE_FLAG_JUMP;
+ list_add_tail(&e->l, &bb->e_succs);
+ continue;
+ }
+
+ e->dst = bb_next(bb);
+ e->flags |= EDGE_FLAG_FALLTHROUGH;
+ list_add_tail(&e->l, &bb->e_succs);
+
+ e = new_edge(bb, NULL, EDGE_FLAG_JUMP);
+ if (!e)
+ return true;
+ e->src = bb;
+ e->dst = func_search_bb_with_head(func, insn + insn->off + 1);
+ list_add_tail(&e->l, &bb->e_succs);
+ }
+
+ return false;
+}
+
+static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len)
+{
+ int cnt = len / sizeof(*insn);
+ struct func_node *func;
+
+ INIT_LIST_HEAD(&cfg->funcs);
+
+ if (cfg_partition_funcs(cfg, insn, insn + cnt))
+ return true;
+
+ list_for_each_entry(func, &cfg->funcs, l) {
+ if (func_partition_bb(func) || func_add_special_bb(func))
+ return true;
+
+ if (func_add_bb_edges(func))
+ return true;
+ }
+
+ return false;
+}
+
+static void cfg_destroy(struct cfg *cfg)
+{
+ struct func_node *func, *func2;
+
+ list_for_each_entry_safe(func, func2, &cfg->funcs, l) {
+ struct bb_node *bb, *bb2;
+
+ list_for_each_entry_safe(bb, bb2, &func->bbs, l) {
+ struct edge_node *e, *e2;
+
+ list_for_each_entry_safe(e, e2, &bb->e_prevs, l) {
+ list_del(&e->l);
+ free(e);
+ }
+
+ list_for_each_entry_safe(e, e2, &bb->e_succs, l) {
+ list_del(&e->l);
+ free(e);
+ }
+
+ list_del(&bb->l);
+ free(bb);
+ }
+
+ list_del(&func->l);
+ free(func);
+ }
+}
+
+static void
+draw_bb_node(struct func_node *func, struct bb_node *bb, struct dump_data *dd,
+ bool opcodes, bool linum)
+{
+ const char *shape;
+
+ if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX)
+ shape = "Mdiamond";
+ else
+ shape = "record";
+
+ printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"",
+ func->idx, bb->idx, shape);
+
+ if (bb->idx == ENTRY_BLOCK_INDEX) {
+ printf("ENTRY");
+ } else if (bb->idx == EXIT_BLOCK_INDEX) {
+ printf("EXIT");
+ } else {
+ unsigned int start_idx;
+
+ printf("{\\\n");
+ start_idx = bb->head - func->start;
+ dump_xlated_for_graph(dd, bb->head, bb->tail, start_idx,
+ opcodes, linum);
+ printf("}");
+ }
+
+ printf("\"];\n\n");
+}
+
+static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
+{
+ const char *style = "\"solid,bold\"";
+ const char *color = "black";
+ int func_idx = func->idx;
+ struct edge_node *e;
+ int weight = 10;
+
+ if (list_empty(&bb->e_succs))
+ return;
+
+ list_for_each_entry(e, &bb->e_succs, l) {
+ printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true",
+ func_idx, e->src->idx, func_idx, e->dst->idx,
+ style, color, weight);
+ printf("];\n");
+ }
+}
+
+static void
+func_output_bb_def(struct func_node *func, struct dump_data *dd,
+ bool opcodes, bool linum)
+{
+ struct bb_node *bb;
+
+ list_for_each_entry(bb, &func->bbs, l)
+ draw_bb_node(func, bb, dd, opcodes, linum);
+
+static void func_output_edges(struct func_node *func)
+{
+ int func_idx = func->idx;
+ struct bb_node *bb;
+
+ list_for_each_entry(bb, &func->bbs, l)
+ draw_bb_succ_edges(func, bb);
+
+ /* Add an invisible edge from ENTRY to EXIT; this improves
+ * the graph layout.
+ */
+ printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n",
+ func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
+}
+
+static void
+cfg_dump(struct cfg *cfg, struct dump_data *dd, bool opcodes, bool linum)
+{
+ struct func_node *func;
+
+ printf("digraph \"DOT graph for eBPF program\" {\n");
+ list_for_each_entry(func, &cfg->funcs, l) {
+ printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
+ func->idx, func->idx);
+ func_output_bb_def(func, dd, opcodes, linum);
+ func_output_edges(func);
+ printf("}\n");
+ }
+ printf("}\n");
+}
+
+void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes, bool linum)
+{
+ struct bpf_insn *insn = buf;
+ struct cfg cfg;
+
+ memset(&cfg, 0, sizeof(cfg));
+ if (cfg_build(&cfg, insn, len))
+ return;
+
+ cfg_dump(&cfg, dd, opcodes, linum);
+
+ cfg_destroy(&cfg);
+}
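+
+/* For intuition, a minimal sketch of the emitted DOT shape (node and
+ * edge attributes elided):
+ *
+ *   digraph "DOT graph for eBPF program" {
+ *   subgraph "cluster_0" {
+ *           fn_0_bb_0 [shape=Mdiamond,style=filled,label="ENTRY"];
+ *           fn_0_bb_2 [shape=record,style=filled,label="{...}"];
+ *           fn_0_bb_0:s -> fn_0_bb_2:n [...];
+ *   }
+ *   }
+ *
+ * Piping the output through "dot -Tsvg" renders the graph.
+ */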
diff --git a/tools/bpf/bpftool/cfg.h b/tools/bpf/bpftool/cfg.h
new file mode 100644
index 0000000000..b3793f4e17
--- /dev/null
+++ b/tools/bpf/bpftool/cfg.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#ifndef __BPF_TOOL_CFG_H
+#define __BPF_TOOL_CFG_H
+
+#include "xlated_dumper.h"
+
+void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes, bool linum);
+
+#endif /* __BPF_TOOL_CFG_H */
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
new file mode 100644
index 0000000000..ac846b0805
--- /dev/null
+++ b/tools/bpf/bpftool/cgroup.c
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (C) 2017 Facebook
+// Author: Roman Gushchin <guro@fb.com>
+
+#define _XOPEN_SOURCE 500
+#include <errno.h>
+#include <fcntl.h>
+#include <ftw.h>
+#include <mntent.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+
+#include "main.h"
+
+#define HELP_SPEC_ATTACH_FLAGS \
+ "ATTACH_FLAGS := { multi | override }"
+
+#define HELP_SPEC_ATTACH_TYPES \
+ " ATTACH_TYPE := { cgroup_inet_ingress | cgroup_inet_egress |\n" \
+ " cgroup_inet_sock_create | cgroup_sock_ops |\n" \
+ " cgroup_device | cgroup_inet4_bind |\n" \
+ " cgroup_inet6_bind | cgroup_inet4_post_bind |\n" \
+ " cgroup_inet6_post_bind | cgroup_inet4_connect |\n" \
+ " cgroup_inet6_connect | cgroup_inet4_getpeername |\n" \
+ " cgroup_inet6_getpeername | cgroup_inet4_getsockname |\n" \
+ " cgroup_inet6_getsockname | cgroup_udp4_sendmsg |\n" \
+ " cgroup_udp6_sendmsg | cgroup_udp4_recvmsg |\n" \
+ " cgroup_udp6_recvmsg | cgroup_sysctl |\n" \
+ " cgroup_getsockopt | cgroup_setsockopt |\n" \
+ " cgroup_inet_sock_release }"
+
+static unsigned int query_flags;
+static struct btf *btf_vmlinux;
+static __u32 btf_vmlinux_id;
+
+static enum bpf_attach_type parse_attach_type(const char *str)
+{
+ const char *attach_type_str;
+ enum bpf_attach_type type;
+
+ for (type = 0; ; type++) {
+ attach_type_str = libbpf_bpf_attach_type_str(type);
+ if (!attach_type_str)
+ break;
+ if (!strcmp(str, attach_type_str))
+ return type;
+ }
+
+ /* Also check traditionally used attach type strings. For these we keep
+ * allowing prefixed usage.
+ */
+ for (type = 0; ; type++) {
+ attach_type_str = bpf_attach_type_input_str(type);
+ if (!attach_type_str)
+ break;
+ if (is_prefix(str, attach_type_str))
+ return type;
+ }
+
+ return __MAX_BPF_ATTACH_TYPE;
+}
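+
+/* Usage note (illustrative): both naming schemes resolve to the same
+ * type, e.g. the libbpf name "cgroup_inet_ingress" and the legacy,
+ * prefix-matchable "ingress" both yield BPF_CGROUP_INET_INGRESS.
+ */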
+
+static void guess_vmlinux_btf_id(__u32 attach_btf_obj_id)
+{
+ struct bpf_btf_info btf_info = {};
+ __u32 btf_len = sizeof(btf_info);
+ char name[16] = {};
+ int err;
+ int fd;
+
+ btf_info.name = ptr_to_u64(name);
+ btf_info.name_len = sizeof(name);
+
+ fd = bpf_btf_get_fd_by_id(attach_btf_obj_id);
+ if (fd < 0)
+ return;
+
+ err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_len);
+ if (err)
+ goto out;
+
+ if (btf_info.kernel_btf && strncmp(name, "vmlinux", sizeof(name)) == 0)
+ btf_vmlinux_id = btf_info.id;
+
+out:
+ close(fd);
+}
+
+static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
+ const char *attach_flags_str,
+ int level)
+{
+ char prog_name[MAX_PROG_FULL_NAME];
+ const char *attach_btf_name = NULL;
+ struct bpf_prog_info info = {};
+ const char *attach_type_str;
+ __u32 info_len = sizeof(info);
+ int prog_fd;
+
+ prog_fd = bpf_prog_get_fd_by_id(id);
+ if (prog_fd < 0)
+ return -1;
+
+ if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) {
+ close(prog_fd);
+ return -1;
+ }
+
+ attach_type_str = libbpf_bpf_attach_type_str(attach_type);
+
+ if (btf_vmlinux) {
+ if (!btf_vmlinux_id)
+ guess_vmlinux_btf_id(info.attach_btf_obj_id);
+
+ if (btf_vmlinux_id == info.attach_btf_obj_id &&
+ info.attach_btf_id < btf__type_cnt(btf_vmlinux)) {
+ const struct btf_type *t =
+ btf__type_by_id(btf_vmlinux, info.attach_btf_id);
+ attach_btf_name =
+ btf__name_by_offset(btf_vmlinux, t->name_off);
+ }
+ }
+
+ get_prog_full_name(&info, prog_fd, prog_name, sizeof(prog_name));
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "id", info.id);
+ if (attach_type_str)
+ jsonw_string_field(json_wtr, "attach_type", attach_type_str);
+ else
+ jsonw_uint_field(json_wtr, "attach_type", attach_type);
+ if (!(query_flags & BPF_F_QUERY_EFFECTIVE))
+ jsonw_string_field(json_wtr, "attach_flags", attach_flags_str);
+ jsonw_string_field(json_wtr, "name", prog_name);
+ if (attach_btf_name)
+ jsonw_string_field(json_wtr, "attach_btf_name", attach_btf_name);
+ jsonw_uint_field(json_wtr, "attach_btf_obj_id", info.attach_btf_obj_id);
+ jsonw_uint_field(json_wtr, "attach_btf_id", info.attach_btf_id);
+ jsonw_end_object(json_wtr);
+ } else {
+ printf("%s%-8u ", level ? " " : "", info.id);
+ if (attach_type_str)
+ printf("%-15s", attach_type_str);
+ else
+ printf("type %-10u", attach_type);
+ if (query_flags & BPF_F_QUERY_EFFECTIVE)
+ printf(" %-15s", prog_name);
+ else
+ printf(" %-15s %-15s", attach_flags_str, prog_name);
+ if (attach_btf_name)
+ printf(" %-15s", attach_btf_name);
+ else if (info.attach_btf_id)
+ printf(" attach_btf_obj_id=%d attach_btf_id=%d",
+ info.attach_btf_obj_id, info.attach_btf_id);
+ printf("\n");
+ }
+
+ close(prog_fd);
+ return 0;
+}
+
+static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type)
+{
+ __u32 prog_cnt = 0;
+ int ret;
+
+ ret = bpf_prog_query(cgroup_fd, type, query_flags, NULL,
+ NULL, &prog_cnt);
+ if (ret)
+ return -1;
+
+ return prog_cnt;
+}
+
+static int cgroup_has_attached_progs(int cgroup_fd)
+{
+ enum bpf_attach_type type;
+ bool no_prog = true;
+
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ int count = count_attached_bpf_progs(cgroup_fd, type);
+
+ if (count < 0 && errno != EINVAL)
+ return -1;
+
+ if (count > 0) {
+ no_prog = false;
+ break;
+ }
+ }
+
+ return no_prog ? 0 : 1;
+}
+
+static int show_effective_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+ int level)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, p);
+ __u32 prog_ids[1024] = {0};
+ __u32 iter;
+ int ret;
+
+ p.query_flags = query_flags;
+ p.prog_cnt = ARRAY_SIZE(prog_ids);
+ p.prog_ids = prog_ids;
+
+ ret = bpf_prog_query_opts(cgroup_fd, type, &p);
+ if (ret)
+ return ret;
+
+ if (p.prog_cnt == 0)
+ return 0;
+
+ for (iter = 0; iter < p.prog_cnt; iter++)
+ show_bpf_prog(prog_ids[iter], type, NULL, level);
+
+ return 0;
+}
+
+static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+ int level)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, p);
+ __u32 prog_attach_flags[1024] = {0};
+ const char *attach_flags_str;
+ __u32 prog_ids[1024] = {0};
+ char buf[32];
+ __u32 iter;
+ int ret;
+
+ p.query_flags = query_flags;
+ p.prog_cnt = ARRAY_SIZE(prog_ids);
+ p.prog_ids = prog_ids;
+ p.prog_attach_flags = prog_attach_flags;
+
+ ret = bpf_prog_query_opts(cgroup_fd, type, &p);
+ if (ret)
+ return ret;
+
+ if (p.prog_cnt == 0)
+ return 0;
+
+ for (iter = 0; iter < p.prog_cnt; iter++) {
+ __u32 attach_flags;
+
+ attach_flags = prog_attach_flags[iter] ?: p.attach_flags;
+
+ switch (attach_flags) {
+ case BPF_F_ALLOW_MULTI:
+ attach_flags_str = "multi";
+ break;
+ case BPF_F_ALLOW_OVERRIDE:
+ attach_flags_str = "override";
+ break;
+ case 0:
+ attach_flags_str = "";
+ break;
+ default:
+ snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
+ attach_flags_str = buf;
+ }
+
+ show_bpf_prog(prog_ids[iter], type,
+ attach_flags_str, level);
+ }
+
+ return 0;
+}
+
+static int show_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+ int level)
+{
+ return query_flags & BPF_F_QUERY_EFFECTIVE ?
+ show_effective_bpf_progs(cgroup_fd, type, level) :
+ show_attached_bpf_progs(cgroup_fd, type, level);
+}
+
+static int do_show(int argc, char **argv)
+{
+ enum bpf_attach_type type;
+ int has_attached_progs;
+ const char *path;
+ int cgroup_fd;
+ int ret = -1;
+
+ query_flags = 0;
+
+ if (!REQ_ARGS(1))
+ return -1;
+ path = GET_ARG();
+
+ while (argc) {
+ if (is_prefix(*argv, "effective")) {
+ if (query_flags & BPF_F_QUERY_EFFECTIVE) {
+ p_err("duplicated argument: %s", *argv);
+ return -1;
+ }
+ query_flags |= BPF_F_QUERY_EFFECTIVE;
+ NEXT_ARG();
+ } else {
+ p_err("expected no more arguments, 'effective', got: '%s'?",
+ *argv);
+ return -1;
+ }
+ }
+
+ cgroup_fd = open(path, O_RDONLY);
+ if (cgroup_fd < 0) {
+ p_err("can't open cgroup %s", path);
+ goto exit;
+ }
+
+ has_attached_progs = cgroup_has_attached_progs(cgroup_fd);
+ if (has_attached_progs < 0) {
+ p_err("can't query bpf programs attached to %s: %s",
+ path, strerror(errno));
+ goto exit_cgroup;
+ } else if (!has_attached_progs) {
+ ret = 0;
+ goto exit_cgroup;
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ else if (query_flags & BPF_F_QUERY_EFFECTIVE)
+ printf("%-8s %-15s %-15s\n", "ID", "AttachType", "Name");
+ else
+ printf("%-8s %-15s %-15s %-15s\n", "ID", "AttachType",
+ "AttachFlags", "Name");
+
+ btf_vmlinux = libbpf_find_kernel_btf();
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ /*
+ * Not all attach types may be supported, so it's expected
+ * that some requests will fail.
+ * If we were able to show programs for at least one
+ * attach type, return 0.
+ */
+ if (show_bpf_progs(cgroup_fd, type, 0) == 0)
+ ret = 0;
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+exit_cgroup:
+ close(cgroup_fd);
+exit:
+ return ret;
+}
+
+/*
+ * To distinguish nftw() errors from do_show_tree_fn() errors
+ * and to avoid duplicating error messages, return -2
+ * from do_show_tree_fn() in case of error.
+ */
+#define NFTW_ERR -1
+#define SHOW_TREE_FN_ERR -2
+static int do_show_tree_fn(const char *fpath, const struct stat *sb,
+ int typeflag, struct FTW *ftw)
+{
+ enum bpf_attach_type type;
+ int has_attached_progs;
+ int cgroup_fd;
+
+ if (typeflag != FTW_D)
+ return 0;
+
+ cgroup_fd = open(fpath, O_RDONLY);
+ if (cgroup_fd < 0) {
+ p_err("can't open cgroup %s: %s", fpath, strerror(errno));
+ return SHOW_TREE_FN_ERR;
+ }
+
+ has_attached_progs = cgroup_has_attached_progs(cgroup_fd);
+ if (has_attached_progs < 0) {
+ p_err("can't query bpf programs attached to %s: %s",
+ fpath, strerror(errno));
+ close(cgroup_fd);
+ return SHOW_TREE_FN_ERR;
+ } else if (!has_attached_progs) {
+ close(cgroup_fd);
+ return 0;
+ }
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_string_field(json_wtr, "cgroup", fpath);
+ jsonw_name(json_wtr, "programs");
+ jsonw_start_array(json_wtr);
+ } else {
+ printf("%s\n", fpath);
+ }
+
+ btf_vmlinux = libbpf_find_kernel_btf();
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
+ show_bpf_progs(cgroup_fd, type, ftw->level);
+
+ if (errno == EINVAL)
+ /* The last attach type iterated over does not support
+ * queries. Do not report an error for it, especially
+ * because batch mode would stop processing commands.
+ */
+ errno = 0;
+
+ if (json_output) {
+ jsonw_end_array(json_wtr);
+ jsonw_end_object(json_wtr);
+ }
+
+ close(cgroup_fd);
+
+ return 0;
+}
+
+static char *find_cgroup_root(void)
+{
+ struct mntent *mnt;
+ FILE *f;
+
+ f = fopen("/proc/mounts", "r");
+ if (f == NULL)
+ return NULL;
+
+ while ((mnt = getmntent(f))) {
+ if (strcmp(mnt->mnt_type, "cgroup2") == 0) {
+ fclose(f);
+ return strdup(mnt->mnt_dir);
+ }
+ }
+
+ fclose(f);
+ return NULL;
+}
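+
+/* Example (assumed /proc/mounts contents): a line such as
+ *   cgroup2 /sys/fs/cgroup cgroup2 rw,nosuid,nodev,noexec 0 0
+ * matches on its filesystem type, and find_cgroup_root() returns a
+ * strdup()ed "/sys/fs/cgroup".
+ */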
+
+static int do_show_tree(int argc, char **argv)
+{
+ char *cgroup_root, *cgroup_alloced = NULL;
+ int ret;
+
+ query_flags = 0;
+
+ if (!argc) {
+ cgroup_alloced = find_cgroup_root();
+ if (!cgroup_alloced) {
+ p_err("cgroup v2 isn't mounted");
+ return -1;
+ }
+ cgroup_root = cgroup_alloced;
+ } else {
+ cgroup_root = GET_ARG();
+
+ while (argc) {
+ if (is_prefix(*argv, "effective")) {
+ if (query_flags & BPF_F_QUERY_EFFECTIVE) {
+ p_err("duplicated argument: %s", *argv);
+ return -1;
+ }
+ query_flags |= BPF_F_QUERY_EFFECTIVE;
+ NEXT_ARG();
+ } else {
+ p_err("expected no more arguments, 'effective', got: '%s'?",
+ *argv);
+ return -1;
+ }
+ }
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ else if (query_flags & BPF_F_QUERY_EFFECTIVE)
+ printf("%s\n"
+ "%-8s %-15s %-15s\n",
+ "CgroupPath",
+ "ID", "AttachType", "Name");
+ else
+ printf("%s\n"
+ "%-8s %-15s %-15s %-15s\n",
+ "CgroupPath",
+ "ID", "AttachType", "AttachFlags", "Name");
+
+ switch (nftw(cgroup_root, do_show_tree_fn, 1024, FTW_MOUNT)) {
+ case NFTW_ERR:
+ p_err("can't iterate over %s: %s", cgroup_root,
+ strerror(errno));
+ ret = -1;
+ break;
+ case SHOW_TREE_FN_ERR:
+ ret = -1;
+ break;
+ default:
+ ret = 0;
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ free(cgroup_alloced);
+
+ return ret;
+}
+
+static int do_attach(int argc, char **argv)
+{
+ enum bpf_attach_type attach_type;
+ int cgroup_fd, prog_fd;
+ int attach_flags = 0;
+ int ret = -1;
+ int i;
+
+ if (argc < 4) {
+ p_err("too few parameters for cgroup attach");
+ goto exit;
+ }
+
+ cgroup_fd = open(argv[0], O_RDONLY);
+ if (cgroup_fd < 0) {
+ p_err("can't open cgroup %s", argv[0]);
+ goto exit;
+ }
+
+ attach_type = parse_attach_type(argv[1]);
+ if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+ p_err("invalid attach type");
+ goto exit_cgroup;
+ }
+
+ argc -= 2;
+ argv = &argv[2];
+ prog_fd = prog_parse_fd(&argc, &argv);
+ if (prog_fd < 0)
+ goto exit_cgroup;
+
+ for (i = 0; i < argc; i++) {
+ if (is_prefix(argv[i], "multi")) {
+ attach_flags |= BPF_F_ALLOW_MULTI;
+ } else if (is_prefix(argv[i], "override")) {
+ attach_flags |= BPF_F_ALLOW_OVERRIDE;
+ } else {
+ p_err("unknown option: %s", argv[i]);
+ goto exit_cgroup;
+ }
+ }
+
+ if (bpf_prog_attach(prog_fd, cgroup_fd, attach_type, attach_flags)) {
+ p_err("failed to attach program");
+ goto exit_prog;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ ret = 0;
+
+exit_prog:
+ close(prog_fd);
+exit_cgroup:
+ close(cgroup_fd);
+exit:
+ return ret;
+}
+
+static int do_detach(int argc, char **argv)
+{
+ enum bpf_attach_type attach_type;
+ int prog_fd, cgroup_fd;
+ int ret = -1;
+
+ if (argc < 4) {
+ p_err("too few parameters for cgroup detach");
+ goto exit;
+ }
+
+ cgroup_fd = open(argv[0], O_RDONLY);
+ if (cgroup_fd < 0) {
+ p_err("can't open cgroup %s", argv[0]);
+ goto exit;
+ }
+
+ attach_type = parse_attach_type(argv[1]);
+ if (attach_type == __MAX_BPF_ATTACH_TYPE) {
+ p_err("invalid attach type");
+ goto exit_cgroup;
+ }
+
+ argc -= 2;
+ argv = &argv[2];
+ prog_fd = prog_parse_fd(&argc, &argv);
+ if (prog_fd < 0)
+ goto exit_cgroup;
+
+ if (bpf_prog_detach2(prog_fd, cgroup_fd, attach_type)) {
+ p_err("failed to detach program");
+ goto exit_prog;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ ret = 0;
+
+exit_prog:
+ close(prog_fd);
+exit_cgroup:
+ close(cgroup_fd);
+exit:
+ return ret;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %1$s %2$s { show | list } CGROUP [**effective**]\n"
+ " %1$s %2$s tree [CGROUP_ROOT] [**effective**]\n"
+ " %1$s %2$s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
+ " %1$s %2$s detach CGROUP ATTACH_TYPE PROG\n"
+ " %1$s %2$s help\n"
+ "\n"
+ HELP_SPEC_ATTACH_TYPES "\n"
+ " " HELP_SPEC_ATTACH_FLAGS "\n"
+ " " HELP_SPEC_PROGRAM "\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-f|--bpffs} }\n"
+ "",
+ bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "tree", do_show_tree },
+ { "attach", do_attach },
+ { "detach", do_detach },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_cgroup(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
new file mode 100644
index 0000000000..cc6e6aae24
--- /dev/null
+++ b/tools/bpf/bpftool/common.c
@@ -0,0 +1,1110 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <ftw.h>
+#include <libgen.h>
+#include <mntent.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <sys/mount.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/vfs.h>
+
+#include <linux/filter.h>
+#include <linux/limits.h>
+#include <linux/magic.h>
+#include <linux/unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/hashmap.h>
+#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
+#include <bpf/btf.h>
+
+#include "main.h"
+
+#ifndef BPF_FS_MAGIC
+#define BPF_FS_MAGIC 0xcafe4a11
+#endif
+
+void p_err(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_name(json_wtr, "error");
+ jsonw_vprintf_enquote(json_wtr, fmt, ap);
+ jsonw_end_object(json_wtr);
+ } else {
+ fprintf(stderr, "Error: ");
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ }
+ va_end(ap);
+}
+
+void p_info(const char *fmt, ...)
+{
+ va_list ap;
+
+ if (json_output)
+ return;
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ fprintf(stderr, "\n");
+ va_end(ap);
+}
+
+static bool is_bpffs(const char *path)
+{
+ struct statfs st_fs;
+
+ if (statfs(path, &st_fs) < 0)
+ return false;
+
+ return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
+}
+
+/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
+ * memcg-based memory accounting for BPF maps and programs. This was done in
+ * commit 97306be45fbe ("Merge branch 'switch to memcg-based memory
+ * accounting'"), in Linux 5.11.
+ *
+ * Libbpf also offers to probe for memcg-based accounting vs. rlimit, but it
+ * does so by checking for the availability of a given BPF helper, and this
+ * has failed on some kernels with backports in the past; see commit
+ * 6b4384ff1088 ("Revert "bpftool: Use libbpf 1.0 API mode instead of
+ * RLIMIT_MEMLOCK"").
+ * Instead, we can probe by lowering the process-based rlimit to 0, trying to
+ * load a BPF object, and resetting the rlimit. If the load succeeds then
+ * memcg-based accounting is supported.
+ *
+ * This would be too dangerous to do in the library, because multithreaded
+ * applications might attempt to load items while the rlimit is at 0. Given
+ * that bpftool is single-threaded, this is fine to do here.
+ */
+static bool known_to_need_rlimit(void)
+{
+ struct rlimit rlim_init, rlim_cur_zero = {};
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ size_t insn_cnt = ARRAY_SIZE(insns);
+ union bpf_attr attr;
+ int prog_fd, err;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.insns = ptr_to_u64(insns);
+ attr.insn_cnt = insn_cnt;
+ attr.license = ptr_to_u64("GPL");
+
+ if (getrlimit(RLIMIT_MEMLOCK, &rlim_init))
+ return false;
+
+ /* Drop the soft limit to zero. Keep the hard limit at its
+ * current value, because lowering it would be a permanent operation
+ * for unprivileged users.
+ */
+ rlim_cur_zero.rlim_max = rlim_init.rlim_max;
+ if (setrlimit(RLIMIT_MEMLOCK, &rlim_cur_zero))
+ return false;
+
+ /* Do not use bpf_prog_load() from libbpf here, because it calls
+ * bump_rlimit_memlock(), interfering with the current probe.
+ */
+ prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+ err = errno;
+
+ /* reset soft rlimit to its initial value */
+ setrlimit(RLIMIT_MEMLOCK, &rlim_init);
+
+ if (prog_fd < 0)
+ return err == EPERM;
+
+ close(prog_fd);
+ return false;
+}
+
+void set_max_rlimit(void)
+{
+ struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+
+ if (known_to_need_rlimit())
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
+}
+
+static int
+mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
+{
+ bool bind_done = false;
+
+ while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
+ if (errno != EINVAL || bind_done) {
+ snprintf(buff, bufflen,
+ "mount --make-private %s failed: %s",
+ target, strerror(errno));
+ return -1;
+ }
+
+ if (mount(target, target, "none", MS_BIND, NULL)) {
+ snprintf(buff, bufflen,
+ "mount --bind %s %s failed: %s",
+ target, target, strerror(errno));
+ return -1;
+ }
+
+ bind_done = true;
+ }
+
+ if (mount(type, target, type, 0, "mode=0700")) {
+ snprintf(buff, bufflen, "mount -t %s %s %s failed: %s",
+ type, type, target, strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
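+
+/* Roughly equivalent shell sequence (a sketch, for intuition only):
+ *
+ *   mount --make-rprivate "$target" ||
+ *       { mount --bind "$target" "$target" &&
+ *         mount --make-rprivate "$target"; }
+ *   mount -t "$type" "$type" "$target" -o mode=0700
+ */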
+
+int mount_tracefs(const char *target)
+{
+ char err_str[ERR_MAX_LEN];
+ int err;
+
+ err = mnt_fs(target, "tracefs", err_str, ERR_MAX_LEN);
+ if (err) {
+ err_str[ERR_MAX_LEN - 1] = '\0';
+ p_err("can't mount tracefs: %s", err_str);
+ }
+
+ return err;
+}
+
+int open_obj_pinned(const char *path, bool quiet)
+{
+ char *pname;
+ int fd = -1;
+
+ pname = strdup(path);
+ if (!pname) {
+ if (!quiet)
+ p_err("mem alloc failed");
+ goto out_ret;
+ }
+
+ fd = bpf_obj_get(pname);
+ if (fd < 0) {
+ if (!quiet)
+ p_err("bpf obj get (%s): %s", pname,
+ errno == EACCES && !is_bpffs(dirname(pname)) ?
+ "directory not in bpf file system (bpffs)" :
+ strerror(errno));
+ goto out_free;
+ }
+
+out_free:
+ free(pname);
+out_ret:
+ return fd;
+}
+
+int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
+{
+ enum bpf_obj_type type;
+ int fd;
+
+ fd = open_obj_pinned(path, false);
+ if (fd < 0)
+ return -1;
+
+ type = get_fd_type(fd);
+ if (type < 0) {
+ close(fd);
+ return type;
+ }
+ if (type != exp_type) {
+ p_err("incorrect object type: %s", get_fd_type_name(type));
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+int mount_bpffs_for_pin(const char *name, bool is_dir)
+{
+ char err_str[ERR_MAX_LEN];
+ char *file;
+ char *dir;
+ int err = 0;
+
+ if (is_dir && is_bpffs(name))
+ return err;
+
+ file = malloc(strlen(name) + 1);
+ if (!file) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+
+ strcpy(file, name);
+ dir = dirname(file);
+
+ if (is_bpffs(dir))
+ /* nothing to do if already mounted */
+ goto out_free;
+
+ if (block_mount) {
+ p_err("no BPF file system found, not mounting it due to --nomount option");
+ err = -1;
+ goto out_free;
+ }
+
+ err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
+ if (err) {
+ err_str[ERR_MAX_LEN - 1] = '\0';
+ p_err("can't mount BPF file system to pin the object (%s): %s",
+ name, err_str);
+ }
+
+out_free:
+ free(file);
+ return err;
+}
+
+int do_pin_fd(int fd, const char *name)
+{
+ int err;
+
+ err = mount_bpffs_for_pin(name, false);
+ if (err)
+ return err;
+
+ err = bpf_obj_pin(fd, name);
+ if (err)
+ p_err("can't pin the object (%s): %s", name, strerror(errno));
+
+ return err;
+}
+
+int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
+{
+ int err;
+ int fd;
+
+ if (!REQ_ARGS(3))
+ return -EINVAL;
+
+ fd = get_fd(&argc, &argv);
+ if (fd < 0)
+ return fd;
+
+ err = do_pin_fd(fd, *argv);
+
+ close(fd);
+ return err;
+}
+
+const char *get_fd_type_name(enum bpf_obj_type type)
+{
+ static const char * const names[] = {
+ [BPF_OBJ_UNKNOWN] = "unknown",
+ [BPF_OBJ_PROG] = "prog",
+ [BPF_OBJ_MAP] = "map",
+ [BPF_OBJ_LINK] = "link",
+ };
+
+ if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])
+ return names[BPF_OBJ_UNKNOWN];
+
+ return names[type];
+}
+
+void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
+ char *name_buff, size_t buff_len)
+{
+ const char *prog_name = prog_info->name;
+ const struct btf_type *func_type;
+ const struct bpf_func_info finfo = {};
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ struct btf *prog_btf = NULL;
+
+ if (buff_len <= BPF_OBJ_NAME_LEN ||
+ strlen(prog_info->name) < BPF_OBJ_NAME_LEN - 1)
+ goto copy_name;
+
+ if (!prog_info->btf_id || prog_info->nr_func_info == 0)
+ goto copy_name;
+
+ info.nr_func_info = 1;
+ info.func_info_rec_size = prog_info->func_info_rec_size;
+ if (info.func_info_rec_size > sizeof(finfo))
+ info.func_info_rec_size = sizeof(finfo);
+ info.func_info = ptr_to_u64(&finfo);
+
+ if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
+ goto copy_name;
+
+ prog_btf = btf__load_from_kernel_by_id(info.btf_id);
+ if (!prog_btf)
+ goto copy_name;
+
+ func_type = btf__type_by_id(prog_btf, finfo.type_id);
+ if (!func_type || !btf_is_func(func_type))
+ goto copy_name;
+
+ prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
+
+copy_name:
+ snprintf(name_buff, buff_len, "%s", prog_name);
+
+ if (prog_btf)
+ btf__free(prog_btf);
+}
+
+int get_fd_type(int fd)
+{
+ char path[PATH_MAX];
+ char buf[512];
+ ssize_t n;
+
+ snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
+
+ n = readlink(path, buf, sizeof(buf));
+ if (n < 0) {
+ p_err("can't read link type: %s", strerror(errno));
+ return -1;
+ }
+ if (n >= (ssize_t)sizeof(buf)) {
+ p_err("can't read link type: path too long!");
+ return -1;
+ }
+ /* readlink() does not NUL-terminate the result */
+ buf[n] = '\0';
+
+ if (strstr(buf, "bpf-map"))
+ return BPF_OBJ_MAP;
+ else if (strstr(buf, "bpf-prog"))
+ return BPF_OBJ_PROG;
+ else if (strstr(buf, "bpf-link"))
+ return BPF_OBJ_LINK;
+
+ return BPF_OBJ_UNKNOWN;
+}
+
+char *get_fdinfo(int fd, const char *key)
+{
+ char path[PATH_MAX];
+ char *line = NULL;
+ size_t line_n = 0;
+ ssize_t n;
+ FILE *fdi;
+
+ snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
+
+ fdi = fopen(path, "r");
+ if (!fdi)
+ return NULL;
+
+ while ((n = getline(&line, &line_n, fdi)) > 0) {
+ char *value;
+ int len;
+
+ if (!strstr(line, key))
+ continue;
+
+ fclose(fdi);
+
+ value = strchr(line, '\t');
+ if (!value || !value[1]) {
+ free(line);
+ return NULL;
+ }
+ value++;
+
+ len = strlen(value);
+ memmove(line, value, len);
+ line[len - 1] = '\0';
+
+ return line;
+ }
+
+ free(line);
+ fclose(fdi);
+ return NULL;
+}
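+
+/* Example (assumed fdinfo layout): for a map fd,
+ * /proc/self/fdinfo/<fd> contains tab-separated lines such as
+ *   max_entries:    256
+ * so get_fdinfo(fd, "max_entries") returns an allocated "256".
+ */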
+
+void print_data_json(uint8_t *data, size_t len)
+{
+ unsigned int i;
+
+ jsonw_start_array(json_wtr);
+ for (i = 0; i < len; i++)
+ jsonw_printf(json_wtr, "%d", data[i]);
+ jsonw_end_array(json_wtr);
+}
+
+void print_hex_data_json(uint8_t *data, size_t len)
+{
+ unsigned int i;
+
+ jsonw_start_array(json_wtr);
+ for (i = 0; i < len; i++)
+ jsonw_printf(json_wtr, "\"0x%02hhx\"", data[i]);
+ jsonw_end_array(json_wtr);
+}
+
+/* extra params for nftw cb */
+static struct hashmap *build_fn_table;
+static enum bpf_obj_type build_fn_type;
+
+static int do_build_table_cb(const char *fpath, const struct stat *sb,
+ int typeflag, struct FTW *ftwbuf)
+{
+ struct bpf_prog_info pinned_info;
+ __u32 len = sizeof(pinned_info);
+ enum bpf_obj_type objtype;
+ int fd, err = 0;
+ char *path;
+
+ if (typeflag != FTW_F)
+ goto out_ret;
+
+ fd = open_obj_pinned(fpath, true);
+ if (fd < 0)
+ goto out_ret;
+
+ objtype = get_fd_type(fd);
+ if (objtype != build_fn_type)
+ goto out_close;
+
+ memset(&pinned_info, 0, sizeof(pinned_info));
+ if (bpf_prog_get_info_by_fd(fd, &pinned_info, &len))
+ goto out_close;
+
+ path = strdup(fpath);
+ if (!path) {
+ err = -1;
+ goto out_close;
+ }
+
+ err = hashmap__append(build_fn_table, pinned_info.id, path);
+ if (err) {
+ p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
+ pinned_info.id, path, strerror(errno));
+ free(path);
+ goto out_close;
+ }
+
+out_close:
+ close(fd);
+out_ret:
+ return err;
+}
+
+int build_pinned_obj_table(struct hashmap *tab,
+ enum bpf_obj_type type)
+{
+ struct mntent *mntent = NULL;
+ FILE *mntfile = NULL;
+ int flags = FTW_PHYS;
+ int nopenfd = 16;
+ int err = 0;
+
+ mntfile = setmntent("/proc/mounts", "r");
+ if (!mntfile)
+ return -1;
+
+ build_fn_table = tab;
+ build_fn_type = type;
+
+ while ((mntent = getmntent(mntfile))) {
+ char *path = mntent->mnt_dir;
+
+ if (strncmp(mntent->mnt_type, "bpf", 3) != 0)
+ continue;
+ err = nftw(path, do_build_table_cb, nopenfd, flags);
+ if (err)
+ break;
+ }
+ fclose(mntfile);
+ return err;
+}
+
+void delete_pinned_obj_table(struct hashmap *map)
+{
+ struct hashmap_entry *entry;
+ size_t bkt;
+
+ if (!map)
+ return;
+
+ hashmap__for_each_entry(map, entry, bkt)
+ free(entry->pvalue);
+
+ hashmap__free(map);
+}
+
+unsigned int get_page_size(void)
+{
+ static int result;
+
+ if (!result)
+ result = getpagesize();
+ return result;
+}
+
+unsigned int get_possible_cpus(void)
+{
+ int cpus = libbpf_num_possible_cpus();
+
+ if (cpus < 0) {
+ p_err("Can't get # of possible cpus: %s", strerror(-cpus));
+ exit(-1);
+ }
+ return cpus;
+}
+
+static char *
+ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
+{
+ struct stat st;
+ int err;
+
+ err = stat("/proc/self/ns/net", &st);
+ if (err) {
+ p_err("Can't stat /proc/self: %s", strerror(errno));
+ return NULL;
+ }
+
+ if (st.st_dev != ns_dev || st.st_ino != ns_ino)
+ return NULL;
+
+ return if_indextoname(ifindex, buf);
+}
+
+static int read_sysfs_hex_int(char *path)
+{
+ char vendor_id_buf[8];
+ int len;
+ int fd;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ p_err("Can't open %s: %s", path, strerror(errno));
+ return -1;
+ }
+
+ len = read(fd, vendor_id_buf, sizeof(vendor_id_buf));
+ close(fd);
+ if (len < 0) {
+ p_err("Can't read %s: %s", path, strerror(errno));
+ return -1;
+ }
+ if (len >= (int)sizeof(vendor_id_buf)) {
+ p_err("Value in %s too long", path);
+ return -1;
+ }
+
+ vendor_id_buf[len] = 0;
+
+ return strtol(vendor_id_buf, NULL, 0);
+}
+
+static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
+{
+ char full_path[64];
+
+ snprintf(full_path, sizeof(full_path), "/sys/class/net/%s/device/%s",
+ devname, entry_name);
+
+ return read_sysfs_hex_int(full_path);
+}
+
+const char *
+ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt)
+{
+ __maybe_unused int device_id;
+ char devname[IF_NAMESIZE];
+ int vendor_id;
+
+ if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
+ p_err("Can't get net device name for ifindex %d: %s", ifindex,
+ strerror(errno));
+ return NULL;
+ }
+
+ vendor_id = read_sysfs_netdev_hex_int(devname, "vendor");
+ if (vendor_id < 0) {
+ p_err("Can't get device vendor id for %s", devname);
+ return NULL;
+ }
+
+ switch (vendor_id) {
+#ifdef HAVE_LIBBFD_SUPPORT
+ case 0x19ee:
+ device_id = read_sysfs_netdev_hex_int(devname, "device");
+ if (device_id != 0x4000 &&
+ device_id != 0x6000 &&
+ device_id != 0x6003)
+ p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
+ *opt = "ctx4";
+ return "NFP-6xxx";
+#endif /* HAVE_LIBBFD_SUPPORT */
+ /* No NFP support in LLVM, we have no valid triple to return. */
+ default:
+ p_err("Can't get arch name for device vendor id 0x%04x",
+ vendor_id);
+ return NULL;
+ }
+}
+
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+ char name[IF_NAMESIZE];
+
+ if (!ifindex)
+ return;
+
+ printf(" offloaded_to ");
+ if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+ printf("%s", name);
+ else
+ printf("ifindex %u ns_dev %llu ns_ino %llu",
+ ifindex, ns_dev, ns_inode);
+}
+
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
+{
+ char name[IF_NAMESIZE];
+
+ if (!ifindex)
+ return;
+
+ jsonw_name(json_wtr, "dev");
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "ifindex", ifindex);
+ jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
+ jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
+ if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
+ jsonw_string_field(json_wtr, "ifname", name);
+ jsonw_end_object(json_wtr);
+}
+
+int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
+{
+ char *endptr;
+
+ NEXT_ARGP();
+
+ if (*val) {
+ p_err("%s already specified", what);
+ return -1;
+ }
+
+ *val = strtoul(**argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as %s", **argv, what);
+ return -1;
+ }
+ NEXT_ARGP();
+
+ return 0;
+}
+
+int __printf(2, 0)
+print_all_levels(__maybe_unused enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ return vfprintf(stderr, format, args);
+}
+
+static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
+{
+ char prog_name[MAX_PROG_FULL_NAME];
+ unsigned int id = 0;
+ int fd, nb_fds = 0;
+ void *tmp;
+ int err;
+
+ while (true) {
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+
+ err = bpf_prog_get_next_id(id, &id);
+ if (err) {
+ if (errno != ENOENT) {
+ p_err("%s", strerror(errno));
+ goto err_close_fds;
+ }
+ return nb_fds;
+ }
+
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0) {
+ p_err("can't get prog by id (%u): %s",
+ id, strerror(errno));
+ goto err_close_fds;
+ }
+
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get prog info (%u): %s",
+ id, strerror(errno));
+ goto err_close_fd;
+ }
+
+ if (tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) {
+ close(fd);
+ continue;
+ }
+
+ if (!tag) {
+ get_prog_full_name(&info, fd, prog_name,
+ sizeof(prog_name));
+ if (strncmp(nametag, prog_name, sizeof(prog_name))) {
+ close(fd);
+ continue;
+ }
+ }
+
+ if (nb_fds > 0) {
+ tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
+ if (!tmp) {
+ p_err("failed to realloc");
+ goto err_close_fd;
+ }
+ *fds = tmp;
+ }
+ (*fds)[nb_fds++] = fd;
+ }
+
+err_close_fd:
+ close(fd);
+err_close_fds:
+ while (--nb_fds >= 0)
+ close((*fds)[nb_fds]);
+ return -1;
+}
+
+int prog_parse_fds(int *argc, char ***argv, int **fds)
+{
+ if (is_prefix(**argv, "id")) {
+ unsigned int id;
+ char *endptr;
+
+ NEXT_ARGP();
+
+ id = strtoul(**argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as ID", **argv);
+ return -1;
+ }
+ NEXT_ARGP();
+
+ (*fds)[0] = bpf_prog_get_fd_by_id(id);
+ if ((*fds)[0] < 0) {
+ p_err("get by id (%u): %s", id, strerror(errno));
+ return -1;
+ }
+ return 1;
+ } else if (is_prefix(**argv, "tag")) {
+ unsigned char tag[BPF_TAG_SIZE];
+
+ NEXT_ARGP();
+
+ if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
+ tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
+ != BPF_TAG_SIZE) {
+ p_err("can't parse tag");
+ return -1;
+ }
+ NEXT_ARGP();
+
+ return prog_fd_by_nametag(tag, fds, true);
+ } else if (is_prefix(**argv, "name")) {
+ char *name;
+
+ NEXT_ARGP();
+
+ name = **argv;
+ if (strlen(name) > MAX_PROG_FULL_NAME - 1) {
+ p_err("can't parse name");
+ return -1;
+ }
+ NEXT_ARGP();
+
+ return prog_fd_by_nametag(name, fds, false);
+ } else if (is_prefix(**argv, "pinned")) {
+ char *path;
+
+ NEXT_ARGP();
+
+ path = **argv;
+ NEXT_ARGP();
+
+ (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
+ if ((*fds)[0] < 0)
+ return -1;
+ return 1;
+ }
+
+ p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
+ return -1;
+}
+
+int prog_parse_fd(int *argc, char ***argv)
+{
+ int *fds = NULL;
+ int nb_fds, fd;
+
+ fds = malloc(sizeof(int));
+ if (!fds) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+ nb_fds = prog_parse_fds(argc, argv, &fds);
+ if (nb_fds != 1) {
+ if (nb_fds > 1) {
+ p_err("several programs match this handle");
+ while (nb_fds--)
+ close(fds[nb_fds]);
+ }
+ fd = -1;
+ goto exit_free;
+ }
+
+ fd = fds[0];
+exit_free:
+ free(fds);
+ return fd;
+}
+
+static int map_fd_by_name(char *name, int **fds)
+{
+ unsigned int id = 0;
+ int fd, nb_fds = 0;
+ void *tmp;
+ int err;
+
+ while (true) {
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+
+ err = bpf_map_get_next_id(id, &id);
+ if (err) {
+ if (errno != ENOENT) {
+ p_err("%s", strerror(errno));
+ goto err_close_fds;
+ }
+ return nb_fds;
+ }
+
+ fd = bpf_map_get_fd_by_id(id);
+ if (fd < 0) {
+ p_err("can't get map by id (%u): %s",
+ id, strerror(errno));
+ goto err_close_fds;
+ }
+
+ err = bpf_map_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get map info (%u): %s",
+ id, strerror(errno));
+ goto err_close_fd;
+ }
+
+ if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
+ close(fd);
+ continue;
+ }
+
+ if (nb_fds > 0) {
+ tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
+ if (!tmp) {
+ p_err("failed to realloc");
+ goto err_close_fd;
+ }
+ *fds = tmp;
+ }
+ (*fds)[nb_fds++] = fd;
+ }
+
+err_close_fd:
+ close(fd);
+err_close_fds:
+ while (--nb_fds >= 0)
+ close((*fds)[nb_fds]);
+ return -1;
+}
+
+int map_parse_fds(int *argc, char ***argv, int **fds)
+{
+ if (is_prefix(**argv, "id")) {
+ unsigned int id;
+ char *endptr;
+
+ NEXT_ARGP();
+
+ id = strtoul(**argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as ID", **argv);
+ return -1;
+ }
+ NEXT_ARGP();
+
+ (*fds)[0] = bpf_map_get_fd_by_id(id);
+ if ((*fds)[0] < 0) {
+ p_err("get map by id (%u): %s", id, strerror(errno));
+ return -1;
+ }
+ return 1;
+ } else if (is_prefix(**argv, "name")) {
+ char *name;
+
+ NEXT_ARGP();
+
+ name = **argv;
+ if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
+ p_err("can't parse name");
+ return -1;
+ }
+ NEXT_ARGP();
+
+ return map_fd_by_name(name, fds);
+ } else if (is_prefix(**argv, "pinned")) {
+ char *path;
+
+ NEXT_ARGP();
+
+ path = **argv;
+ NEXT_ARGP();
+
+ (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
+ if ((*fds)[0] < 0)
+ return -1;
+ return 1;
+ }
+
+ p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
+ return -1;
+}
+
+int map_parse_fd(int *argc, char ***argv)
+{
+ int *fds = NULL;
+ int nb_fds, fd;
+
+ fds = malloc(sizeof(int));
+ if (!fds) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+ nb_fds = map_parse_fds(argc, argv, &fds);
+ if (nb_fds != 1) {
+ if (nb_fds > 1) {
+ p_err("several maps match this handle");
+ while (nb_fds--)
+ close(fds[nb_fds]);
+ }
+ fd = -1;
+ goto exit_free;
+ }
+
+ fd = fds[0];
+exit_free:
+ free(fds);
+ return fd;
+}
+
+int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
+ __u32 *info_len)
+{
+ int err;
+ int fd;
+
+ fd = map_parse_fd(argc, argv);
+ if (fd < 0)
+ return -1;
+
+ err = bpf_map_get_info_by_fd(fd, info, info_len);
+ if (err) {
+ p_err("can't get map info: %s", strerror(errno));
+ close(fd);
+ return err;
+ }
+
+ return fd;
+}
+
+size_t hash_fn_for_key_as_id(long key, void *ctx)
+{
+ return key;
+}
+
+bool equal_fn_for_key_as_id(long k1, long k2, void *ctx)
+{
+ return k1 == k2;
+}
+
+const char *bpf_attach_type_input_str(enum bpf_attach_type t)
+{
+ switch (t) {
+ case BPF_CGROUP_INET_INGRESS: return "ingress";
+ case BPF_CGROUP_INET_EGRESS: return "egress";
+ case BPF_CGROUP_INET_SOCK_CREATE: return "sock_create";
+ case BPF_CGROUP_INET_SOCK_RELEASE: return "sock_release";
+ case BPF_CGROUP_SOCK_OPS: return "sock_ops";
+ case BPF_CGROUP_DEVICE: return "device";
+ case BPF_CGROUP_INET4_BIND: return "bind4";
+ case BPF_CGROUP_INET6_BIND: return "bind6";
+ case BPF_CGROUP_INET4_CONNECT: return "connect4";
+ case BPF_CGROUP_INET6_CONNECT: return "connect6";
+ case BPF_CGROUP_INET4_POST_BIND: return "post_bind4";
+ case BPF_CGROUP_INET6_POST_BIND: return "post_bind6";
+ case BPF_CGROUP_INET4_GETPEERNAME: return "getpeername4";
+ case BPF_CGROUP_INET6_GETPEERNAME: return "getpeername6";
+ case BPF_CGROUP_INET4_GETSOCKNAME: return "getsockname4";
+ case BPF_CGROUP_INET6_GETSOCKNAME: return "getsockname6";
+ case BPF_CGROUP_UDP4_SENDMSG: return "sendmsg4";
+ case BPF_CGROUP_UDP6_SENDMSG: return "sendmsg6";
+ case BPF_CGROUP_SYSCTL: return "sysctl";
+ case BPF_CGROUP_UDP4_RECVMSG: return "recvmsg4";
+ case BPF_CGROUP_UDP6_RECVMSG: return "recvmsg6";
+ case BPF_CGROUP_GETSOCKOPT: return "getsockopt";
+ case BPF_CGROUP_SETSOCKOPT: return "setsockopt";
+ case BPF_TRACE_RAW_TP: return "raw_tp";
+ case BPF_TRACE_FENTRY: return "fentry";
+ case BPF_TRACE_FEXIT: return "fexit";
+ case BPF_MODIFY_RETURN: return "mod_ret";
+ case BPF_SK_REUSEPORT_SELECT: return "sk_skb_reuseport_select";
+ case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: return "sk_skb_reuseport_select_or_migrate";
+ default: return libbpf_bpf_attach_type_str(t);
+ }
+}
+
+int pathname_concat(char *buf, int buf_sz, const char *path,
+ const char *name)
+{
+ int len;
+
+ len = snprintf(buf, buf_sz, "%s/%s", path, name);
+ if (len < 0)
+ return -EINVAL;
+ if (len >= buf_sz)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
new file mode 100644
index 0000000000..edda4fc2c4
--- /dev/null
+++ b/tools/bpf/bpftool/feature.c
@@ -0,0 +1,1344 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (c) 2019 Netronome Systems, Inc. */
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <net/if.h>
+#ifdef USE_LIBCAP
+#include <sys/capability.h>
+#endif
+#include <sys/utsname.h>
+#include <sys/vfs.h>
+
+#include <linux/filter.h>
+#include <linux/limits.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <zlib.h>
+
+#include "main.h"
+
+#ifndef PROC_SUPER_MAGIC
+# define PROC_SUPER_MAGIC 0x9fa0
+#endif
+
+enum probe_component {
+ COMPONENT_UNSPEC,
+ COMPONENT_KERNEL,
+ COMPONENT_DEVICE,
+};
+
+#define BPF_HELPER_MAKE_ENTRY(name) [BPF_FUNC_ ## name] = "bpf_" # name
+static const char * const helper_name[] = {
+ __BPF_FUNC_MAPPER(BPF_HELPER_MAKE_ENTRY)
+};
+
+#undef BPF_HELPER_MAKE_ENTRY
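+
+/* Expansion example (illustrative): for the map_lookup_elem helper,
+ * BPF_HELPER_MAKE_ENTRY(map_lookup_elem) expands to
+ *   [BPF_FUNC_map_lookup_elem] = "bpf_" "map_lookup_elem"
+ * i.e. "bpf_map_lookup_elem", so helper_name[] maps each BPF_FUNC_*
+ * index to the helper's C name.
+ */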
+
+static bool full_mode;
+#ifdef USE_LIBCAP
+static bool run_as_unprivileged;
+#endif
+
+/* Miscellaneous utility functions */
+
+static bool grep(const char *buffer, const char *pattern)
+{
+ return !!strstr(buffer, pattern);
+}
+
+static bool check_procfs(void)
+{
+ struct statfs st_fs;
+
+ if (statfs("/proc", &st_fs) < 0)
+ return false;
+ if ((unsigned long)st_fs.f_type != PROC_SUPER_MAGIC)
+ return false;
+
+ return true;
+}
+
+static void uppercase(char *str, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len && str[i] != '\0'; i++)
+ str[i] = toupper(str[i]);
+}
+
+/* Printing utility functions */
+
+static void
+print_bool_feature(const char *feat_name, const char *plain_name,
+ const char *define_name, bool res, const char *define_prefix)
+{
+ if (json_output)
+ jsonw_bool_field(json_wtr, feat_name, res);
+ else if (define_prefix)
+ printf("#define %s%sHAVE_%s\n", define_prefix,
+ res ? "" : "NO_", define_name);
+ else
+ printf("%s is %savailable\n", plain_name, res ? "" : "NOT ");
+}
+
+static void print_kernel_option(const char *name, const char *value,
+ const char *define_prefix)
+{
+ char *endptr;
+ int res;
+
+ if (json_output) {
+ if (!value) {
+ jsonw_null_field(json_wtr, name);
+ return;
+ }
+ errno = 0;
+ res = strtol(value, &endptr, 0);
+ if (!errno && *endptr == '\n')
+ jsonw_int_field(json_wtr, name, res);
+ else
+ jsonw_string_field(json_wtr, name, value);
+ } else if (define_prefix) {
+ if (value)
+ printf("#define %s%s %s\n", define_prefix,
+ name, value);
+ else
+ printf("/* %s%s is not set */\n", define_prefix, name);
+ } else {
+ if (value)
+ printf("%s is set to %s\n", name, value);
+ else
+ printf("%s is not set\n", name);
+ }
+}
+
+static void
+print_start_section(const char *json_title, const char *plain_title,
+ const char *define_comment, const char *define_prefix)
+{
+ if (json_output) {
+ jsonw_name(json_wtr, json_title);
+ jsonw_start_object(json_wtr);
+ } else if (define_prefix) {
+ printf("%s\n", define_comment);
+ } else {
+ printf("%s\n", plain_title);
+ }
+}
+
+static void print_end_section(void)
+{
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+}
+
+/* Probing functions */
+
+static int get_vendor_id(int ifindex)
+{
+ char ifname[IF_NAMESIZE], path[64], buf[8];
+ ssize_t len;
+ int fd;
+
+ if (!if_indextoname(ifindex, ifname))
+ return -1;
+
+ snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
+
+ fd = open(path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ return -1;
+
+ len = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (len < 0)
+ return -1;
+ if (len >= (ssize_t)sizeof(buf))
+ return -1;
+ buf[len] = '\0';
+
+ return strtol(buf, NULL, 0);
+}
+
+static long read_procfs(const char *path)
+{
+ char *endptr, *line = NULL;
+ size_t len = 0;
+ FILE *fd;
+ long res;
+
+ fd = fopen(path, "r");
+ if (!fd)
+ return -1;
+
+ res = getline(&line, &len, fd);
+ fclose(fd);
+ if (res < 0)
+ return -1;
+
+ errno = 0;
+ res = strtol(line, &endptr, 10);
+ if (errno || *line == '\0' || *endptr != '\n')
+ res = -1;
+ free(line);
+
+ return res;
+}
+
+static void probe_unprivileged_disabled(void)
+{
+ long res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/kernel/unprivileged_bpf_disabled");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "unprivileged_bpf_disabled", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("bpf() syscall for unprivileged users is enabled\n");
+ break;
+ case 1:
+ printf("bpf() syscall restricted to privileged users (without recovery)\n");
+ break;
+ case 2:
+ printf("bpf() syscall restricted to privileged users (admin can change)\n");
+ break;
+ case -1:
+ printf("Unable to retrieve required privileges for bpf() syscall\n");
+ break;
+ default:
+ printf("bpf() syscall restriction has unknown value %ld\n", res);
+ }
+ }
+}
+
+static void probe_jit_enable(void)
+{
+ long res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_enable");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_enable", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler is disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler is enabled\n");
+ break;
+ case 2:
+ printf("JIT compiler is enabled with debugging traces in kernel logs\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT-compiler status\n");
+ break;
+ default:
+ printf("JIT-compiler status has unknown value %ld\n",
+ res);
+ }
+ }
+}
+
+static void probe_jit_harden(void)
+{
+ long res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_harden");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_harden", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler hardening is disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler hardening is enabled for unprivileged users\n");
+ break;
+ case 2:
+ printf("JIT compiler hardening is enabled for all users\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT hardening status\n");
+ break;
+ default:
+ printf("JIT hardening status has unknown value %ld\n",
+ res);
+ }
+ }
+}
+
+static void probe_jit_kallsyms(void)
+{
+ long res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_kallsyms");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_kallsyms", res);
+ } else {
+ switch (res) {
+ case 0:
+ printf("JIT compiler kallsyms exports are disabled\n");
+ break;
+ case 1:
+ printf("JIT compiler kallsyms exports are enabled for root\n");
+ break;
+ case -1:
+ printf("Unable to retrieve JIT kallsyms export status\n");
+ break;
+ default:
+ printf("JIT kallsyms exports status has unknown value %ld\n", res);
+ }
+ }
+}
+
+static void probe_jit_limit(void)
+{
+ long res;
+
+ /* No support for C-style output */
+
+ res = read_procfs("/proc/sys/net/core/bpf_jit_limit");
+ if (json_output) {
+ jsonw_int_field(json_wtr, "bpf_jit_limit", res);
+ } else {
+ switch (res) {
+ case -1:
+ printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n");
+ break;
+ default:
+ printf("Global memory limit for JIT compiler for unprivileged users is %ld bytes\n", res);
+ }
+ }
+}
+
+static bool read_next_kernel_config_option(gzFile file, char *buf, size_t n,
+ char **value)
+{
+ char *sep;
+
+ while (gzgets(file, buf, n)) {
+ if (strncmp(buf, "CONFIG_", 7))
+ continue;
+
+ sep = strchr(buf, '=');
+ if (!sep)
+ continue;
+
+ /* Trim the trailing '\n' */
+ buf[strlen(buf) - 1] = '\0';
+
+ /* Split on '=' and ensure that a value is present. */
+ *sep = '\0';
+ if (!sep[1])
+ continue;
+
+ *value = sep + 1;
+ return true;
+ }
+
+ return false;
+}
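+
+/* Example (sketch): for a config line "CONFIG_BPF=y\n", the helper above
+ * leaves the option name "CONFIG_BPF" in buf (the '=' is overwritten with a
+ * NUL) and points *value at "y". Typical iteration:
+ *
+ *	char buf[4096], *value;
+ *
+ *	while (read_next_kernel_config_option(file, buf, sizeof(buf), &value))
+ *		printf("%s -> %s\n", buf, value);
+ */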
+
+static void probe_kernel_image_config(const char *define_prefix)
+{
+ static const struct {
+ const char * const name;
+ bool macro_dump;
+ } options[] = {
+ /* Enable BPF */
+ { "CONFIG_BPF", },
+ /* Enable bpf() syscall */
+ { "CONFIG_BPF_SYSCALL", },
+ /* Does selected architecture support eBPF JIT compiler */
+ { "CONFIG_HAVE_EBPF_JIT", },
+ /* Compile eBPF JIT compiler */
+ { "CONFIG_BPF_JIT", },
+ /* Avoid compiling eBPF interpreter (use JIT only) */
+ { "CONFIG_BPF_JIT_ALWAYS_ON", },
+ /* Kernel BTF debug information available */
+ { "CONFIG_DEBUG_INFO_BTF", },
+ /* Kernel module BTF debug information available */
+ { "CONFIG_DEBUG_INFO_BTF_MODULES", },
+
+ /* cgroups */
+ { "CONFIG_CGROUPS", },
+ /* BPF programs attached to cgroups */
+ { "CONFIG_CGROUP_BPF", },
+ /* bpf_get_cgroup_classid() helper */
+ { "CONFIG_CGROUP_NET_CLASSID", },
+ /* bpf_skb_{,ancestor_}cgroup_id() helpers */
+ { "CONFIG_SOCK_CGROUP_DATA", },
+
+ /* Tracing: attach BPF to kprobes, tracepoints, etc. */
+ { "CONFIG_BPF_EVENTS", },
+ /* Kprobes */
+ { "CONFIG_KPROBE_EVENTS", },
+ /* Uprobes */
+ { "CONFIG_UPROBE_EVENTS", },
+ /* Tracepoints */
+ { "CONFIG_TRACING", },
+ /* Syscall tracepoints */
+ { "CONFIG_FTRACE_SYSCALLS", },
+ /* bpf_override_return() helper support for selected arch */
+ { "CONFIG_FUNCTION_ERROR_INJECTION", },
+ /* bpf_override_return() helper */
+ { "CONFIG_BPF_KPROBE_OVERRIDE", },
+
+ /* Network */
+ { "CONFIG_NET", },
+ /* AF_XDP sockets */
+ { "CONFIG_XDP_SOCKETS", },
+ /* BPF_PROG_TYPE_LWT_* and related helpers */
+ { "CONFIG_LWTUNNEL_BPF", },
+ /* BPF_PROG_TYPE_SCHED_ACT, TC (traffic control) actions */
+ { "CONFIG_NET_ACT_BPF", },
+ /* BPF_PROG_TYPE_SCHED_CLS, TC filters */
+ { "CONFIG_NET_CLS_BPF", },
+ /* TC clsact qdisc */
+ { "CONFIG_NET_CLS_ACT", },
+ /* Ingress filtering with TC */
+ { "CONFIG_NET_SCH_INGRESS", },
+ /* bpf_skb_get_xfrm_state() helper */
+ { "CONFIG_XFRM", },
+ /* bpf_get_route_realm() helper */
+ { "CONFIG_IP_ROUTE_CLASSID", },
+ /* BPF_PROG_TYPE_LWT_SEG6_LOCAL and related helpers */
+ { "CONFIG_IPV6_SEG6_BPF", },
+ /* BPF_PROG_TYPE_LIRC_MODE2 and related helpers */
+ { "CONFIG_BPF_LIRC_MODE2", },
+ /* BPF stream parser and BPF socket maps */
+ { "CONFIG_BPF_STREAM_PARSER", },
+ /* xt_bpf module for passing BPF programs to netfilter */
+ { "CONFIG_NETFILTER_XT_MATCH_BPF", },
+ /* bpfilter back-end for iptables */
+ { "CONFIG_BPFILTER", },
+ /* bpfilter module with "user mode helper" */
+ { "CONFIG_BPFILTER_UMH", },
+
+ /* test_bpf module for BPF tests */
+ { "CONFIG_TEST_BPF", },
+
+ /* Misc configs useful in BPF C programs */
+ /* jiffies <-> sec conversion for bpf_jiffies64() helper */
+ { "CONFIG_HZ", true, }
+ };
+ char *values[ARRAY_SIZE(options)] = { };
+ struct utsname utsn;
+ char path[PATH_MAX];
+ gzFile file = NULL;
+ char buf[4096];
+ char *value;
+ size_t i;
+
+ if (!uname(&utsn)) {
+ snprintf(path, sizeof(path), "/boot/config-%s", utsn.release);
+
+ /* gzopen also accepts uncompressed files. */
+ file = gzopen(path, "r");
+ }
+
+ if (!file) {
+ /* Some distributions build with CONFIG_IKCONFIG=y and put the
+ * config file at /proc/config.gz.
+ */
+ file = gzopen("/proc/config.gz", "r");
+ }
+ if (!file) {
+ p_info("skipping kernel config, can't open file: %s",
+ strerror(errno));
+ goto end_parse;
+ }
+ /* Sanity checks */
+ if (!gzgets(file, buf, sizeof(buf)) ||
+ !gzgets(file, buf, sizeof(buf))) {
+ p_info("skipping kernel config, can't read from file: %s",
+ strerror(errno));
+ goto end_parse;
+ }
+ if (strcmp(buf, "# Automatically generated file; DO NOT EDIT.\n")) {
+ p_info("skipping kernel config, can't find correct file");
+ goto end_parse;
+ }
+
+ while (read_next_kernel_config_option(file, buf, sizeof(buf), &value)) {
+ for (i = 0; i < ARRAY_SIZE(options); i++) {
+ if ((define_prefix && !options[i].macro_dump) ||
+ values[i] || strcmp(buf, options[i].name))
+ continue;
+
+ values[i] = strdup(value);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(options); i++) {
+ if (define_prefix && !options[i].macro_dump)
+ continue;
+ print_kernel_option(options[i].name, values[i], define_prefix);
+ free(values[i]);
+ }
+
+end_parse:
+ if (file)
+ gzclose(file);
+}
+
+static bool probe_bpf_syscall(const char *define_prefix)
+{
+ bool res;
+
+ bpf_prog_load(BPF_PROG_TYPE_UNSPEC, NULL, NULL, NULL, 0, NULL);
+ res = (errno != ENOSYS);
+
+ print_bool_feature("have_bpf_syscall",
+ "bpf() syscall",
+ "BPF_SYSCALL",
+ res, define_prefix);
+
+ return res;
+}
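+
+/* The check above relies on errno being ENOSYS only when the bpf() syscall
+ * is entirely absent; any other error (EPERM, EINVAL, ...) still proves the
+ * syscall exists. A minimal sketch of the same idea:
+ *
+ *	errno = 0;
+ *	bpf_prog_load(BPF_PROG_TYPE_UNSPEC, NULL, NULL, NULL, 0, NULL);
+ *	if (errno == ENOSYS)
+ *		printf("no bpf() syscall\n");
+ */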
+
+static bool
+probe_prog_load_ifindex(enum bpf_prog_type prog_type,
+ const struct bpf_insn *insns, size_t insns_cnt,
+ char *log_buf, size_t log_buf_sz,
+ __u32 ifindex)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_buf = log_buf,
+ .log_size = log_buf_sz,
+ .log_level = log_buf ? 1 : 0,
+ .prog_ifindex = ifindex,
+ );
+ int fd;
+
+ errno = 0;
+ fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
+ if (fd >= 0)
+ close(fd);
+
+ return fd >= 0 && errno != EINVAL && errno != EOPNOTSUPP;
+}
+
+static bool probe_prog_type_ifindex(enum bpf_prog_type prog_type, __u32 ifindex)
+{
+ /* nfp returns -EINVAL on exit(0) with TC offload */
+ struct bpf_insn insns[2] = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN()
+ };
+
+ return probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns),
+ NULL, 0, ifindex);
+}
+
+static void
+probe_prog_type(enum bpf_prog_type prog_type, const char *prog_type_str,
+ bool *supported_types, const char *define_prefix, __u32 ifindex)
+{
+ char feat_name[128], plain_desc[128], define_name[128];
+ const char *plain_comment = "eBPF program_type ";
+ size_t maxlen;
+ bool res;
+
+ if (ifindex) {
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_XDP:
+ break;
+ default:
+ return;
+ }
+
+ res = probe_prog_type_ifindex(prog_type, ifindex);
+ } else {
+ res = libbpf_probe_bpf_prog_type(prog_type, NULL) > 0;
+ }
+
+#ifdef USE_LIBCAP
+ /* The probe may succeed even if the program load fails; for unprivileged
+ * users, check that we did not fail because of insufficient permissions.
+ */
+ if (run_as_unprivileged && errno == EPERM)
+ res = false;
+#endif
+
+ supported_types[prog_type] |= res;
+
+ maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+ if (strlen(prog_type_str) > maxlen) {
+ p_info("program type name too long");
+ return;
+ }
+
+ sprintf(feat_name, "have_%s_prog_type", prog_type_str);
+ sprintf(define_name, "%s_prog_type", prog_type_str);
+ uppercase(define_name, sizeof(define_name));
+ sprintf(plain_desc, "%s%s", plain_comment, prog_type_str);
+ print_bool_feature(feat_name, plain_desc, define_name, res,
+ define_prefix);
+}
+
+static bool probe_map_type_ifindex(enum bpf_map_type map_type, __u32 ifindex)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
+ int key_size, value_size, max_entries;
+ int fd;
+
+ opts.map_ifindex = ifindex;
+
+ key_size = sizeof(__u32);
+ value_size = sizeof(__u32);
+ max_entries = 1;
+
+ fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries,
+ &opts);
+ if (fd >= 0)
+ close(fd);
+
+ return fd >= 0;
+}
+
+static void
+probe_map_type(enum bpf_map_type map_type, char const *map_type_str,
+ const char *define_prefix, __u32 ifindex)
+{
+ char feat_name[128], plain_desc[128], define_name[128];
+ const char *plain_comment = "eBPF map_type ";
+ size_t maxlen;
+ bool res;
+
+ if (ifindex) {
+ switch (map_type) {
+ case BPF_MAP_TYPE_HASH:
+ case BPF_MAP_TYPE_ARRAY:
+ break;
+ default:
+ return;
+ }
+
+ res = probe_map_type_ifindex(map_type, ifindex);
+ } else {
+ res = libbpf_probe_bpf_map_type(map_type, NULL) > 0;
+ }
+
+ /* The probe result depends on the success of map creation; no additional
+ * check is required for unprivileged users.
+ */
+
+ maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
+ if (strlen(map_type_str) > maxlen) {
+ p_info("map type name too long");
+ return;
+ }
+
+ sprintf(feat_name, "have_%s_map_type", map_type_str);
+ sprintf(define_name, "%s_map_type", map_type_str);
+ uppercase(define_name, sizeof(define_name));
+ sprintf(plain_desc, "%s%s", plain_comment, map_type_str);
+ print_bool_feature(feat_name, plain_desc, define_name, res,
+ define_prefix);
+}
+
+static bool
+probe_helper_ifindex(enum bpf_func_id id, enum bpf_prog_type prog_type,
+ __u32 ifindex)
+{
+ struct bpf_insn insns[2] = {
+ BPF_EMIT_CALL(id),
+ BPF_EXIT_INSN()
+ };
+ char buf[4096] = {};
+ bool res;
+
+ probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns), buf,
+ sizeof(buf), ifindex);
+ res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
+
+ switch (get_vendor_id(ifindex)) {
+ case 0x19ee: /* Netronome specific */
+ res = res && !grep(buf, "not supported by FW") &&
+ !grep(buf, "unsupported function id");
+ break;
+ default:
+ break;
+ }
+
+ return res;
+}
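+
+/* Example (sketch): when a helper is unknown to the kernel, the verifier log
+ * captured in buf typically ends with a line of the form
+ *
+ *	unknown func <helper name>#<helper id>
+ *
+ * so grepping for "invalid func " and "unknown func " (plus the
+ * Netronome-specific firmware messages) is enough to tell support apart.
+ */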
+
+static bool
+probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
+ const char *define_prefix, unsigned int id,
+ const char *ptype_name, __u32 ifindex)
+{
+ bool res = false;
+
+ if (supported_type) {
+ if (ifindex)
+ res = probe_helper_ifindex(id, prog_type, ifindex);
+ else
+ res = libbpf_probe_bpf_helper(prog_type, id, NULL) > 0;
+#ifdef USE_LIBCAP
+ /* The probe may succeed even if the program load fails; for
+ * unprivileged users, check that we did not fail because of
+ * insufficient permissions.
+ */
+ if (run_as_unprivileged && errno == EPERM)
+ res = false;
+#endif
+ }
+
+ if (json_output) {
+ if (res)
+ jsonw_string(json_wtr, helper_name[id]);
+ } else if (define_prefix) {
+ printf("#define %sBPF__PROG_TYPE_%s__HELPER_%s %s\n",
+ define_prefix, ptype_name, helper_name[id],
+ res ? "1" : "0");
+ } else {
+ if (res)
+ printf("\n\t- %s", helper_name[id]);
+ }
+
+ return res;
+}
+
+static void
+probe_helpers_for_progtype(enum bpf_prog_type prog_type,
+ const char *prog_type_str, bool supported_type,
+ const char *define_prefix, __u32 ifindex)
+{
+ char feat_name[128];
+ unsigned int id;
+ bool probe_res = false;
+
+ if (ifindex)
+ /* Only test helpers for offload-able program types */
+ switch (prog_type) {
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_XDP:
+ break;
+ default:
+ return;
+ }
+
+ if (json_output) {
+ sprintf(feat_name, "%s_available_helpers", prog_type_str);
+ jsonw_name(json_wtr, feat_name);
+ jsonw_start_array(json_wtr);
+ } else if (!define_prefix) {
+ printf("eBPF helpers supported for program type %s:",
+ prog_type_str);
+ }
+
+ for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
+ /* Skip helper functions that emit dmesg messages when not in
+ * full mode.
+ */
+ switch (id) {
+ case BPF_FUNC_trace_printk:
+ case BPF_FUNC_trace_vprintk:
+ case BPF_FUNC_probe_write_user:
+ if (!full_mode)
+ continue;
+ fallthrough;
+ default:
+ probe_res |= probe_helper_for_progtype(prog_type, supported_type,
+ define_prefix, id, prog_type_str,
+ ifindex);
+ }
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+ else if (!define_prefix) {
+ printf("\n");
+ if (!probe_res) {
+ if (!supported_type)
+ printf("\tProgram type not supported\n");
+ else
+ printf("\tCould not determine which helpers are available\n");
+ }
+ }
+}
+
+static void
+probe_misc_feature(struct bpf_insn *insns, size_t len,
+ const char *define_prefix, __u32 ifindex,
+ const char *feat_name, const char *plain_name,
+ const char *define_name)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .prog_ifindex = ifindex,
+ );
+ bool res;
+ int fd;
+
+ errno = 0;
+ fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
+ insns, len, &opts);
+ res = fd >= 0 || !errno;
+
+ if (fd >= 0)
+ close(fd);
+
+ print_bool_feature(feat_name, plain_name, define_name, res,
+ define_prefix);
+}
+
+/*
+ * Probe for availability of kernel commit (5.3):
+ *
+ * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
+ */
+static void probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
+{
+ struct bpf_insn insns[BPF_MAXINSNS + 1];
+ int i;
+
+ for (i = 0; i < BPF_MAXINSNS; i++)
+ insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
+ insns[BPF_MAXINSNS] = BPF_EXIT_INSN();
+
+ probe_misc_feature(insns, ARRAY_SIZE(insns),
+ define_prefix, ifindex,
+ "have_large_insn_limit",
+ "Large program size limit",
+ "LARGE_INSN_LIMIT");
+}
+
+/*
+ * Probe for bounded loop support introduced in commit 2589726d12a1
+ * ("bpf: introduce bounded loops").
+ */
+static void
+probe_bounded_loops(const char *define_prefix, __u32 ifindex)
+{
+ struct bpf_insn insns[4] = {
+ BPF_MOV64_IMM(BPF_REG_0, 10),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, -2),
+ BPF_EXIT_INSN()
+ };
+
+ probe_misc_feature(insns, ARRAY_SIZE(insns),
+ define_prefix, ifindex,
+ "have_bounded_loops",
+ "Bounded loop support",
+ "BOUNDED_LOOPS");
+}
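+
+/* The four instructions above are roughly equivalent to this C loop (sketch),
+ * which only passes the verifier once bounded loops are supported:
+ *
+ *	r0 = 10;
+ *	do {
+ *		r0 -= 1;
+ *	} while (r0 != 0);
+ *	return r0;
+ */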
+
+/*
+ * Probe for the v2 instruction set extension introduced in commit 92b31a9af73b
+ * ("bpf: add BPF_J{LT,LE,SLT,SLE} instructions").
+ */
+static void
+probe_v2_isa_extension(const char *define_prefix, __u32 ifindex)
+{
+ struct bpf_insn insns[4] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN()
+ };
+
+ probe_misc_feature(insns, ARRAY_SIZE(insns),
+ define_prefix, ifindex,
+ "have_v2_isa_extension",
+ "ISA extension v2",
+ "V2_ISA_EXTENSION");
+}
+
+/*
+ * Probe for the v3 instruction set extension introduced in commit 092ed0968bb6
+ * ("bpf: verifier support JMP32").
+ */
+static void
+probe_v3_isa_extension(const char *define_prefix, __u32 ifindex)
+{
+ struct bpf_insn insns[4] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JLT, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN()
+ };
+
+ probe_misc_feature(insns, ARRAY_SIZE(insns),
+ define_prefix, ifindex,
+ "have_v3_isa_extension",
+ "ISA extension v3",
+ "V3_ISA_EXTENSION");
+}
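+
+/* Both ISA probes follow the same pattern: load a minimal program that uses
+ * one instruction from the extension (BPF_JLT for v2, its 32-bit BPF_JMP32
+ * counterpart for v3) and report support if the verifier accepts it. As
+ * pseudo-C (sketch):
+ *
+ *	r0 = 0;
+ *	if (r0 < 0)	// JLT: rejected by pre-extension verifiers
+ *		goto out;
+ *	r0 = 1;
+ * out:
+ *	return r0;
+ */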
+
+static void
+section_system_config(enum probe_component target, const char *define_prefix)
+{
+ switch (target) {
+ case COMPONENT_KERNEL:
+ case COMPONENT_UNSPEC:
+ print_start_section("system_config",
+ "Scanning system configuration...",
+ "/*** Misc kernel config items ***/",
+ define_prefix);
+ if (!define_prefix) {
+ if (check_procfs()) {
+ probe_unprivileged_disabled();
+ probe_jit_enable();
+ probe_jit_harden();
+ probe_jit_kallsyms();
+ probe_jit_limit();
+ } else {
+ p_info("/* procfs not mounted, skipping related probes */");
+ }
+ }
+ probe_kernel_image_config(define_prefix);
+ print_end_section();
+ break;
+ default:
+ break;
+ }
+}
+
+static bool section_syscall_config(const char *define_prefix)
+{
+ bool res;
+
+ print_start_section("syscall_config",
+ "Scanning system call availability...",
+ "/*** System call availability ***/",
+ define_prefix);
+ res = probe_bpf_syscall(define_prefix);
+ print_end_section();
+
+ return res;
+}
+
+static void
+section_program_types(bool *supported_types, const char *define_prefix,
+ __u32 ifindex)
+{
+ unsigned int prog_type = BPF_PROG_TYPE_UNSPEC;
+ const char *prog_type_str;
+
+ print_start_section("program_types",
+ "Scanning eBPF program types...",
+ "/*** eBPF program types ***/",
+ define_prefix);
+
+ while (true) {
+ prog_type++;
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (!prog_type_str)
+ break;
+
+ probe_prog_type(prog_type, prog_type_str, supported_types, define_prefix,
+ ifindex);
+ }
+
+ print_end_section();
+}
+
+static void section_map_types(const char *define_prefix, __u32 ifindex)
+{
+ unsigned int map_type = BPF_MAP_TYPE_UNSPEC;
+ const char *map_type_str;
+
+ print_start_section("map_types",
+ "Scanning eBPF map types...",
+ "/*** eBPF map types ***/",
+ define_prefix);
+
+ while (true) {
+ map_type++;
+ map_type_str = libbpf_bpf_map_type_str(map_type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (!map_type_str)
+ break;
+
+ probe_map_type(map_type, map_type_str, define_prefix, ifindex);
+ }
+
+ print_end_section();
+}
+
+static void
+section_helpers(bool *supported_types, const char *define_prefix, __u32 ifindex)
+{
+ unsigned int prog_type = BPF_PROG_TYPE_UNSPEC;
+ const char *prog_type_str;
+
+ print_start_section("helpers",
+ "Scanning eBPF helper functions...",
+ "/*** eBPF helper functions ***/",
+ define_prefix);
+
+ if (define_prefix)
+ printf("/*\n"
+ " * Use %sHAVE_PROG_TYPE_HELPER(prog_type_name, helper_name)\n"
+ " * to determine if <helper_name> is available for <prog_type_name>,\n"
+ " * e.g.\n"
+ " * #if %sHAVE_PROG_TYPE_HELPER(xdp, bpf_redirect)\n"
+ " * // do stuff with this helper\n"
+ " * #elif\n"
+ " * // use a workaround\n"
+ " * #endif\n"
+ " */\n"
+ "#define %sHAVE_PROG_TYPE_HELPER(prog_type, helper) \\\n"
+ " %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
+ define_prefix, define_prefix, define_prefix,
+ define_prefix);
+ while (true) {
+ prog_type++;
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (!prog_type_str)
+ break;
+
+ probe_helpers_for_progtype(prog_type, prog_type_str,
+ supported_types[prog_type],
+ define_prefix,
+ ifindex);
+ }
+
+ print_end_section();
+}
+
+static void section_misc(const char *define_prefix, __u32 ifindex)
+{
+ print_start_section("misc",
+ "Scanning miscellaneous eBPF features...",
+ "/*** eBPF misc features ***/",
+ define_prefix);
+ probe_large_insn_limit(define_prefix, ifindex);
+ probe_bounded_loops(define_prefix, ifindex);
+ probe_v2_isa_extension(define_prefix, ifindex);
+ probe_v3_isa_extension(define_prefix, ifindex);
+ print_end_section();
+}
+
+#ifdef USE_LIBCAP
+#define capability(c) { c, false, #c }
+#define capability_msg(a, i) a[i].set ? "" : a[i].name, a[i].set ? "" : ", "
+#endif
+
+static int handle_perms(void)
+{
+#ifdef USE_LIBCAP
+ struct {
+ cap_value_t cap;
+ bool set;
+ char name[14]; /* strlen("CAP_SYS_ADMIN") */
+ } bpf_caps[] = {
+ capability(CAP_SYS_ADMIN),
+#ifdef CAP_BPF
+ capability(CAP_BPF),
+ capability(CAP_NET_ADMIN),
+ capability(CAP_PERFMON),
+#endif
+ };
+ cap_value_t cap_list[ARRAY_SIZE(bpf_caps)];
+ unsigned int i, nb_bpf_caps = 0;
+ bool cap_sys_admin_only = true;
+ cap_flag_value_t val;
+ int res = -1;
+ cap_t caps;
+
+ caps = cap_get_proc();
+ if (!caps) {
+ p_err("failed to get capabilities for process: %s",
+ strerror(errno));
+ return -1;
+ }
+
+#ifdef CAP_BPF
+ if (CAP_IS_SUPPORTED(CAP_BPF))
+ cap_sys_admin_only = false;
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(bpf_caps); i++) {
+ const char *cap_name = bpf_caps[i].name;
+ cap_value_t cap = bpf_caps[i].cap;
+
+ if (cap_get_flag(caps, cap, CAP_EFFECTIVE, &val)) {
+ p_err("bug: failed to retrieve %s status: %s", cap_name,
+ strerror(errno));
+ goto exit_free;
+ }
+
+ if (val == CAP_SET) {
+ bpf_caps[i].set = true;
+ cap_list[nb_bpf_caps++] = cap;
+ }
+
+ if (cap_sys_admin_only)
+ /* System does not know about CAP_BPF, meaning that
+ * CAP_SYS_ADMIN is the only capability required. We
+ * just checked it; break.
+ */
+ break;
+ }
+
+ if ((run_as_unprivileged && !nb_bpf_caps) ||
+ (!run_as_unprivileged && nb_bpf_caps == ARRAY_SIZE(bpf_caps)) ||
+ (!run_as_unprivileged && cap_sys_admin_only && nb_bpf_caps)) {
+ /* We are all good, exit now */
+ res = 0;
+ goto exit_free;
+ }
+
+ if (!run_as_unprivileged) {
+ if (cap_sys_admin_only)
+ p_err("missing %s, required for full feature probing; run as root or use 'unprivileged'",
+ bpf_caps[0].name);
+ else
+ p_err("missing %s%s%s%s%s%s%s%srequired for full feature probing; run as root or use 'unprivileged'",
+ capability_msg(bpf_caps, 0),
+#ifdef CAP_BPF
+ capability_msg(bpf_caps, 1),
+ capability_msg(bpf_caps, 2),
+ capability_msg(bpf_caps, 3)
+#else
+ "", "", "", "", "", ""
+#endif /* CAP_BPF */
+ );
+ goto exit_free;
+ }
+
+ /* if (run_as_unprivileged && nb_bpf_caps > 0), drop capabilities. */
+ if (cap_set_flag(caps, CAP_EFFECTIVE, nb_bpf_caps, cap_list,
+ CAP_CLEAR)) {
+ p_err("bug: failed to clear capabilities: %s", strerror(errno));
+ goto exit_free;
+ }
+
+ if (cap_set_proc(caps)) {
+ p_err("failed to drop capabilities: %s", strerror(errno));
+ goto exit_free;
+ }
+
+ res = 0;
+
+exit_free:
+ if (cap_free(caps) && !res) {
+ p_err("failed to clear storage object for capabilities: %s",
+ strerror(errno));
+ res = -1;
+ }
+
+ return res;
+#else
+ /* Detection assumes the user has specific privileges.
+ * We do not use libcap, so approximate by restricting usage to the
+ * root user only.
+ */
+ if (geteuid()) {
+ p_err("full feature probing requires root privileges");
+ return -1;
+ }
+
+ return 0;
+#endif /* USE_LIBCAP */
+}
+
+static int do_probe(int argc, char **argv)
+{
+ enum probe_component target = COMPONENT_UNSPEC;
+ const char *define_prefix = NULL;
+ bool supported_types[128] = {};
+ __u32 ifindex = 0;
+ char *ifname;
+
+ set_max_rlimit();
+
+ while (argc) {
+ if (is_prefix(*argv, "kernel")) {
+ if (target != COMPONENT_UNSPEC) {
+ p_err("component to probe already specified");
+ return -1;
+ }
+ target = COMPONENT_KERNEL;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "dev")) {
+ NEXT_ARG();
+
+ if (target != COMPONENT_UNSPEC || ifindex) {
+ p_err("component to probe already specified");
+ return -1;
+ }
+ if (!REQ_ARGS(1))
+ return -1;
+
+ target = COMPONENT_DEVICE;
+ ifname = GET_ARG();
+ ifindex = if_nametoindex(ifname);
+ if (!ifindex) {
+ p_err("unrecognized netdevice '%s': %s", ifname,
+ strerror(errno));
+ return -1;
+ }
+ } else if (is_prefix(*argv, "full")) {
+ full_mode = true;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "macros") && !define_prefix) {
+ define_prefix = "";
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "prefix")) {
+ if (!define_prefix) {
+ p_err("'prefix' argument can only be use after 'macros'");
+ return -1;
+ }
+ if (strcmp(define_prefix, "")) {
+ p_err("'prefix' already defined");
+ return -1;
+ }
+ NEXT_ARG();
+
+ if (!REQ_ARGS(1))
+ return -1;
+ define_prefix = GET_ARG();
+ } else if (is_prefix(*argv, "unprivileged")) {
+#ifdef USE_LIBCAP
+ run_as_unprivileged = true;
+ NEXT_ARG();
+#else
+ p_err("unprivileged run not supported, recompile bpftool with libcap");
+ return -1;
+#endif
+ } else {
+ p_err("expected no more arguments, 'kernel', 'dev', 'macros' or 'prefix', got: '%s'?",
+ *argv);
+ return -1;
+ }
+ }
+
+ /* Full feature detection requires specific privileges.
+ * Let's approximate, and warn if user is not root.
+ */
+ if (handle_perms())
+ return -1;
+
+ if (json_output) {
+ define_prefix = NULL;
+ jsonw_start_object(json_wtr);
+ }
+
+ section_system_config(target, define_prefix);
+ if (!section_syscall_config(define_prefix))
+ /* bpf() syscall unavailable, don't probe other BPF features */
+ goto exit_close_json;
+ section_program_types(supported_types, define_prefix, ifindex);
+ section_map_types(define_prefix, ifindex);
+ section_helpers(supported_types, define_prefix, ifindex);
+ section_misc(define_prefix, ifindex);
+
+exit_close_json:
+ if (json_output)
+ /* End root object */
+ jsonw_end_object(json_wtr);
+
+ return 0;
+}
+
+static const char *get_helper_name(unsigned int id)
+{
+ if (id >= ARRAY_SIZE(helper_name))
+ return NULL;
+
+ return helper_name[id];
+}
+
+static int do_list_builtins(int argc, char **argv)
+{
+ const char *(*get_name)(unsigned int id);
+ unsigned int id = 0;
+
+ if (argc < 1)
+ usage();
+
+ if (is_prefix(*argv, "prog_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_prog_type_str;
+ } else if (is_prefix(*argv, "map_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_map_type_str;
+ } else if (is_prefix(*argv, "attach_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_attach_type_str;
+ } else if (is_prefix(*argv, "link_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_link_type_str;
+ } else if (is_prefix(*argv, "helpers")) {
+ get_name = get_helper_name;
+ } else {
+ p_err("expected 'prog_types', 'map_types', 'attach_types', 'link_types' or 'helpers', got: %s", *argv);
+ return -1;
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr); /* root array */
+
+ while (true) {
+ const char *name;
+
+ name = get_name(id++);
+ if (!name)
+ break;
+ if (json_output)
+ jsonw_string(json_wtr, name);
+ else
+ printf("%s\n", name);
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr); /* root array */
+
+ return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %1$s %2$s probe [COMPONENT] [full] [unprivileged] [macros [prefix PREFIX]]\n"
+ " %1$s %2$s list_builtins GROUP\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " COMPONENT := { kernel | dev NAME }\n"
+ " GROUP := { prog_types | map_types | attach_types | link_types | helpers }\n"
+ " " HELP_SPEC_OPTIONS " }\n"
+ "",
+ bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "probe", do_probe },
+ { "list_builtins", do_list_builtins },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_feature(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
new file mode 100644
index 0000000000..882bf8e6e7
--- /dev/null
+++ b/tools/bpf/bpftool/gen.c
@@ -0,0 +1,2336 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Facebook */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/err.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <bpf/libbpf_internal.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <bpf/btf.h>
+
+#include "json_writer.h"
+#include "main.h"
+
+#define MAX_OBJ_NAME_LEN 64
+
+static void sanitize_identifier(char *name)
+{
+ int i;
+
+ for (i = 0; name[i]; i++)
+ if (!isalnum(name[i]) && name[i] != '_')
+ name[i] = '_';
+}
+
+static bool str_has_prefix(const char *str, const char *prefix)
+{
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+}
+
+static bool str_has_suffix(const char *str, const char *suffix)
+{
+ size_t i, n1 = strlen(str), n2 = strlen(suffix);
+
+ if (n1 < n2)
+ return false;
+
+ for (i = 0; i < n2; i++) {
+ if (str[n1 - i - 1] != suffix[n2 - i - 1])
+ return false;
+ }
+
+ return true;
+}
+
+static void get_obj_name(char *name, const char *file)
+{
+ /* Use the GNU version of basename(), which does not modify its argument. */
+ strncpy(name, basename(file), MAX_OBJ_NAME_LEN - 1);
+ name[MAX_OBJ_NAME_LEN - 1] = '\0';
+ if (str_has_suffix(name, ".o"))
+ name[strlen(name) - 2] = '\0';
+ sanitize_identifier(name);
+}
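+
+/* Example (sketch): get_obj_name(name, "/tmp/my_prog.bpf.o") yields
+ * "my_prog_bpf": basename() drops the directories, the ".o" suffix is cut,
+ * and sanitize_identifier() rewrites the remaining '.' to '_'.
+ */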
+
+static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
+{
+ int i;
+
+ sprintf(guard, "__%s_%s__", obj_name, suffix);
+ for (i = 0; guard[i]; i++)
+ guard[i] = toupper(guard[i]);
+}
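+
+/* Example (sketch): get_header_guard(guard, "my_prog_bpf", "SKEL_H")
+ * produces "__MY_PROG_BPF_SKEL_H__".
+ */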
+
+static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
+{
+ static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
+ const char *name = bpf_map__name(map);
+ int i, n;
+
+ if (!bpf_map__is_internal(map)) {
+ snprintf(buf, buf_sz, "%s", name);
+ return true;
+ }
+
+ for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
+ const char *sfx = sfxs[i], *p;
+
+ p = strstr(name, sfx);
+ if (p) {
+ snprintf(buf, buf_sz, "%s", p + 1);
+ sanitize_identifier(buf);
+ return true;
+ }
+ }
+
+ return false;
+}
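+
+/* Example (sketch): for an internal map named e.g. "my_prog.rodata" this
+ * returns "rodata" (the text starting right after the '.'), while a regular
+ * user-defined map keeps its own name.
+ */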
+
+static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
+{
+ static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
+ int i, n;
+
+ for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
+ const char *pfx = pfxs[i];
+
+ if (str_has_prefix(sec_name, pfx)) {
+ snprintf(buf, buf_sz, "%s", sec_name + 1);
+ sanitize_identifier(buf);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
+{
+ vprintf(fmt, args);
+}
+
+static int codegen_datasec_def(struct bpf_object *obj,
+ struct btf *btf,
+ struct btf_dump *d,
+ const struct btf_type *sec,
+ const char *obj_name)
+{
+ const char *sec_name = btf__name_by_offset(btf, sec->name_off);
+ const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
+ int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
+ char var_ident[256], sec_ident[256];
+ bool strip_mods = false;
+
+ if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
+ return 0;
+
+ if (strcmp(sec_name, ".kconfig") != 0)
+ strip_mods = true;
+
+ printf(" struct %s__%s {\n", obj_name, sec_ident);
+ for (i = 0; i < vlen; i++, sec_var++) {
+ const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
+ const char *var_name = btf__name_by_offset(btf, var->name_off);
+ DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
+ .field_name = var_ident,
+ .indent_level = 2,
+ .strip_mods = strip_mods,
+ );
+ int need_off = sec_var->offset, align_off, align;
+ __u32 var_type_id = var->type;
+
+ /* static variables are not exposed through BPF skeleton */
+ if (btf_var(var)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ if (off > need_off) {
+ p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
+ sec_name, i, need_off, off);
+ return -EINVAL;
+ }
+
+ align = btf__align_of(btf, var->type);
+ if (align <= 0) {
+ p_err("Failed to determine alignment of variable '%s': %d",
+ var_name, align);
+ return -EINVAL;
+ }
+ /* Assume 32-bit architectures when generating data section
+ * struct memory layout. Given bpftool can't know which target
+ * host architecture it's emitting skeleton for, we need to be
+ * conservative and assume 32-bit one to ensure enough padding
+ * bytes are generated for pointer and long types. This will
+ * still work correctly for 64-bit architectures, because in
+ * the worst case we'll generate unnecessary padding field,
+ * which on 64-bit architectures is not strictly necessary and
+ * would be handled by natural 8-byte alignment. But it still
+ * will be a correct memory layout, based on recorded offsets
+ * in BTF.
+ */
+ if (align > 4)
+ align = 4;
+
+ align_off = (off + align - 1) / align * align;
+ if (align_off != need_off) {
+ printf("\t\tchar __pad%d[%d];\n",
+ pad_cnt, need_off - off);
+ pad_cnt++;
+ }
+
+ /* Sanitize the variable name; e.g., for static vars inside
+ * a function, its name is '<function name>.<variable name>',
+ * which we'll turn into '<function name>_<variable name>'.
+ */
+ var_ident[0] = '\0';
+ strncat(var_ident, var_name, sizeof(var_ident) - 1);
+ sanitize_identifier(var_ident);
+
+ printf("\t\t");
+ err = btf_dump__emit_type_decl(d, var_type_id, &opts);
+ if (err)
+ return err;
+ printf(";\n");
+
+ off = sec_var->offset + sec_var->size;
+ }
+ printf(" } *%s;\n", sec_ident);
+ return 0;
+}
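+
+/* Example (sketch): for a .data section with "int cnt;" at offset 0 and
+ * "long total;" at offset 8, the code above emits roughly (object name
+ * "my_obj" assumed):
+ *
+ *	struct my_obj__data {
+ *		int cnt;
+ *		char __pad0[4];
+ *		long total;
+ *	} *data;
+ *
+ * The pad is inserted because alignment is conservatively capped at 4.
+ */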
+
+static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
+{
+ int n = btf__type_cnt(btf), i;
+ char sec_ident[256];
+
+ for (i = 1; i < n; i++) {
+ const struct btf_type *t = btf__type_by_id(btf, i);
+ const char *name;
+
+ if (!btf_is_datasec(t))
+ continue;
+
+ name = btf__str_by_offset(btf, t->name_off);
+ if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
+ continue;
+
+ if (strcmp(sec_ident, map_ident) == 0)
+ return t;
+ }
+ return NULL;
+}
+
+static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
+{
+ if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
+ return false;
+
+ if (!get_map_ident(map, buf, sz))
+ return false;
+
+ return true;
+}
+
+static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
+{
+ struct btf *btf = bpf_object__btf(obj);
+ struct btf_dump *d;
+ struct bpf_map *map;
+ const struct btf_type *sec;
+ char map_ident[256];
+ int err = 0;
+
+ d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
+ if (!d)
+ return -errno;
+
+ bpf_object__for_each_map(map, obj) {
+ /* only generate definitions for memory-mapped internal maps */
+ if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
+ continue;
+
+ sec = find_type_for_map(btf, map_ident);
+
+ /* In some cases (e.g., sections like .rodata.cst16 containing
+ * compiler allocated string constants only) there will be
+ * special internal maps with no corresponding DATASEC BTF
+ * type. In such a case, generate an empty struct for each such
+ * map. It will still be memory-mapped and its contents will be
+ * accessible from user space through the BPF skeleton.
+ */
+ if (!sec) {
+ printf(" struct %s__%s {\n", obj_name, map_ident);
+ printf(" } *%s;\n", map_ident);
+ } else {
+ err = codegen_datasec_def(obj, btf, d, sec, obj_name);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ btf_dump__free(d);
+ return err;
+}
+
+static bool btf_is_ptr_to_func_proto(const struct btf *btf,
+ const struct btf_type *v)
+{
+ return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
+}
+
+static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
+{
+ struct btf *btf = bpf_object__btf(obj);
+ struct btf_dump *d;
+ struct bpf_map *map;
+ const struct btf_type *sec, *var;
+ const struct btf_var_secinfo *sec_var;
+ int i, err = 0, vlen;
+ char map_ident[256], sec_ident[256];
+ bool strip_mods = false, needs_typeof = false;
+ const char *sec_name, *var_name;
+ __u32 var_type_id;
+
+ d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
+ if (!d)
+ return -errno;
+
+ bpf_object__for_each_map(map, obj) {
+ /* only generate definitions for memory-mapped internal maps */
+ if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
+ continue;
+
+ sec = find_type_for_map(btf, map_ident);
+ if (!sec)
+ continue;
+
+ sec_name = btf__name_by_offset(btf, sec->name_off);
+ if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
+ continue;
+
+ strip_mods = strcmp(sec_name, ".kconfig") != 0;
+ printf(" struct %s__%s {\n", obj_name, sec_ident);
+
+ sec_var = btf_var_secinfos(sec);
+ vlen = btf_vlen(sec);
+ for (i = 0; i < vlen; i++, sec_var++) {
+ DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
+ .indent_level = 2,
+ .strip_mods = strip_mods,
+ /* we'll print the name separately */
+ .field_name = "",
+ );
+
+ var = btf__type_by_id(btf, sec_var->type);
+ var_name = btf__name_by_offset(btf, var->name_off);
+ var_type_id = var->type;
+
+ /* static variables are not exposed through BPF skeleton */
+ if (btf_var(var)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ /* The datasec member has KIND_VAR but we want the
+ * underlying type of the variable (e.g. KIND_INT).
+ */
+ var = skip_mods_and_typedefs(btf, var->type, NULL);
+
+ printf("\t\t");
+ /* Func and array members require special handling.
+ * Instead of producing `typename *var`, they produce
+ * `typeof(typename) *var`. This allows us to keep a
+ * similar syntax where the identifier is just prefixed
+ * by *, allowing us to ignore C declaration minutiae.
+ */
+ needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
+ if (needs_typeof)
+ printf("typeof(");
+
+ err = btf_dump__emit_type_decl(d, var_type_id, &opts);
+ if (err)
+ goto out;
+
+ if (needs_typeof)
+ printf(")");
+
+ printf(" *%s;\n", var_name);
+ }
+ printf(" } %s;\n", sec_ident);
+ }
+
+out:
+ btf_dump__free(d);
+ return err;
+}
+
+static void codegen(const char *template, ...)
+{
+ const char *src, *end;
+ int skip_tabs = 0, n;
+ char *s, *dst;
+ va_list args;
+ char c;
+
+ n = strlen(template);
+ s = malloc(n + 1);
+ if (!s)
+ exit(-1);
+ src = template;
+ dst = s;
+
+ /* find out "baseline" indentation to skip */
+ while ((c = *src++)) {
+ if (c == '\t') {
+ skip_tabs++;
+ } else if (c == '\n') {
+ break;
+ } else {
+ p_err("unrecognized character at pos %td in template '%s': '%c'",
+ src - template - 1, template, c);
+ free(s);
+ exit(-1);
+ }
+ }
+
+ while (*src) {
+ /* skip baseline indentation tabs */
+ for (n = skip_tabs; n > 0; n--, src++) {
+ if (*src != '\t') {
+ p_err("not enough tabs at pos %td in template '%s'",
+ src - template - 1, template);
+ free(s);
+ exit(-1);
+ }
+ }
+ /* trim trailing whitespace */
+ end = strchrnul(src, '\n');
+ for (n = end - src; n > 0 && isspace(src[n - 1]); n--)
+ ;
+ memcpy(dst, src, n);
+ dst += n;
+ if (*end)
+ *dst++ = '\n';
+ src = *end ? end + 1 : end;
+ }
+ *dst++ = '\0';
+
+ /* print out using adjusted template */
+ va_start(args, template);
+ n = vprintf(s, args);
+ va_end(args);
+
+ free(s);
+}
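+
+/* Example (sketch): templates passed to codegen() carry a fixed "baseline"
+ * indentation (the tabs on the first template line), which is stripped from
+ * every subsequent line before printing, so
+ *
+ *	codegen("\
+ *		\n\
+ *		int %1$s;	\n\
+ *		", "x");
+ *
+ * prints "int x;" at column zero, with the trailing whitespace before each
+ * line continuation trimmed as well.
+ */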
+
+static void print_hex(const char *data, int data_sz)
+{
+ int i, len;
+
+ for (i = 0, len = 0; i < data_sz; i++) {
+ int w = data[i] ? 4 : 2;
+
+ len += w;
+ if (len > 78) {
+ printf("\\\n");
+ len = w;
+ }
+ if (!data[i])
+ printf("\\0");
+ else
+ printf("\\x%02x", (unsigned char)data[i]);
+ }
+}
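+
+/* Example (sketch): print_hex("A\0B", 3) emits \x41\0\x42; a "\\\n" break is
+ * inserted whenever the escaped output would exceed 78 columns on one line.
+ */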
+
+static size_t bpf_map_mmap_sz(const struct bpf_map *map)
+{
+ long page_sz = sysconf(_SC_PAGE_SIZE);
+ size_t map_sz;
+
+ map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
+ map_sz = roundup(map_sz, page_sz);
+ return map_sz;
+}
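+
+/* Example (sketch): value_size 4 and max_entries 1000 give
+ * roundup(4, 8) * 1000 = 8000 bytes, rounded up to the page size:
+ * 8192 bytes on a 4 KiB-page system.
+ */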
+
+/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
+static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
+{
+ struct btf *btf = bpf_object__btf(obj);
+ struct bpf_map *map;
+ struct btf_var_secinfo *sec_var;
+ int i, vlen;
+ const struct btf_type *sec;
+ char map_ident[256], var_ident[256];
+
+ if (!btf)
+ return;
+
+ codegen("\
+ \n\
+ __attribute__((unused)) static void \n\
+ %1$s__assert(struct %1$s *s __attribute__((unused))) \n\
+ { \n\
+ #ifdef __cplusplus \n\
+ #define _Static_assert static_assert \n\
+ #endif \n\
+ ", obj_name);
+
+ bpf_object__for_each_map(map, obj) {
+ if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
+ continue;
+
+ sec = find_type_for_map(btf, map_ident);
+ if (!sec) {
+ /* best effort, couldn't find the type for this map */
+ continue;
+ }
+
+ sec_var = btf_var_secinfos(sec);
+ vlen = btf_vlen(sec);
+
+ for (i = 0; i < vlen; i++, sec_var++) {
+ const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
+ const char *var_name = btf__name_by_offset(btf, var->name_off);
+ long var_size;
+
+ /* static variables are not exposed through BPF skeleton */
+ if (btf_var(var)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ var_size = btf__resolve_size(btf, var->type);
+ if (var_size < 0)
+ continue;
+
+ var_ident[0] = '\0';
+ strncat(var_ident, var_name, sizeof(var_ident) - 1);
+ sanitize_identifier(var_ident);
+
+ printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
+ map_ident, var_ident, var_size, var_ident);
+ }
+ }
+ codegen("\
+ \n\
+ #ifdef __cplusplus \n\
+ #undef _Static_assert \n\
+ #endif \n\
+ } \n\
+ ");
+}
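+
+/* Example (sketch): for a global "int cnt;" in .data, the loop above emits
+ *
+ *	_Static_assert(sizeof(s->data->cnt) == 4, "unexpected size of 'cnt'");
+ *
+ * so a skeleton whose struct layout no longer matches the sizes recorded in
+ * BTF fails at build time rather than at run time.
+ */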
+
+static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
+{
+ struct bpf_program *prog;
+
+ bpf_object__for_each_program(prog, obj) {
+ const char *tp_name;
+
+ codegen("\
+ \n\
+ \n\
+ static inline int \n\
+ %1$s__%2$s__attach(struct %1$s *skel) \n\
+ { \n\
+ int prog_fd = skel->progs.%2$s.prog_fd; \n\
+ ", obj_name, bpf_program__name(prog));
+
+ switch (bpf_program__type(prog)) {
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
+ printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
+ break;
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
+ if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
+ printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
+ else
+ printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
+ break;
+ default:
+ printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
+ break;
+ }
+ codegen("\
+ \n\
+ \n\
+ if (fd > 0) \n\
+ skel->links.%1$s_fd = fd; \n\
+ return fd; \n\
+ } \n\
+ ", bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ \n\
+ static inline int \n\
+ %1$s__attach(struct %1$s *skel) \n\
+ { \n\
+ int ret = 0; \n\
+ \n\
+ ", obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
+ ", obj_name, bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ return ret < 0 ? ret : 0; \n\
+ } \n\
+ \n\
+ static inline void \n\
+ %1$s__detach(struct %1$s *skel) \n\
+ { \n\
+ ", obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ skel_closenz(skel->links.%1$s_fd); \n\
+ ", bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ } \n\
+ ");
+}
+
+static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
+{
+ struct bpf_program *prog;
+ struct bpf_map *map;
+ char ident[256];
+
+ codegen("\
+ \n\
+ static void \n\
+ %1$s__destroy(struct %1$s *skel) \n\
+ { \n\
+ if (!skel) \n\
+ return; \n\
+ %1$s__detach(skel); \n\
+ ",
+ obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ skel_closenz(skel->progs.%1$s.prog_fd); \n\
+ ", bpf_program__name(prog));
+ }
+
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+ if (bpf_map__is_internal(map) &&
+ (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
+ printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
+ ident, bpf_map_mmap_sz(map));
+ codegen("\
+ \n\
+ skel_closenz(skel->maps.%1$s.map_fd); \n\
+ ", ident);
+ }
+ codegen("\
+ \n\
+ skel_free(skel); \n\
+ } \n\
+ ",
+ obj_name);
+}
+
+static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
+{
+ DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
+ struct bpf_map *map;
+ char ident[256];
+ int err = 0;
+
+ err = bpf_object__gen_loader(obj, &opts);
+ if (err)
+ return err;
+
+ err = bpf_object__load(obj);
+ if (err) {
+ p_err("failed to load object file");
+ goto out;
+ }
+ /* If there was no error during load, then gen_loader_opts
+ * is populated with the loader program.
+ */
+
+ /* finish generating 'struct skel' */
+ codegen("\
+ \n\
+ }; \n\
+ ", obj_name);
+
+ codegen_attach_detach(obj, obj_name);
+
+ codegen_destroy(obj, obj_name);
+
+ codegen("\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open(void) \n\
+ { \n\
+ struct %1$s *skel; \n\
+ \n\
+ skel = skel_alloc(sizeof(*skel)); \n\
+ if (!skel) \n\
+ goto cleanup; \n\
+ skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
+ ",
+ obj_name, opts.data_sz);
+ bpf_object__for_each_map(map, obj) {
+ const void *mmap_data = NULL;
+ size_t mmap_size = 0;
+
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ continue;
+
+ codegen("\
+ \n\
+ skel->%1$s = skel_prep_map_data((void *)\"\\ \n\
+ ", ident);
+ mmap_data = bpf_map__initial_value(map, &mmap_size);
+ print_hex(mmap_data, mmap_size);
+ codegen("\
+ \n\
+ \", %1$zd, %2$zd); \n\
+ if (!skel->%3$s) \n\
+ goto cleanup; \n\
+ skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
+ ", bpf_map_mmap_sz(map), mmap_size, ident);
+ }
+ codegen("\
+ \n\
+ return skel; \n\
+ cleanup: \n\
+ %1$s__destroy(skel); \n\
+ return NULL; \n\
+ } \n\
+ \n\
+ static inline int \n\
+ %1$s__load(struct %1$s *skel) \n\
+ { \n\
+ struct bpf_load_and_run_opts opts = {}; \n\
+ int err; \n\
+ \n\
+ opts.ctx = (struct bpf_loader_ctx *)skel; \n\
+ opts.data_sz = %2$d; \n\
+ opts.data = (void *)\"\\ \n\
+ ",
+ obj_name, opts.data_sz);
+ print_hex(opts.data, opts.data_sz);
+ codegen("\
+ \n\
+ \"; \n\
+ ");
+
+ codegen("\
+ \n\
+ opts.insns_sz = %d; \n\
+ opts.insns = (void *)\"\\ \n\
+ ",
+ opts.insns_sz);
+ print_hex(opts.insns, opts.insns_sz);
+ codegen("\
+ \n\
+ \"; \n\
+ err = bpf_load_and_run(&opts); \n\
+ if (err < 0) \n\
+ return err; \n\
+ ", obj_name);
+ bpf_object__for_each_map(map, obj) {
+ const char *mmap_flags;
+
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ continue;
+
+ if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
+ mmap_flags = "PROT_READ";
+ else
+ mmap_flags = "PROT_READ | PROT_WRITE";
+
+ codegen("\
+ \n\
+ skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
+ %2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
+ if (!skel->%1$s) \n\
+ return -ENOMEM; \n\
+ ",
+ ident, bpf_map_mmap_sz(map), mmap_flags);
+ }
+ codegen("\
+ \n\
+ return 0; \n\
+ } \n\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open_and_load(void) \n\
+ { \n\
+ struct %1$s *skel; \n\
+ \n\
+ skel = %1$s__open(); \n\
+ if (!skel) \n\
+ return NULL; \n\
+ if (%1$s__load(skel)) { \n\
+ %1$s__destroy(skel); \n\
+ return NULL; \n\
+ } \n\
+ return skel; \n\
+ } \n\
+ \n\
+ ", obj_name);
+
+ codegen_asserts(obj, obj_name);
+
+ codegen("\
+ \n\
+ \n\
+ #endif /* %s */ \n\
+ ",
+ header_guard);
+ err = 0;
+out:
+ return err;
+}
+
+static void
+codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
+{
+ struct bpf_map *map;
+ char ident[256];
+ size_t i;
+
+ if (!map_cnt)
+ return;
+
+ codegen("\
+ \n\
+ \n\
+ /* maps */ \n\
+ s->map_cnt = %zu; \n\
+ s->map_skel_sz = sizeof(*s->maps); \n\
+ s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
+ if (!s->maps) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ ",
+ map_cnt
+ );
+ i = 0;
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+
+ codegen("\
+ \n\
+ \n\
+ s->maps[%zu].name = \"%s\"; \n\
+ s->maps[%zu].map = &obj->maps.%s; \n\
+ ",
+ i, bpf_map__name(map), i, ident);
+ /* memory-mapped internal maps */
+ if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
+ printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
+ i, ident);
+ }
+ i++;
+ }
+}
+
+static void
+codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
+{
+ struct bpf_program *prog;
+ int i;
+
+ if (!prog_cnt)
+ return;
+
+ codegen("\
+ \n\
+ \n\
+ /* programs */ \n\
+ s->prog_cnt = %zu; \n\
+ s->prog_skel_sz = sizeof(*s->progs); \n\
+ s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
+ if (!s->progs) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ ",
+ prog_cnt
+ );
+ i = 0;
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ \n\
+ s->progs[%1$zu].name = \"%2$s\"; \n\
+ s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
+ ",
+ i, bpf_program__name(prog));
+
+ if (populate_links) {
+ codegen("\
+ \n\
+ s->progs[%1$zu].link = &obj->links.%2$s;\n\
+ ",
+ i, bpf_program__name(prog));
+ }
+ i++;
+ }
+}
+
+static int do_skeleton(int argc, char **argv)
+{
+ char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
+ size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
+ struct bpf_object *obj = NULL;
+ const char *file;
+ char ident[256];
+ struct bpf_program *prog;
+ int fd, err = -1;
+ struct bpf_map *map;
+ struct btf *btf;
+ struct stat st;
+
+ if (!REQ_ARGS(1)) {
+ usage();
+ return -1;
+ }
+ file = GET_ARG();
+
+ while (argc) {
+ if (!REQ_ARGS(2))
+ return -1;
+
+ if (is_prefix(*argv, "name")) {
+ NEXT_ARG();
+
+ if (obj_name[0] != '\0') {
+ p_err("object name already specified");
+ return -1;
+ }
+
+ strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
+ obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
+ } else {
+ p_err("unknown arg %s", *argv);
+ return -1;
+ }
+
+ NEXT_ARG();
+ }
+
+ if (argc) {
+ p_err("extra unknown arguments");
+ return -1;
+ }
+
+ if (stat(file, &st)) {
+ p_err("failed to stat() %s: %s", file, strerror(errno));
+ return -1;
+ }
+ file_sz = st.st_size;
+ mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
+ fd = open(file, O_RDONLY);
+ if (fd < 0) {
+ p_err("failed to open() %s: %s", file, strerror(errno));
+ return -1;
+ }
+ obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (obj_data == MAP_FAILED) {
+ obj_data = NULL;
+ p_err("failed to mmap() %s: %s", file, strerror(errno));
+ goto out;
+ }
+ if (obj_name[0] == '\0')
+ get_obj_name(obj_name, file);
+ opts.object_name = obj_name;
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ opts.kernel_log_level = 1 + 2 + 4;
+ obj = bpf_object__open_mem(obj_data, file_sz, &opts);
+ if (!obj) {
+ char err_buf[256];
+
+ err = -errno;
+ libbpf_strerror(err, err_buf, sizeof(err_buf));
+ p_err("failed to open BPF object file: %s", err_buf);
+ goto out;
+ }
+
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident))) {
+ p_err("ignoring unrecognized internal map '%s'...",
+ bpf_map__name(map));
+ continue;
+ }
+ map_cnt++;
+ }
+ bpf_object__for_each_program(prog, obj) {
+ prog_cnt++;
+ }
+
+ get_header_guard(header_guard, obj_name, "SKEL_H");
+ if (use_loader) {
+ codegen("\
+ \n\
+ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
+ /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
+ #ifndef %2$s \n\
+ #define %2$s \n\
+ \n\
+ #include <bpf/skel_internal.h> \n\
+ \n\
+ struct %1$s { \n\
+ struct bpf_loader_ctx ctx; \n\
+ ",
+ obj_name, header_guard
+ );
+ } else {
+ codegen("\
+ \n\
+ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
+ \n\
+ /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
+ #ifndef %2$s \n\
+ #define %2$s \n\
+ \n\
+ #include <errno.h> \n\
+ #include <stdlib.h> \n\
+ #include <bpf/libbpf.h> \n\
+ \n\
+ struct %1$s { \n\
+ struct bpf_object_skeleton *skeleton; \n\
+ struct bpf_object *obj; \n\
+ ",
+ obj_name, header_guard
+ );
+ }
+
+ if (map_cnt) {
+ printf("\tstruct {\n");
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+ if (use_loader)
+ printf("\t\tstruct bpf_map_desc %s;\n", ident);
+ else
+ printf("\t\tstruct bpf_map *%s;\n", ident);
+ }
+ printf("\t} maps;\n");
+ }
+
+ if (prog_cnt) {
+ printf("\tstruct {\n");
+ bpf_object__for_each_program(prog, obj) {
+ if (use_loader)
+ printf("\t\tstruct bpf_prog_desc %s;\n",
+ bpf_program__name(prog));
+ else
+ printf("\t\tstruct bpf_program *%s;\n",
+ bpf_program__name(prog));
+ }
+ printf("\t} progs;\n");
+ printf("\tstruct {\n");
+ bpf_object__for_each_program(prog, obj) {
+ if (use_loader)
+ printf("\t\tint %s_fd;\n",
+ bpf_program__name(prog));
+ else
+ printf("\t\tstruct bpf_link *%s;\n",
+ bpf_program__name(prog));
+ }
+ printf("\t} links;\n");
+ }
+
+ btf = bpf_object__btf(obj);
+ if (btf) {
+ err = codegen_datasecs(obj, obj_name);
+ if (err)
+ goto out;
+ }
+ if (use_loader) {
+ err = gen_trace(obj, obj_name, header_guard);
+ goto out;
+ }
+
+ codegen("\
+ \n\
+ \n\
+ #ifdef __cplusplus \n\
+ static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
+ static inline struct %1$s *open_and_load(); \n\
+ static inline int load(struct %1$s *skel); \n\
+ static inline int attach(struct %1$s *skel); \n\
+ static inline void detach(struct %1$s *skel); \n\
+ static inline void destroy(struct %1$s *skel); \n\
+ static inline const void *elf_bytes(size_t *sz); \n\
+ #endif /* __cplusplus */ \n\
+ }; \n\
+ \n\
+ static void \n\
+ %1$s__destroy(struct %1$s *obj) \n\
+ { \n\
+ if (!obj) \n\
+ return; \n\
+ if (obj->skeleton) \n\
+ bpf_object__destroy_skeleton(obj->skeleton);\n\
+ free(obj); \n\
+ } \n\
+ \n\
+ static inline int \n\
+ %1$s__create_skeleton(struct %1$s *obj); \n\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
+ { \n\
+ struct %1$s *obj; \n\
+ int err; \n\
+ \n\
+ obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
+ if (!obj) { \n\
+ errno = ENOMEM; \n\
+ return NULL; \n\
+ } \n\
+ \n\
+ err = %1$s__create_skeleton(obj); \n\
+ if (err) \n\
+ goto err_out; \n\
+ \n\
+ err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
+ if (err) \n\
+ goto err_out; \n\
+ \n\
+ return obj; \n\
+ err_out: \n\
+ %1$s__destroy(obj); \n\
+ errno = -err; \n\
+ return NULL; \n\
+ } \n\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open(void) \n\
+ { \n\
+ return %1$s__open_opts(NULL); \n\
+ } \n\
+ \n\
+ static inline int \n\
+ %1$s__load(struct %1$s *obj) \n\
+ { \n\
+ return bpf_object__load_skeleton(obj->skeleton); \n\
+ } \n\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open_and_load(void) \n\
+ { \n\
+ struct %1$s *obj; \n\
+ int err; \n\
+ \n\
+ obj = %1$s__open(); \n\
+ if (!obj) \n\
+ return NULL; \n\
+ err = %1$s__load(obj); \n\
+ if (err) { \n\
+ %1$s__destroy(obj); \n\
+ errno = -err; \n\
+ return NULL; \n\
+ } \n\
+ return obj; \n\
+ } \n\
+ \n\
+ static inline int \n\
+ %1$s__attach(struct %1$s *obj) \n\
+ { \n\
+ return bpf_object__attach_skeleton(obj->skeleton); \n\
+ } \n\
+ \n\
+ static inline void \n\
+ %1$s__detach(struct %1$s *obj) \n\
+ { \n\
+ bpf_object__detach_skeleton(obj->skeleton); \n\
+ } \n\
+ ",
+ obj_name
+ );
+
+ codegen("\
+ \n\
+ \n\
+ static inline const void *%1$s__elf_bytes(size_t *sz); \n\
+ \n\
+ static inline int \n\
+ %1$s__create_skeleton(struct %1$s *obj) \n\
+ { \n\
+ struct bpf_object_skeleton *s; \n\
+ int err; \n\
+ \n\
+ s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
+ if (!s) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ \n\
+ s->sz = sizeof(*s); \n\
+ s->name = \"%1$s\"; \n\
+ s->obj = &obj->obj; \n\
+ ",
+ obj_name
+ );
+
+ codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
+ codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);
+
+ codegen("\
+ \n\
+ \n\
+ s->data = %1$s__elf_bytes(&s->data_sz); \n\
+ \n\
+ obj->skeleton = s; \n\
+ return 0; \n\
+ err: \n\
+ bpf_object__destroy_skeleton(s); \n\
+ return err; \n\
+ } \n\
+ \n\
+ static inline const void *%1$s__elf_bytes(size_t *sz) \n\
+ { \n\
+ static const char data[] __attribute__((__aligned__(8))) = \"\\\n\
+ ",
+ obj_name
+ );
+
+ /* embed contents of BPF object file */
+ print_hex(obj_data, file_sz);
+
+ codegen("\
+ \n\
+ \"; \n\
+ \n\
+ *sz = sizeof(data) - 1; \n\
+ return (const void *)data; \n\
+ } \n\
+ \n\
+ #ifdef __cplusplus \n\
+ struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
+ struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); } \n\
+ int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); } \n\
+ int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); } \n\
+ void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); } \n\
+ void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); } \n\
+ const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
+ #endif /* __cplusplus */ \n\
+ \n\
+ ",
+ obj_name);
+
+ codegen_asserts(obj, obj_name);
+
+ codegen("\
+ \n\
+ \n\
+ #endif /* %1$s */ \n\
+ ",
+ header_guard);
+ err = 0;
+out:
+ bpf_object__close(obj);
+ if (obj_data)
+ munmap(obj_data, mmap_sz);
+ close(fd);
+ return err;
+}
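+
+/* Example (sketch): a consumer of the generated header (assuming the object
+ * was named my_prog) drives the usual skeleton lifecycle:
+ *
+ *	struct my_prog *skel = my_prog__open_and_load();
+ *
+ *	if (!skel)
+ *		return -errno;
+ *	if (my_prog__attach(skel)) {
+ *		my_prog__destroy(skel);
+ *		return -1;
+ *	}
+ *	// ... run ...
+ *	my_prog__destroy(skel);
+ */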
+
+/* Subskeletons are like skeletons, except they don't own the bpf_object,
+ * associated maps, links, etc. Instead, they know about the existence of
+ * variables, maps, programs and are able to find their locations
+ * _at runtime_ from an already loaded bpf_object.
+ *
+ * This allows for library-like BPF objects to have userspace counterparts
+ * with access to their own items without having to know anything about the
+ * final BPF object that the library was linked into.
+ */
+static int do_subskeleton(int argc, char **argv)
+{
+ char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
+ size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
+ struct bpf_object *obj = NULL;
+ const char *file, *var_name;
+ char ident[256];
+ int fd, err = -1, map_type_id;
+ const struct bpf_map *map;
+ struct bpf_program *prog;
+ struct btf *btf;
+ const struct btf_type *map_type, *var_type;
+ const struct btf_var_secinfo *var;
+ struct stat st;
+
+ if (!REQ_ARGS(1)) {
+ usage();
+ return -1;
+ }
+ file = GET_ARG();
+
+ while (argc) {
+ if (!REQ_ARGS(2))
+ return -1;
+
+ if (is_prefix(*argv, "name")) {
+ NEXT_ARG();
+
+ if (obj_name[0] != '\0') {
+ p_err("object name already specified");
+ return -1;
+ }
+
+ strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
+ obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
+ } else {
+ p_err("unknown arg %s", *argv);
+ return -1;
+ }
+
+ NEXT_ARG();
+ }
+
+ if (argc) {
+ p_err("extra unknown arguments");
+ return -1;
+ }
+
+ if (use_loader) {
+ p_err("cannot use loader for subskeletons");
+ return -1;
+ }
+
+ if (stat(file, &st)) {
+ p_err("failed to stat() %s: %s", file, strerror(errno));
+ return -1;
+ }
+ file_sz = st.st_size;
+ mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
+ fd = open(file, O_RDONLY);
+ if (fd < 0) {
+ p_err("failed to open() %s: %s", file, strerror(errno));
+ return -1;
+ }
+ obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (obj_data == MAP_FAILED) {
+ obj_data = NULL;
+ p_err("failed to mmap() %s: %s", file, strerror(errno));
+ goto out;
+ }
+ if (obj_name[0] == '\0')
+ get_obj_name(obj_name, file);
+
+ /* The empty object name allows us to use bpf_map__name and produce
+ * ELF section names out of it (".data" instead of "obj.data").
+ */
+ opts.object_name = "";
+ obj = bpf_object__open_mem(obj_data, file_sz, &opts);
+ if (!obj) {
+ char err_buf[256];
+
+ libbpf_strerror(errno, err_buf, sizeof(err_buf));
+ p_err("failed to open BPF object file: %s", err_buf);
+ obj = NULL;
+ goto out;
+ }
+
+ btf = bpf_object__btf(obj);
+ if (!btf) {
+ err = -1;
+ p_err("need btf type information for %s", obj_name);
+ goto out;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ prog_cnt++;
+ }
+
+ /* First, count how many variables we have to find.
+ * We need this in advance so the subskel can allocate the right
+ * amount of storage.
+ */
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+
+ /* Also count all maps that have a name */
+ map_cnt++;
+
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ continue;
+
+ map_type_id = bpf_map__btf_value_type_id(map);
+ if (map_type_id <= 0) {
+ err = map_type_id;
+ goto out;
+ }
+ map_type = btf__type_by_id(btf, map_type_id);
+
+ var = btf_var_secinfos(map_type);
+ len = btf_vlen(map_type);
+ for (i = 0; i < len; i++, var++) {
+ var_type = btf__type_by_id(btf, var->type);
+
+ if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ var_cnt++;
+ }
+ }
+
+ get_header_guard(header_guard, obj_name, "SUBSKEL_H");
+ codegen("\
+ \n\
+ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
+ \n\
+ /* THIS FILE IS AUTOGENERATED! */ \n\
+ #ifndef %2$s \n\
+ #define %2$s \n\
+ \n\
+ #include <errno.h> \n\
+ #include <stdlib.h> \n\
+ #include <bpf/libbpf.h> \n\
+ \n\
+ struct %1$s { \n\
+ struct bpf_object *obj; \n\
+ struct bpf_object_subskeleton *subskel; \n\
+ ", obj_name, header_guard);
+
+ if (map_cnt) {
+ printf("\tstruct {\n");
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+ printf("\t\tstruct bpf_map *%s;\n", ident);
+ }
+ printf("\t} maps;\n");
+ }
+
+ if (prog_cnt) {
+ printf("\tstruct {\n");
+ bpf_object__for_each_program(prog, obj) {
+ printf("\t\tstruct bpf_program *%s;\n",
+ bpf_program__name(prog));
+ }
+ printf("\t} progs;\n");
+ }
+
+ err = codegen_subskel_datasecs(obj, obj_name);
+ if (err)
+ goto out;
+
+ /* emit code that will allocate enough storage for all symbols */
+ codegen("\
+ \n\
+ \n\
+ #ifdef __cplusplus \n\
+ static inline struct %1$s *open(const struct bpf_object *src);\n\
+ static inline void destroy(struct %1$s *skel); \n\
+ #endif /* __cplusplus */ \n\
+ }; \n\
+ \n\
+ static inline void \n\
+ %1$s__destroy(struct %1$s *skel) \n\
+ { \n\
+ if (!skel) \n\
+ return; \n\
+ if (skel->subskel) \n\
+ bpf_object__destroy_subskeleton(skel->subskel);\n\
+ free(skel); \n\
+ } \n\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open(const struct bpf_object *src) \n\
+ { \n\
+ struct %1$s *obj; \n\
+ struct bpf_object_subskeleton *s; \n\
+ int err; \n\
+ \n\
+ obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
+ if (!obj) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
+ if (!s) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ s->sz = sizeof(*s); \n\
+ s->obj = src; \n\
+ s->var_skel_sz = sizeof(*s->vars); \n\
+ obj->subskel = s; \n\
+ \n\
+ /* vars */ \n\
+ s->var_cnt = %2$d; \n\
+ s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
+ if (!s->vars) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ ",
+ obj_name, var_cnt
+ );
+
+ /* walk through each symbol and emit the runtime representation */
+ bpf_object__for_each_map(map, obj) {
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ continue;
+
+ map_type_id = bpf_map__btf_value_type_id(map);
+ if (map_type_id <= 0)
+			/* skip over internal maps with no type */
+ continue;
+
+ map_type = btf__type_by_id(btf, map_type_id);
+ var = btf_var_secinfos(map_type);
+ len = btf_vlen(map_type);
+ for (i = 0; i < len; i++, var++) {
+ var_type = btf__type_by_id(btf, var->type);
+ var_name = btf__name_by_offset(btf, var_type->name_off);
+
+ if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ /* Note that we use the dot prefix in .data as the
+			 * field access operator, i.e. maps%s becomes maps.data
+ */
+ codegen("\
+ \n\
+ \n\
+ s->vars[%3$d].name = \"%1$s\"; \n\
+ s->vars[%3$d].map = &obj->maps.%2$s; \n\
+ s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
+ ", var_name, ident, var_idx);
+
+ var_idx++;
+ }
+ }
+
+ codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
+ codegen_progs_skeleton(obj, prog_cnt, false /*links*/);
+
+ codegen("\
+ \n\
+ \n\
+ err = bpf_object__open_subskeleton(s); \n\
+ if (err) \n\
+ goto err; \n\
+ \n\
+ return obj; \n\
+ err: \n\
+ %1$s__destroy(obj); \n\
+ errno = -err; \n\
+ return NULL; \n\
+ } \n\
+ \n\
+ #ifdef __cplusplus \n\
+ struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
+ void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
+ #endif /* __cplusplus */ \n\
+ \n\
+ #endif /* %2$s */ \n\
+ ",
+ obj_name, header_guard);
+ err = 0;
+out:
+ bpf_object__close(obj);
+ if (obj_data)
+ munmap(obj_data, mmap_sz);
+ close(fd);
+ return err;
+}
+
+static int do_object(int argc, char **argv)
+{
+ struct bpf_linker *linker;
+ const char *output_file, *file;
+ int err = 0;
+
+ if (!REQ_ARGS(2)) {
+ usage();
+ return -1;
+ }
+
+ output_file = GET_ARG();
+
+ linker = bpf_linker__new(output_file, NULL);
+ if (!linker) {
+ p_err("failed to create BPF linker instance");
+ return -1;
+ }
+
+ while (argc) {
+ file = GET_ARG();
+
+ err = bpf_linker__add_file(linker, file, NULL);
+ if (err) {
+ p_err("failed to link '%s': %s (%d)", file, strerror(errno), errno);
+ goto out;
+ }
+ }
+
+ err = bpf_linker__finalize(linker);
+ if (err) {
+ p_err("failed to finalize ELF file: %s (%d)", strerror(errno), errno);
+ goto out;
+ }
+
+ err = 0;
+out:
+ bpf_linker__free(linker);
+ return err;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
+ " %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
+ " %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
+ " %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-L|--use-loader} }\n"
+ "",
+ bin_name, "gen");
+
+ return 0;
+}
+
+static int btf_save_raw(const struct btf *btf, const char *path)
+{
+ const void *data;
+ FILE *f = NULL;
+ __u32 data_sz;
+ int err = 0;
+
+ data = btf__raw_data(btf, &data_sz);
+ if (!data)
+ return -ENOMEM;
+
+ f = fopen(path, "wb");
+ if (!f)
+ return -errno;
+
+ if (fwrite(data, 1, data_sz, f) != data_sz)
+ err = -errno;
+
+ fclose(f);
+ return err;
+}
+
+struct btfgen_info {
+ struct btf *src_btf;
+ struct btf *marked_btf; /* btf structure used to mark used types */
+};
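+
+/* Both members are parsed from the same target BTF file (see
+ * btfgen_new_info() below): src_btf keeps the pristine type data, while
+ * marked_btf is a same-shaped copy whose name_off fields get overwritten
+ * to flag the types and members that are actually used.
+ */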
+
+static size_t btfgen_hash_fn(long key, void *ctx)
+{
+ return key;
+}
+
+static bool btfgen_equal_fn(long k1, long k2, void *ctx)
+{
+ return k1 == k2;
+}
+
+static void btfgen_free_info(struct btfgen_info *info)
+{
+ if (!info)
+ return;
+
+ btf__free(info->src_btf);
+ btf__free(info->marked_btf);
+
+ free(info);
+}
+
+static struct btfgen_info *
+btfgen_new_info(const char *targ_btf_path)
+{
+ struct btfgen_info *info;
+ int err;
+
+ info = calloc(1, sizeof(*info));
+ if (!info)
+ return NULL;
+
+ info->src_btf = btf__parse(targ_btf_path, NULL);
+ if (!info->src_btf) {
+ err = -errno;
+ p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
+ goto err_out;
+ }
+
+ info->marked_btf = btf__parse(targ_btf_path, NULL);
+ if (!info->marked_btf) {
+ err = -errno;
+ p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
+ goto err_out;
+ }
+
+ return info;
+
+err_out:
+ btfgen_free_info(info);
+ errno = -err;
+ return NULL;
+}
+
+#define MARKED UINT32_MAX
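+/* name_off is an offset into the BTF string section, which in practice never
+ * comes anywhere near 4 GiB, so UINT32_MAX can safely double as the "used"
+ * sentinel in marked_btf.
+ */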
+
+static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
+{
+ const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
+ struct btf_member *m = btf_members(t) + idx;
+
+ m->name_off = MARKED;
+}
+
+static int
+btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
+{
+ const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
+ struct btf_type *cloned_type;
+ struct btf_param *param;
+ struct btf_array *array;
+ int err, i;
+
+ if (type_id == 0)
+ return 0;
+
+ /* mark type on cloned BTF as used */
+ cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
+ cloned_type->name_off = MARKED;
+
+ /* recursively mark other types needed by it */
+ switch (btf_kind(btf_type)) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ break;
+ case BTF_KIND_PTR:
+ if (follow_pointers) {
+ err = btfgen_mark_type(info, btf_type->type, follow_pointers);
+ if (err)
+ return err;
+ }
+ break;
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_TYPEDEF:
+ err = btfgen_mark_type(info, btf_type->type, follow_pointers);
+ if (err)
+ return err;
+ break;
+ case BTF_KIND_ARRAY:
+ array = btf_array(btf_type);
+
+ /* mark array type */
+ err = btfgen_mark_type(info, array->type, follow_pointers);
+ /* mark array's index type */
+ err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
+ if (err)
+ return err;
+ break;
+ case BTF_KIND_FUNC_PROTO:
+ /* mark ret type */
+ err = btfgen_mark_type(info, btf_type->type, follow_pointers);
+ if (err)
+ return err;
+
+ /* mark parameters types */
+ param = btf_params(btf_type);
+ for (i = 0; i < btf_vlen(btf_type); i++) {
+ err = btfgen_mark_type(info, param->type, follow_pointers);
+ if (err)
+ return err;
+ param++;
+ }
+ break;
+	/* any other kind is unsupported and reported as an error */
+ default:
+ p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
+{
+ struct btf *btf = info->src_btf;
+ const struct btf_type *btf_type;
+ struct btf_member *btf_member;
+ struct btf_array *array;
+ unsigned int type_id = targ_spec->root_type_id;
+ int idx, err;
+
+ /* mark root type */
+ btf_type = btf__type_by_id(btf, type_id);
+ err = btfgen_mark_type(info, type_id, false);
+ if (err)
+ return err;
+
+ /* mark types for complex types (arrays, unions, structures) */
+ for (int i = 1; i < targ_spec->raw_len; i++) {
+ /* skip typedefs and mods */
+ while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
+ type_id = btf_type->type;
+ btf_type = btf__type_by_id(btf, type_id);
+ }
+
+ switch (btf_kind(btf_type)) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ idx = targ_spec->raw_spec[i];
+ btf_member = btf_members(btf_type) + idx;
+
+ /* mark member */
+ btfgen_mark_member(info, type_id, idx);
+
+ /* mark member's type */
+ type_id = btf_member->type;
+ btf_type = btf__type_by_id(btf, type_id);
+ err = btfgen_mark_type(info, type_id, false);
+ if (err)
+ return err;
+ break;
+ case BTF_KIND_ARRAY:
+ array = btf_array(btf_type);
+ type_id = array->type;
+ btf_type = btf__type_by_id(btf, type_id);
+ break;
+ default:
+ p_err("unsupported kind: %s (%d)",
+ btf_kind_str(btf_type), btf_type->type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
+ * this function does not rely on the target spec for inferring members, but
+ * uses the associated BTF.
+ *
+ * The `behind_ptr` argument is used to stop marking of composite types reached
+ * through a pointer. This way, we can keep BTF size in check while providing
+ * reasonable match semantics.
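+ *
+ * For example (illustrative):
+ *
+ *	struct inner { int x; };
+ *	struct outer { struct inner *p; int y; };
+ *
+ * Matching 'outer' marks 'outer' and both of its members and follows 'p';
+ * since 'inner' is reached behind a pointer, only the type itself is marked
+ * and its members are skipped, which keeps the generated BTF small.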
+ */
+static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
+{
+ const struct btf_type *btf_type;
+ struct btf *btf = info->src_btf;
+ struct btf_type *cloned_type;
+ int i, err;
+
+ if (type_id == 0)
+ return 0;
+
+ btf_type = btf__type_by_id(btf, type_id);
+ /* mark type on cloned BTF as used */
+ cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
+ cloned_type->name_off = MARKED;
+
+ switch (btf_kind(btf_type)) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *m = btf_members(btf_type);
+ __u16 vlen = btf_vlen(btf_type);
+
+ if (behind_ptr)
+ break;
+
+ for (i = 0; i < vlen; i++, m++) {
+ /* mark member */
+ btfgen_mark_member(info, type_id, i);
+
+ /* mark member's type */
+ err = btfgen_mark_type_match(info, m->type, false);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ case BTF_KIND_CONST:
+ case BTF_KIND_FWD:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
+ case BTF_KIND_PTR:
+ return btfgen_mark_type_match(info, btf_type->type, true);
+ case BTF_KIND_ARRAY: {
+ struct btf_array *array;
+
+ array = btf_array(btf_type);
+ /* mark array type */
+ err = btfgen_mark_type_match(info, array->type, false);
+ /* mark array's index type */
+ err = err ? : btfgen_mark_type_match(info, array->index_type, false);
+ if (err)
+ return err;
+ break;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ __u16 vlen = btf_vlen(btf_type);
+ struct btf_param *param;
+
+ /* mark ret type */
+ err = btfgen_mark_type_match(info, btf_type->type, false);
+ if (err)
+ return err;
+
+ /* mark parameters types */
+ param = btf_params(btf_type);
+ for (i = 0; i < vlen; i++) {
+ err = btfgen_mark_type_match(info, param->type, false);
+ if (err)
+ return err;
+ param++;
+ }
+ break;
+ }
+	/* any other kind is unsupported and reported as an error */
+ default:
+ p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
+ * this function does not rely on the target spec for inferring members, but
+ * uses the associated BTF.
+ */
+static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
+{
+ return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
+}
+
+static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
+{
+ return btfgen_mark_type(info, targ_spec->root_type_id, true);
+}
+
+static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
+{
+ return btfgen_mark_type(info, targ_spec->root_type_id, false);
+}
+
+static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
+{
+ switch (res->relo_kind) {
+ case BPF_CORE_FIELD_BYTE_OFFSET:
+ case BPF_CORE_FIELD_BYTE_SIZE:
+ case BPF_CORE_FIELD_EXISTS:
+ case BPF_CORE_FIELD_SIGNED:
+ case BPF_CORE_FIELD_LSHIFT_U64:
+ case BPF_CORE_FIELD_RSHIFT_U64:
+ return btfgen_record_field_relo(info, res);
+ case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
+ return 0;
+ case BPF_CORE_TYPE_ID_TARGET:
+ case BPF_CORE_TYPE_EXISTS:
+ case BPF_CORE_TYPE_SIZE:
+ return btfgen_record_type_relo(info, res);
+ case BPF_CORE_TYPE_MATCHES:
+ return btfgen_record_type_match_relo(info, res);
+ case BPF_CORE_ENUMVAL_EXISTS:
+ case BPF_CORE_ENUMVAL_VALUE:
+ return btfgen_record_enumval_relo(info, res);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct bpf_core_cand_list *
+btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
+{
+ const struct btf_type *local_type;
+ struct bpf_core_cand_list *cands = NULL;
+ struct bpf_core_cand local_cand = {};
+ size_t local_essent_len;
+ const char *local_name;
+ int err;
+
+ local_cand.btf = local_btf;
+ local_cand.id = local_id;
+
+ local_type = btf__type_by_id(local_btf, local_id);
+ if (!local_type) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ local_name = btf__name_by_offset(local_btf, local_type->name_off);
+ if (!local_name) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ local_essent_len = bpf_core_essential_name_len(local_name);
+
+ cands = calloc(1, sizeof(*cands));
+ if (!cands)
+ return NULL;
+
+ err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
+ if (err)
+ goto err_out;
+
+ return cands;
+
+err_out:
+ bpf_core_free_cands(cands);
+ errno = -err;
+ return NULL;
+}
+
+/* Record relocation information for a single BPF object */
+static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
+{
+ const struct btf_ext_info_sec *sec;
+ const struct bpf_core_relo *relo;
+ const struct btf_ext_info *seg;
+ struct hashmap_entry *entry;
+ struct hashmap *cand_cache = NULL;
+ struct btf_ext *btf_ext = NULL;
+ unsigned int relo_idx;
+ struct btf *btf = NULL;
+ size_t i;
+ int err;
+
+ btf = btf__parse(obj_path, &btf_ext);
+ if (!btf) {
+ err = -errno;
+ p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
+ return err;
+ }
+
+ if (!btf_ext) {
+ p_err("failed to parse BPF object '%s': section %s not found",
+ obj_path, BTF_EXT_ELF_SEC);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (btf_ext->core_relo_info.len == 0) {
+ err = 0;
+ goto out;
+ }
+
+ cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
+ if (IS_ERR(cand_cache)) {
+ err = PTR_ERR(cand_cache);
+ goto out;
+ }
+
+ seg = &btf_ext->core_relo_info;
+ for_each_btf_ext_sec(seg, sec) {
+ for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
+ struct bpf_core_spec specs_scratch[3] = {};
+ struct bpf_core_relo_res targ_res = {};
+ struct bpf_core_cand_list *cands = NULL;
+ const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);
+
+ if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
+ !hashmap__find(cand_cache, relo->type_id, &cands)) {
+ cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
+ if (!cands) {
+ err = -errno;
+ goto out;
+ }
+
+ err = hashmap__set(cand_cache, relo->type_id, cands,
+ NULL, NULL);
+ if (err)
+ goto out;
+ }
+
+ err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
+ specs_scratch, &targ_res);
+ if (err)
+ goto out;
+
+ /* specs_scratch[2] is the target spec */
+ err = btfgen_record_reloc(info, &specs_scratch[2]);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ btf__free(btf);
+ btf_ext__free(btf_ext);
+
+ if (!IS_ERR_OR_NULL(cand_cache)) {
+ hashmap__for_each_entry(cand_cache, entry, i) {
+ bpf_core_free_cands(entry->pvalue);
+ }
+ hashmap__free(cand_cache);
+ }
+
+ return err;
+}
+
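+/* Callback for btf_type_visit_type_ids(): rewrite a type ID that still points
+ * into the source BTF to its position in the minimized BTF, using the ids[]
+ * mapping built during the first pass of btfgen_get_btf().
+ */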
+static int btfgen_remap_id(__u32 *type_id, void *ctx)
+{
+ unsigned int *ids = ctx;
+
+ *type_id = ids[*type_id];
+
+ return 0;
+}
+
+/* Generate BTF from relocation information previously recorded */
+static struct btf *btfgen_get_btf(struct btfgen_info *info)
+{
+ struct btf *btf_new = NULL;
+ unsigned int *ids = NULL;
+ unsigned int i, n = btf__type_cnt(info->marked_btf);
+ int err = 0;
+
+ btf_new = btf__new_empty();
+ if (!btf_new) {
+ err = -errno;
+ goto err_out;
+ }
+
+ ids = calloc(n, sizeof(*ids));
+ if (!ids) {
+ err = -errno;
+ goto err_out;
+ }
+
+ /* first pass: add all marked types to btf_new and add their new ids to the ids map */
+ for (i = 1; i < n; i++) {
+ const struct btf_type *cloned_type, *type;
+ const char *name;
+ int new_id;
+
+ cloned_type = btf__type_by_id(info->marked_btf, i);
+
+ if (cloned_type->name_off != MARKED)
+ continue;
+
+ type = btf__type_by_id(info->src_btf, i);
+
+ /* add members for struct and union */
+ if (btf_is_composite(type)) {
+ struct btf_member *cloned_m, *m;
+ unsigned short vlen;
+ int idx_src;
+
+ name = btf__str_by_offset(info->src_btf, type->name_off);
+
+ if (btf_is_struct(type))
+ err = btf__add_struct(btf_new, name, type->size);
+ else
+ err = btf__add_union(btf_new, name, type->size);
+
+ if (err < 0)
+ goto err_out;
+ new_id = err;
+
+ cloned_m = btf_members(cloned_type);
+ m = btf_members(type);
+ vlen = btf_vlen(cloned_type);
+ for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
+ /* add only members that are marked as used */
+ if (cloned_m->name_off != MARKED)
+ continue;
+
+ name = btf__str_by_offset(info->src_btf, m->name_off);
+ err = btf__add_field(btf_new, name, m->type,
+ btf_member_bit_offset(cloned_type, idx_src),
+ btf_member_bitfield_size(cloned_type, idx_src));
+ if (err < 0)
+ goto err_out;
+ }
+ } else {
+ err = btf__add_type(btf_new, info->src_btf, type);
+ if (err < 0)
+ goto err_out;
+ new_id = err;
+ }
+
+ /* add ID mapping */
+ ids[i] = new_id;
+ }
+
+ /* second pass: fix up type ids */
+ for (i = 1; i < btf__type_cnt(btf_new); i++) {
+ struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);
+
+ err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
+ if (err)
+ goto err_out;
+ }
+
+ free(ids);
+ return btf_new;
+
+err_out:
+ btf__free(btf_new);
+ free(ids);
+ errno = -err;
+ return NULL;
+}
+
+/* Create minimized BTF file for a set of BPF objects.
+ *
+ * The BTFGen algorithm is divided into two main parts: (1) collect the
+ * BTF types that are involved in relocations and (2) generate the BTF
+ * object using the collected types.
+ *
+ * In order to collect the types involved in the relocations, we parse
+ * the BTF and BTF.ext sections of the BPF objects and use
+ * bpf_core_calc_relo_insn() to get the target specification, which
+ * indicates how the types and fields are used in a relocation.
+ *
+ * Types are recorded in different ways according to the kind of the
+ * relocation. For field-based relocations only the members that are
+ * actually used are saved, in order to reduce the size of the generated
+ * BTF file. For type-based relocations empty structs/unions are
+ * generated, and for enum-based relocations the whole type is saved.
+ *
+ * The second part of the algorithm generates the BTF object. It creates
+ * an empty BTF object and fills it with the types recorded in the
+ * previous step. This function takes care of adding only the structure
+ * and union members that were marked as used, and it also fixes up the
+ * type IDs on the generated BTF object.
+ */
+static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
+{
+ struct btfgen_info *info;
+ struct btf *btf_new = NULL;
+ int err, i;
+
+ info = btfgen_new_info(src_btf);
+ if (!info) {
+ err = -errno;
+ p_err("failed to allocate info structure: %s", strerror(errno));
+ goto out;
+ }
+
+ for (i = 0; objspaths[i] != NULL; i++) {
+ err = btfgen_record_obj(info, objspaths[i]);
+ if (err) {
+ p_err("error recording relocations for %s: %s", objspaths[i],
+ strerror(errno));
+ goto out;
+ }
+ }
+
+ btf_new = btfgen_get_btf(info);
+ if (!btf_new) {
+ err = -errno;
+ p_err("error generating BTF: %s", strerror(errno));
+ goto out;
+ }
+
+ err = btf_save_raw(btf_new, dst_btf);
+ if (err) {
+ p_err("error saving btf file: %s", strerror(errno));
+ goto out;
+ }
+
+out:
+ btf__free(btf_new);
+ btfgen_free_info(info);
+
+ return err;
+}
+
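+/* Example invocation (see "bpftool gen help"; paths are illustrative):
+ *
+ *	bpftool gen min_core_btf /sys/kernel/btf/vmlinux vmlinux.min.btf \
+ *		prog1.bpf.o prog2.bpf.o
+ */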
+static int do_min_core_btf(int argc, char **argv)
+{
+ const char *input, *output, **objs;
+ int i, err;
+
+ if (!REQ_ARGS(3)) {
+ usage();
+ return -1;
+ }
+
+ input = GET_ARG();
+ output = GET_ARG();
+
+ objs = (const char **) calloc(argc + 1, sizeof(*objs));
+ if (!objs) {
+ p_err("failed to allocate array for object names");
+ return -ENOMEM;
+ }
+
+ i = 0;
+ while (argc)
+ objs[i++] = GET_ARG();
+
+ err = minimize_btf(input, output, objs);
+ free(objs);
+ return err;
+}
+
+static const struct cmd cmds[] = {
+ { "object", do_object },
+ { "skeleton", do_skeleton },
+ { "subskeleton", do_subskeleton },
+ { "min_core_btf", do_min_core_btf},
+ { "help", do_help },
+ { 0 }
+};
+
+int do_gen(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c
new file mode 100644
index 0000000000..6b0e5202ca
--- /dev/null
+++ b/tools/bpf/bpftool/iter.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (C) 2020 Facebook
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <errno.h>
+#include <unistd.h>
+#include <linux/err.h>
+#include <bpf/libbpf.h>
+
+#include "main.h"
+
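+/* Implements "bpftool iter pin OBJ PATH [map MAP]": open and load OBJ, attach
+ * its first program as a BPF iterator, then pin the resulting link at PATH,
+ * which must be on a bpffs mount. E.g. (illustrative paths):
+ *
+ *	bpftool iter pin bpf_iter_task.bpf.o /sys/fs/bpf/task_iter
+ */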
+static int do_pin(int argc, char **argv)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, iter_opts);
+ union bpf_iter_link_info linfo;
+ const char *objfile, *path;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct bpf_link *link;
+ int err = -1, map_fd = -1;
+
+ if (!REQ_ARGS(2))
+ usage();
+
+ objfile = GET_ARG();
+ path = GET_ARG();
+
+ /* optional arguments */
+ if (argc) {
+ if (is_prefix(*argv, "map")) {
+ NEXT_ARG();
+
+ if (!REQ_ARGS(2)) {
+ p_err("incorrect map spec");
+ return -1;
+ }
+
+ map_fd = map_parse_fd(&argc, &argv);
+ if (map_fd < 0)
+ return -1;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.map.map_fd = map_fd;
+ iter_opts.link_info = &linfo;
+ iter_opts.link_info_len = sizeof(linfo);
+ }
+ }
+
+ obj = bpf_object__open(objfile);
+ if (!obj) {
+ err = -errno;
+ p_err("can't open objfile %s", objfile);
+ goto close_map_fd;
+ }
+
+ err = bpf_object__load(obj);
+ if (err) {
+ p_err("can't load objfile %s", objfile);
+ goto close_obj;
+ }
+
+ prog = bpf_object__next_program(obj, NULL);
+ if (!prog) {
+ err = -errno;
+ p_err("can't find bpf program in objfile %s", objfile);
+ goto close_obj;
+ }
+
+ link = bpf_program__attach_iter(prog, &iter_opts);
+ if (!link) {
+ err = -errno;
+ p_err("attach_iter failed for program %s",
+ bpf_program__name(prog));
+ goto close_obj;
+ }
+
+ err = mount_bpffs_for_pin(path, false);
+ if (err)
+ goto close_link;
+
+ err = bpf_link__pin(link, path);
+ if (err) {
+ p_err("pin_iter failed for program %s to path %s",
+ bpf_program__name(prog), path);
+ goto close_link;
+ }
+
+close_link:
+ bpf_link__destroy(link);
+close_obj:
+ bpf_object__close(obj);
+close_map_fd:
+ if (map_fd >= 0)
+ close(map_fd);
+ return err;
+}
+
+static int do_help(int argc, char **argv)
+{
+ fprintf(stderr,
+ "Usage: %1$s %2$s pin OBJ PATH [map MAP]\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " " HELP_SPEC_MAP "\n"
+ " " HELP_SPEC_OPTIONS " }\n"
+ "",
+ bin_name, "iter");
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "help", do_help },
+ { "pin", do_pin },
+ { 0 }
+};
+
+int do_iter(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
new file mode 100644
index 0000000000..7b8d9ec89e
--- /dev/null
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Based on:
+ *
+ * Minimal BPF JIT image disassembler
+ *
+ * Disassembles opcodes emitted by the BPF JIT compiler back to asm insns
+ * for debugging or verification purposes.
+ *
+ * Copyright 2013 Daniel Borkmann <daniel@iogearbox.net>
+ * Licensed under the GNU General Public License, version 2.0 (GPLv2)
+ */
+
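+/* This backs "bpftool prog dump jited": depending on the build, either LLVM
+ * or libbfd (see the HAVE_*_SUPPORT blocks below) provides the actual
+ * disassembler.
+ */
+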
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <limits.h>
+#include <bpf/libbpf.h>
+
+#ifdef HAVE_LLVM_SUPPORT
+#include <llvm-c/Core.h>
+#include <llvm-c/Disassembler.h>
+#include <llvm-c/Target.h>
+#include <llvm-c/TargetMachine.h>
+#endif
+
+#ifdef HAVE_LIBBFD_SUPPORT
+#include <bfd.h>
+#include <dis-asm.h>
+#include <tools/dis-asm-compat.h>
+#endif
+
+#include "json_writer.h"
+#include "main.h"
+
+static int oper_count;
+
+#ifdef HAVE_LLVM_SUPPORT
+#define DISASM_SPACER
+
+typedef LLVMDisasmContextRef disasm_ctx_t;
+
+static int printf_json(char *s)
+{
+ s = strtok(s, " \t");
+ jsonw_string_field(json_wtr, "operation", s);
+
+ jsonw_name(json_wtr, "operands");
+ jsonw_start_array(json_wtr);
+ oper_count = 1;
+
+ while ((s = strtok(NULL, " \t,()")) != 0) {
+ jsonw_string(json_wtr, s);
+ oper_count++;
+ }
+ return 0;
+}
+
+/* This callback to set the ref_type is necessary to have the LLVM disassembler
+ * print PC-relative addresses instead of byte offsets for branch instruction
+ * targets.
+ */
+static const char *
+symbol_lookup_callback(__maybe_unused void *disasm_info,
+ __maybe_unused uint64_t ref_value,
+ uint64_t *ref_type, __maybe_unused uint64_t ref_PC,
+ __maybe_unused const char **ref_name)
+{
+ *ref_type = LLVMDisassembler_ReferenceType_InOut_None;
+ return NULL;
+}
+
+static int
+init_context(disasm_ctx_t *ctx, const char *arch,
+ __maybe_unused const char *disassembler_options,
+ __maybe_unused unsigned char *image, __maybe_unused ssize_t len)
+{
+ char *triple;
+
+ if (arch)
+ triple = LLVMNormalizeTargetTriple(arch);
+ else
+ triple = LLVMGetDefaultTargetTriple();
+ if (!triple) {
+ p_err("Failed to retrieve triple");
+ return -1;
+ }
+ *ctx = LLVMCreateDisasm(triple, NULL, 0, NULL, symbol_lookup_callback);
+ LLVMDisposeMessage(triple);
+
+ if (!*ctx) {
+ p_err("Failed to create disassembler");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void destroy_context(disasm_ctx_t *ctx)
+{
+ LLVMDisposeMessage(*ctx);
+}
+
+static int
+disassemble_insn(disasm_ctx_t *ctx, unsigned char *image, ssize_t len, int pc)
+{
+ char buf[256];
+ int count;
+
+ count = LLVMDisasmInstruction(*ctx, image + pc, len - pc, pc,
+ buf, sizeof(buf));
+ if (json_output)
+ printf_json(buf);
+ else
+ printf("%s", buf);
+
+ return count;
+}
+
+int disasm_init(void)
+{
+ LLVMInitializeAllTargetInfos();
+ LLVMInitializeAllTargetMCs();
+ LLVMInitializeAllDisassemblers();
+ return 0;
+}
+#endif /* HAVE_LLVM_SUPPORT */
+
+#ifdef HAVE_LIBBFD_SUPPORT
+#define DISASM_SPACER "\t"
+
+typedef struct {
+ struct disassemble_info *info;
+ disassembler_ftype disassemble;
+ bfd *bfdf;
+} disasm_ctx_t;
+
+static int get_exec_path(char *tpath, size_t size)
+{
+ const char *path = "/proc/self/exe";
+ ssize_t len;
+
+ len = readlink(path, tpath, size - 1);
+ if (len <= 0)
+ return -1;
+
+ tpath[len] = 0;
+
+ return 0;
+}
+
+static int printf_json(void *out, const char *fmt, va_list ap)
+{
+ char *s;
+ int err;
+
+ err = vasprintf(&s, fmt, ap);
+ if (err < 0)
+ return -1;
+
+ if (!oper_count) {
+ int i;
+
+ /* Strip trailing spaces */
+ i = strlen(s) - 1;
+ while (s[i] == ' ')
+ s[i--] = '\0';
+
+ jsonw_string_field(json_wtr, "operation", s);
+ jsonw_name(json_wtr, "operands");
+ jsonw_start_array(json_wtr);
+ oper_count++;
+ } else if (!strcmp(fmt, ",")) {
+ /* Skip */
+ } else {
+ jsonw_string(json_wtr, s);
+ oper_count++;
+ }
+ free(s);
+ return 0;
+}
+
+static int fprintf_json(void *out, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = printf_json(out, fmt, ap);
+ va_end(ap);
+
+ return r;
+}
+
+static int fprintf_json_styled(void *out,
+ enum disassembler_style style __maybe_unused,
+ const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = printf_json(out, fmt, ap);
+ va_end(ap);
+
+ return r;
+}
+
+static int init_context(disasm_ctx_t *ctx, const char *arch,
+ const char *disassembler_options,
+ unsigned char *image, ssize_t len)
+{
+ struct disassemble_info *info;
+ char tpath[PATH_MAX];
+ bfd *bfdf;
+
+ memset(tpath, 0, sizeof(tpath));
+ if (get_exec_path(tpath, sizeof(tpath))) {
+ p_err("failed to create disassembler (get_exec_path)");
+ return -1;
+ }
+
+ ctx->bfdf = bfd_openr(tpath, NULL);
+ if (!ctx->bfdf) {
+ p_err("failed to create disassembler (bfd_openr)");
+ return -1;
+ }
+ if (!bfd_check_format(ctx->bfdf, bfd_object)) {
+ p_err("failed to create disassembler (bfd_check_format)");
+ goto err_close;
+ }
+ bfdf = ctx->bfdf;
+
+ ctx->info = malloc(sizeof(struct disassemble_info));
+ if (!ctx->info) {
+ p_err("mem alloc failed");
+ goto err_close;
+ }
+ info = ctx->info;
+
+ if (json_output)
+ init_disassemble_info_compat(info, stdout,
+ (fprintf_ftype) fprintf_json,
+ fprintf_json_styled);
+ else
+ init_disassemble_info_compat(info, stdout,
+ (fprintf_ftype) fprintf,
+ fprintf_styled);
+
+ /* Update architecture info for offload. */
+ if (arch) {
+ const bfd_arch_info_type *inf = bfd_scan_arch(arch);
+
+ if (inf) {
+ bfdf->arch_info = inf;
+ } else {
+ p_err("No libbfd support for %s", arch);
+ goto err_free;
+ }
+ }
+
+ info->arch = bfd_get_arch(bfdf);
+ info->mach = bfd_get_mach(bfdf);
+ if (disassembler_options)
+ info->disassembler_options = disassembler_options;
+ info->buffer = image;
+ info->buffer_length = len;
+
+ disassemble_init_for_target(info);
+
+#ifdef DISASM_FOUR_ARGS_SIGNATURE
+ ctx->disassemble = disassembler(info->arch,
+ bfd_big_endian(bfdf),
+ info->mach,
+ bfdf);
+#else
+ ctx->disassemble = disassembler(bfdf);
+#endif
+ if (!ctx->disassemble) {
+ p_err("failed to create disassembler");
+ goto err_free;
+ }
+ return 0;
+
+err_free:
+ free(info);
+err_close:
+ bfd_close(ctx->bfdf);
+ return -1;
+}
+
+static void destroy_context(disasm_ctx_t *ctx)
+{
+ free(ctx->info);
+ bfd_close(ctx->bfdf);
+}
+
+static int
+disassemble_insn(disasm_ctx_t *ctx, __maybe_unused unsigned char *image,
+ __maybe_unused ssize_t len, int pc)
+{
+ return ctx->disassemble(pc, ctx->info);
+}
+
+int disasm_init(void)
+{
+ bfd_init();
+ return 0;
+}
+#endif /* HAVE_LIBBFD_SUPPORT */
+
+int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum)
+{
+ const struct bpf_line_info *linfo = NULL;
+ unsigned int nr_skip = 0;
+ int count, i, pc = 0;
+ disasm_ctx_t ctx;
+
+ if (!len)
+ return -1;
+
+ if (init_context(&ctx, arch, disassembler_options, image, len))
+ return -1;
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ do {
+ if (prog_linfo) {
+ linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
+ func_ksym + pc,
+ func_idx,
+ nr_skip);
+ if (linfo)
+ nr_skip++;
+ }
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ oper_count = 0;
+ if (linfo)
+ btf_dump_linfo_json(btf, linfo, linum);
+ jsonw_name(json_wtr, "pc");
+ jsonw_printf(json_wtr, "\"0x%x\"", pc);
+ } else {
+ if (linfo)
+ btf_dump_linfo_plain(btf, linfo, "; ",
+ linum);
+ printf("%4x:" DISASM_SPACER, pc);
+ }
+
+ count = disassemble_insn(&ctx, image, len, pc);
+
+ if (json_output) {
+			/* The operand array was started in fprintf_json. Before
+			 * closing it, make sure we emit a _null_ value if no
+			 * operand other than the operation code was present.
+			 */
+ if (oper_count == 1)
+ jsonw_null(json_wtr);
+ jsonw_end_array(json_wtr);
+ }
+
+ if (opcodes) {
+ if (json_output) {
+ jsonw_name(json_wtr, "opcodes");
+ jsonw_start_array(json_wtr);
+ for (i = 0; i < count; ++i)
+ jsonw_printf(json_wtr, "\"0x%02hhx\"",
+ (uint8_t)image[pc + i]);
+ jsonw_end_array(json_wtr);
+ } else {
+ printf("\n\t");
+ for (i = 0; i < count; ++i)
+ printf("%02x ",
+ (uint8_t)image[pc + i]);
+ }
+ }
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+
+ pc += count;
+ } while (count > 0 && pc < len);
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ destroy_context(&ctx);
+
+ return 0;
+}
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
new file mode 100644
index 0000000000..be379613d1
--- /dev/null
+++ b/tools/bpf/bpftool/json_writer.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause)
+/*
+ * Simple streaming JSON writer
+ *
+ * This takes care of the annoying bits of JSON syntax like the commas
+ * after elements
+ *
+ * Authors: Stephen Hemminger <stephen@networkplumber.org>
+ */
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <malloc.h>
+#include <inttypes.h>
+#include <stdint.h>
+
+#include "json_writer.h"
+
+struct json_writer {
+ FILE *out; /* output file */
+ unsigned depth; /* nesting */
+	bool pretty; /* optional whitespace */
+ char sep; /* either nul or comma */
+};
+
+/* indentation for pretty print */
+static void jsonw_indent(json_writer_t *self)
+{
+ unsigned i;
+ for (i = 0; i < self->depth; ++i)
+ fputs(" ", self->out);
+}
+
+/* end current line and indent if pretty printing */
+static void jsonw_eol(json_writer_t *self)
+{
+ if (!self->pretty)
+ return;
+
+ putc('\n', self->out);
+ jsonw_indent(self);
+}
+
+/* If the current object is not empty, print a comma */
+static void jsonw_eor(json_writer_t *self)
+{
+ if (self->sep != '\0')
+ putc(self->sep, self->out);
+ self->sep = ',';
+}
+
+
+/* Output a JSON-encoded string; handles C escapes but not Unicode */
+static void jsonw_puts(json_writer_t *self, const char *str)
+{
+ putc('"', self->out);
+ for (; *str; ++str)
+ switch (*str) {
+ case '\t':
+ fputs("\\t", self->out);
+ break;
+ case '\n':
+ fputs("\\n", self->out);
+ break;
+ case '\r':
+ fputs("\\r", self->out);
+ break;
+ case '\f':
+ fputs("\\f", self->out);
+ break;
+ case '\b':
+ fputs("\\b", self->out);
+ break;
+ case '\\':
+ fputs("\\\\", self->out);
+ break;
+ case '"':
+ fputs("\\\"", self->out);
+ break;
+ default:
+ putc(*str, self->out);
+ }
+ putc('"', self->out);
+}
+
+/* Create a new JSON stream */
+json_writer_t *jsonw_new(FILE *f)
+{
+ json_writer_t *self = malloc(sizeof(*self));
+ if (self) {
+ self->out = f;
+ self->depth = 0;
+ self->pretty = false;
+ self->sep = '\0';
+ }
+ return self;
+}
+
+/* End output to JSON stream */
+void jsonw_destroy(json_writer_t **self_p)
+{
+ json_writer_t *self = *self_p;
+
+ assert(self->depth == 0);
+ fputs("\n", self->out);
+ fflush(self->out);
+ free(self);
+ *self_p = NULL;
+}
+
+void jsonw_pretty(json_writer_t *self, bool on)
+{
+ self->pretty = on;
+}
+
+void jsonw_reset(json_writer_t *self)
+{
+ assert(self->depth == 0);
+ self->sep = '\0';
+}
+
+/* Basic blocks */
+static void jsonw_begin(json_writer_t *self, int c)
+{
+ jsonw_eor(self);
+ putc(c, self->out);
+ ++self->depth;
+ self->sep = '\0';
+}
+
+static void jsonw_end(json_writer_t *self, int c)
+{
+ assert(self->depth > 0);
+
+ --self->depth;
+ if (self->sep != '\0')
+ jsonw_eol(self);
+ putc(c, self->out);
+ self->sep = ',';
+}
+
+
+/* Add a JSON property name */
+void jsonw_name(json_writer_t *self, const char *name)
+{
+ jsonw_eor(self);
+ jsonw_eol(self);
+ self->sep = '\0';
+ jsonw_puts(self, name);
+ putc(':', self->out);
+ if (self->pretty)
+ putc(' ', self->out);
+}
+
+void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
+{
+ jsonw_eor(self);
+ putc('"', self->out);
+ vfprintf(self->out, fmt, ap);
+ putc('"', self->out);
+}
+
+void jsonw_printf(json_writer_t *self, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ jsonw_eor(self);
+ vfprintf(self->out, fmt, ap);
+ va_end(ap);
+}
+
+/* Collections */
+void jsonw_start_object(json_writer_t *self)
+{
+ jsonw_begin(self, '{');
+}
+
+void jsonw_end_object(json_writer_t *self)
+{
+ jsonw_end(self, '}');
+}
+
+void jsonw_start_array(json_writer_t *self)
+{
+ jsonw_begin(self, '[');
+}
+
+void jsonw_end_array(json_writer_t *self)
+{
+ jsonw_end(self, ']');
+}
+
+/* JSON value types */
+void jsonw_string(json_writer_t *self, const char *value)
+{
+ jsonw_eor(self);
+ jsonw_puts(self, value);
+}
+
+void jsonw_bool(json_writer_t *self, bool val)
+{
+ jsonw_printf(self, "%s", val ? "true" : "false");
+}
+
+void jsonw_null(json_writer_t *self)
+{
+ jsonw_printf(self, "null");
+}
+
+void jsonw_float_fmt(json_writer_t *self, const char *fmt, double num)
+{
+ jsonw_printf(self, fmt, num);
+}
+
+#ifdef notused
+void jsonw_float(json_writer_t *self, double num)
+{
+ jsonw_printf(self, "%g", num);
+}
+#endif
+
+void jsonw_hu(json_writer_t *self, unsigned short num)
+{
+ jsonw_printf(self, "%hu", num);
+}
+
+void jsonw_uint(json_writer_t *self, uint64_t num)
+{
+ jsonw_printf(self, "%"PRIu64, num);
+}
+
+void jsonw_lluint(json_writer_t *self, unsigned long long int num)
+{
+ jsonw_printf(self, "%llu", num);
+}
+
+void jsonw_int(json_writer_t *self, int64_t num)
+{
+ jsonw_printf(self, "%"PRId64, num);
+}
+
+/* Basic name/value objects */
+void jsonw_string_field(json_writer_t *self, const char *prop, const char *val)
+{
+ jsonw_name(self, prop);
+ jsonw_string(self, val);
+}
+
+void jsonw_bool_field(json_writer_t *self, const char *prop, bool val)
+{
+ jsonw_name(self, prop);
+ jsonw_bool(self, val);
+}
+
+#ifdef notused
+void jsonw_float_field(json_writer_t *self, const char *prop, double val)
+{
+ jsonw_name(self, prop);
+ jsonw_float(self, val);
+}
+#endif
+
+void jsonw_float_field_fmt(json_writer_t *self,
+ const char *prop,
+ const char *fmt,
+ double val)
+{
+ jsonw_name(self, prop);
+ jsonw_float_fmt(self, fmt, val);
+}
+
+void jsonw_uint_field(json_writer_t *self, const char *prop, uint64_t num)
+{
+ jsonw_name(self, prop);
+ jsonw_uint(self, num);
+}
+
+void jsonw_hu_field(json_writer_t *self, const char *prop, unsigned short num)
+{
+ jsonw_name(self, prop);
+ jsonw_hu(self, num);
+}
+
+void jsonw_lluint_field(json_writer_t *self,
+ const char *prop,
+ unsigned long long int num)
+{
+ jsonw_name(self, prop);
+ jsonw_lluint(self, num);
+}
+
+void jsonw_int_field(json_writer_t *self, const char *prop, int64_t num)
+{
+ jsonw_name(self, prop);
+ jsonw_int(self, num);
+}
+
+void jsonw_null_field(json_writer_t *self, const char *prop)
+{
+ jsonw_name(self, prop);
+ jsonw_null(self);
+}
+
+#ifdef TEST
+int main(int argc, char **argv)
+{
+ json_writer_t *wr = jsonw_new(stdout);
+
+ jsonw_start_object(wr);
+ jsonw_pretty(wr, true);
+ jsonw_name(wr, "Vyatta");
+ jsonw_start_object(wr);
+ jsonw_string_field(wr, "url", "http://vyatta.com");
+ jsonw_uint_field(wr, "downloads", 2000000ul);
+ jsonw_float_field(wr, "stock", 8.16);
+
+ jsonw_name(wr, "ARGV");
+ jsonw_start_array(wr);
+ while (--argc)
+ jsonw_string(wr, *++argv);
+ jsonw_end_array(wr);
+
+ jsonw_name(wr, "empty");
+ jsonw_start_array(wr);
+ jsonw_end_array(wr);
+
+ jsonw_name(wr, "NIL");
+ jsonw_start_object(wr);
+ jsonw_end_object(wr);
+
+ jsonw_null_field(wr, "my_null");
+
+ jsonw_name(wr, "special chars");
+ jsonw_start_array(wr);
+ jsonw_string_field(wr, "slash", "/");
+ jsonw_string_field(wr, "newline", "\n");
+ jsonw_string_field(wr, "tab", "\t");
+ jsonw_string_field(wr, "ff", "\f");
+ jsonw_string_field(wr, "quote", "\"");
+ jsonw_string_field(wr, "tick", "\'");
+ jsonw_string_field(wr, "backslash", "\\");
+ jsonw_end_array(wr);
+
+ jsonw_end_object(wr);
+
+ jsonw_end_object(wr);
+ jsonw_destroy(&wr);
+ return 0;
+}
+
+#endif
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
new file mode 100644
index 0000000000..5aaffd3b83
--- /dev/null
+++ b/tools/bpf/bpftool/json_writer.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Simple streaming JSON writer
+ *
+ * This takes care of the annoying bits of JSON syntax like the commas
+ * after elements
+ *
+ * Authors: Stephen Hemminger <stephen@networkplumber.org>
+ */
+
+#ifndef _JSON_WRITER_H_
+#define _JSON_WRITER_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <linux/compiler.h>
+
+/* Opaque class structure */
+typedef struct json_writer json_writer_t;
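+
+/* Minimal usage sketch (illustrative):
+ *
+ *	json_writer_t *w = jsonw_new(stdout);
+ *	jsonw_start_object(w);
+ *	jsonw_string_field(w, "name", "example");
+ *	jsonw_end_object(w);
+ *	jsonw_destroy(&w);
+ *
+ * prints {"name":"example"} followed by a newline.
+ */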
+
+/* Create a new JSON stream */
+json_writer_t *jsonw_new(FILE *f);
+/* End output to JSON stream */
+void jsonw_destroy(json_writer_t **self_p);
+
+/* Cause output to have pretty whitespace */
+void jsonw_pretty(json_writer_t *self, bool on);
+
+/* Reset separator to create new JSON */
+void jsonw_reset(json_writer_t *self);
+
+/* Add property name */
+void jsonw_name(json_writer_t *self, const char *name);
+
+/* Add value */
+void __printf(2, 0) jsonw_vprintf_enquote(json_writer_t *self, const char *fmt,
+ va_list ap);
+void __printf(2, 3) jsonw_printf(json_writer_t *self, const char *fmt, ...);
+void jsonw_string(json_writer_t *self, const char *value);
+void jsonw_bool(json_writer_t *self, bool value);
+void jsonw_float(json_writer_t *self, double number);
+void jsonw_float_fmt(json_writer_t *self, const char *fmt, double num);
+void jsonw_uint(json_writer_t *self, uint64_t number);
+void jsonw_hu(json_writer_t *self, unsigned short number);
+void jsonw_int(json_writer_t *self, int64_t number);
+void jsonw_null(json_writer_t *self);
+void jsonw_lluint(json_writer_t *self, unsigned long long int num);
+
+/* Useful Combinations of name and value */
+void jsonw_string_field(json_writer_t *self, const char *prop, const char *val);
+void jsonw_bool_field(json_writer_t *self, const char *prop, bool value);
+void jsonw_float_field(json_writer_t *self, const char *prop, double num);
+void jsonw_uint_field(json_writer_t *self, const char *prop, uint64_t num);
+void jsonw_hu_field(json_writer_t *self, const char *prop, unsigned short num);
+void jsonw_int_field(json_writer_t *self, const char *prop, int64_t num);
+void jsonw_null_field(json_writer_t *self, const char *prop);
+void jsonw_lluint_field(json_writer_t *self, const char *prop,
+ unsigned long long int num);
+void jsonw_float_field_fmt(json_writer_t *self, const char *prop,
+ const char *fmt, double val);
+
+/* Collections */
+void jsonw_start_object(json_writer_t *self);
+void jsonw_end_object(json_writer_t *self);
+
+void jsonw_start_array(json_writer_t *self);
+void jsonw_end_array(json_writer_t *self);
+
+/* Override default exception handling */
+typedef void (jsonw_err_handler_fn)(const char *);
+
+#endif /* _JSON_WRITER_H_ */
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
new file mode 100644
index 0000000000..2e5c231e08
--- /dev/null
+++ b/tools/bpf/bpftool/link.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2020 Facebook */
+
+#include <errno.h>
+#include <linux/err.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_arp.h>
+#include <linux/perf_event.h>
+#include <net/if.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/hashmap.h>
+
+#include "json_writer.h"
+#include "main.h"
+#include "xlated_dumper.h"
+
+#define PERF_HW_CACHE_LEN 128
+
+static struct hashmap *link_table;
+static struct dump_data dd;
+
+static const char *perf_type_name[PERF_TYPE_MAX] = {
+ [PERF_TYPE_HARDWARE] = "hardware",
+ [PERF_TYPE_SOFTWARE] = "software",
+ [PERF_TYPE_TRACEPOINT] = "tracepoint",
+ [PERF_TYPE_HW_CACHE] = "hw-cache",
+ [PERF_TYPE_RAW] = "raw",
+ [PERF_TYPE_BREAKPOINT] = "breakpoint",
+};
+
+const char *event_symbols_hw[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = "cpu-cycles",
+ [PERF_COUNT_HW_INSTRUCTIONS] = "instructions",
+ [PERF_COUNT_HW_CACHE_REFERENCES] = "cache-references",
+ [PERF_COUNT_HW_CACHE_MISSES] = "cache-misses",
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "branch-instructions",
+ [PERF_COUNT_HW_BRANCH_MISSES] = "branch-misses",
+ [PERF_COUNT_HW_BUS_CYCLES] = "bus-cycles",
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "stalled-cycles-frontend",
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "stalled-cycles-backend",
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = "ref-cycles",
+};
+
+const char *event_symbols_sw[PERF_COUNT_SW_MAX] = {
+ [PERF_COUNT_SW_CPU_CLOCK] = "cpu-clock",
+ [PERF_COUNT_SW_TASK_CLOCK] = "task-clock",
+ [PERF_COUNT_SW_PAGE_FAULTS] = "page-faults",
+ [PERF_COUNT_SW_CONTEXT_SWITCHES] = "context-switches",
+ [PERF_COUNT_SW_CPU_MIGRATIONS] = "cpu-migrations",
+ [PERF_COUNT_SW_PAGE_FAULTS_MIN] = "minor-faults",
+ [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = "major-faults",
+ [PERF_COUNT_SW_ALIGNMENT_FAULTS] = "alignment-faults",
+ [PERF_COUNT_SW_EMULATION_FAULTS] = "emulation-faults",
+ [PERF_COUNT_SW_DUMMY] = "dummy",
+ [PERF_COUNT_SW_BPF_OUTPUT] = "bpf-output",
+ [PERF_COUNT_SW_CGROUP_SWITCHES] = "cgroup-switches",
+};
+
+const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] = {
+ [PERF_COUNT_HW_CACHE_L1D] = "L1-dcache",
+ [PERF_COUNT_HW_CACHE_L1I] = "L1-icache",
+ [PERF_COUNT_HW_CACHE_LL] = "LLC",
+ [PERF_COUNT_HW_CACHE_DTLB] = "dTLB",
+ [PERF_COUNT_HW_CACHE_ITLB] = "iTLB",
+ [PERF_COUNT_HW_CACHE_BPU] = "branch",
+ [PERF_COUNT_HW_CACHE_NODE] = "node",
+};
+
+const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] = {
+ [PERF_COUNT_HW_CACHE_OP_READ] = "load",
+ [PERF_COUNT_HW_CACHE_OP_WRITE] = "store",
+ [PERF_COUNT_HW_CACHE_OP_PREFETCH] = "prefetch",
+};
+
+const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [PERF_COUNT_HW_CACHE_RESULT_ACCESS] = "refs",
+ [PERF_COUNT_HW_CACHE_RESULT_MISS] = "misses",
+};
+
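+/* Statement expression: evaluates to the table entry for id, or to NULL when
+ * id is out of range (or has no entry in the table).
+ */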
+#define perf_event_name(array, id) ({ \
+ const char *event_str = NULL; \
+ \
+ if ((id) < ARRAY_SIZE(array)) \
+ event_str = array[id]; \
+ event_str; \
+})
+
+static int link_parse_fd(int *argc, char ***argv)
+{
+ int fd;
+
+ if (is_prefix(**argv, "id")) {
+ unsigned int id;
+ char *endptr;
+
+ NEXT_ARGP();
+
+ id = strtoul(**argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as ID", **argv);
+ return -1;
+ }
+ NEXT_ARGP();
+
+ fd = bpf_link_get_fd_by_id(id);
+ if (fd < 0)
+ p_err("failed to get link with ID %d: %s", id, strerror(errno));
+ return fd;
+ } else if (is_prefix(**argv, "pinned")) {
+ char *path;
+
+ NEXT_ARGP();
+
+ path = **argv;
+ NEXT_ARGP();
+
+ return open_obj_pinned_any(path, BPF_OBJ_LINK);
+ }
+
+ p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
+ return -1;
+}
+
+static void
+show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+ const char *link_type_str;
+
+ jsonw_uint_field(wtr, "id", info->id);
+ link_type_str = libbpf_bpf_link_type_str(info->type);
+ if (link_type_str)
+ jsonw_string_field(wtr, "type", link_type_str);
+ else
+ jsonw_uint_field(wtr, "type", info->type);
+
+ jsonw_uint_field(json_wtr, "prog_id", info->prog_id);
+}
+
+static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
+{
+ const char *attach_type_str;
+
+ attach_type_str = libbpf_bpf_attach_type_str(attach_type);
+ if (attach_type_str)
+ jsonw_string_field(wtr, "attach_type", attach_type_str);
+ else
+ jsonw_uint_field(wtr, "attach_type", attach_type);
+}
+
+static void show_link_ifindex_json(__u32 ifindex, json_writer_t *wtr)
+{
+ char devname[IF_NAMESIZE] = "(unknown)";
+
+ if (ifindex)
+ if_indextoname(ifindex, devname);
+ else
+ snprintf(devname, sizeof(devname), "(detached)");
+ jsonw_string_field(wtr, "devname", devname);
+ jsonw_uint_field(wtr, "ifindex", ifindex);
+}
+
+static bool is_iter_map_target(const char *target_name)
+{
+ return strcmp(target_name, "bpf_map_elem") == 0 ||
+ strcmp(target_name, "bpf_sk_storage_map") == 0;
+}
+
+static bool is_iter_cgroup_target(const char *target_name)
+{
+ return strcmp(target_name, "cgroup") == 0;
+}
+
+static const char *cgroup_order_string(__u32 order)
+{
+ switch (order) {
+ case BPF_CGROUP_ITER_ORDER_UNSPEC:
+ return "order_unspec";
+ case BPF_CGROUP_ITER_SELF_ONLY:
+ return "self_only";
+ case BPF_CGROUP_ITER_DESCENDANTS_PRE:
+ return "descendants_pre";
+ case BPF_CGROUP_ITER_DESCENDANTS_POST:
+ return "descendants_post";
+ case BPF_CGROUP_ITER_ANCESTORS_UP:
+ return "ancestors_up";
+ default: /* won't happen */
+ return "unknown";
+ }
+}
+
+static bool is_iter_task_target(const char *target_name)
+{
+ return strcmp(target_name, "task") == 0 ||
+ strcmp(target_name, "task_file") == 0 ||
+ strcmp(target_name, "task_vma") == 0;
+}
+
+static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+ const char *target_name = u64_to_ptr(info->iter.target_name);
+
+ jsonw_string_field(wtr, "target_name", target_name);
+
+ if (is_iter_map_target(target_name))
+ jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
+ else if (is_iter_task_target(target_name)) {
+ if (info->iter.task.tid)
+ jsonw_uint_field(wtr, "tid", info->iter.task.tid);
+ else if (info->iter.task.pid)
+ jsonw_uint_field(wtr, "pid", info->iter.task.pid);
+ }
+
+ if (is_iter_cgroup_target(target_name)) {
+ jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
+ jsonw_string_field(wtr, "order",
+ cgroup_order_string(info->iter.cgroup.order));
+ }
+}
+
+void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr)
+{
+ jsonw_uint_field(json_wtr, "pf",
+ info->netfilter.pf);
+ jsonw_uint_field(json_wtr, "hook",
+ info->netfilter.hooknum);
+ jsonw_int_field(json_wtr, "prio",
+ info->netfilter.priority);
+ jsonw_uint_field(json_wtr, "flags",
+ info->netfilter.flags);
+}
+
+static int get_prog_info(int prog_id, struct bpf_prog_info *info)
+{
+ __u32 len = sizeof(*info);
+ int err, prog_fd;
+
+ prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (prog_fd < 0)
+ return prog_fd;
+
+ memset(info, 0, sizeof(*info));
+ err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
+ if (err)
+ p_err("can't get prog info: %s", strerror(errno));
+ close(prog_fd);
+ return err;
+}
+
+static int cmp_u64(const void *A, const void *B)
+{
+	const __u64 *a = A, *b = B;
+
+	/* avoid truncating the 64-bit difference to int */
+	return *a < *b ? -1 : *a > *b;
+}
+
+static void
+show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+ __u32 i, j = 0;
+ __u64 *addrs;
+
+ jsonw_bool_field(json_wtr, "retprobe",
+ info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
+ jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
+ jsonw_name(json_wtr, "funcs");
+ jsonw_start_array(json_wtr);
+ addrs = u64_to_ptr(info->kprobe_multi.addrs);
+ qsort(addrs, info->kprobe_multi.count, sizeof(addrs[0]), cmp_u64);
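+
+	/* Both addrs[] (after the qsort above) and the symbol table loaded
+	 * below are sorted by address, so a single linear walk over the
+	 * symbols matches every address in order.
+	 */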
+
+	/* Load the kernel symbol table once and reuse it for all entries. */
+ if (!dd.sym_count)
+ kernel_syms_load(&dd);
+ for (i = 0; i < dd.sym_count; i++) {
+ if (dd.sym_mapping[i].address != addrs[j])
+ continue;
+ jsonw_start_object(json_wtr);
+ jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address);
+ jsonw_string_field(json_wtr, "func", dd.sym_mapping[i].name);
+		/* Print a null module name if the symbol lives in vmlinux */
+ if (dd.sym_mapping[i].module[0] == '\0') {
+ jsonw_name(json_wtr, "module");
+ jsonw_null(json_wtr);
+ } else {
+ jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module);
+ }
+ jsonw_end_object(json_wtr);
+		if (++j == info->kprobe_multi.count)
+ break;
+ }
+ jsonw_end_array(json_wtr);
+}
+
+static void
+show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+ jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_KRETPROBE);
+ jsonw_uint_field(wtr, "addr", info->perf_event.kprobe.addr);
+ jsonw_string_field(wtr, "func",
+ u64_to_ptr(info->perf_event.kprobe.func_name));
+ jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
+}
+
+static void
+show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+ jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_URETPROBE);
+ jsonw_string_field(wtr, "file",
+ u64_to_ptr(info->perf_event.uprobe.file_name));
+ jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset);
+}
+
+static void
+show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+ jsonw_string_field(wtr, "tracepoint",
+ u64_to_ptr(info->perf_event.tracepoint.tp_name));
+}
+
+static char *perf_config_hw_cache_str(__u64 config)
+{
+ const char *hw_cache, *result, *op;
+ char *str = malloc(PERF_HW_CACHE_LEN);
+
+ if (!str) {
+ p_err("mem alloc failed");
+ return NULL;
+ }
+
+ hw_cache = perf_event_name(evsel__hw_cache, config & 0xff);
+ if (hw_cache)
+ snprintf(str, PERF_HW_CACHE_LEN, "%s-", hw_cache);
+ else
+ snprintf(str, PERF_HW_CACHE_LEN, "%lld-", config & 0xff);
+
+ op = perf_event_name(evsel__hw_cache_op, (config >> 8) & 0xff);
+ if (op)
+ snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
+ "%s-", op);
+ else
+ snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
+ "%lld-", (config >> 8) & 0xff);
+
+ result = perf_event_name(evsel__hw_cache_result, config >> 16);
+ if (result)
+ snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
+ "%s", result);
+ else
+ snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str),
+ "%lld", config >> 16);
+ return str;
+}
+
+static const char *perf_config_str(__u32 type, __u64 config)
+{
+ const char *perf_config;
+
+ switch (type) {
+ case PERF_TYPE_HARDWARE:
+ perf_config = perf_event_name(event_symbols_hw, config);
+ break;
+ case PERF_TYPE_SOFTWARE:
+ perf_config = perf_event_name(event_symbols_sw, config);
+ break;
+ case PERF_TYPE_HW_CACHE:
+ perf_config = perf_config_hw_cache_str(config);
+ break;
+ default:
+ perf_config = NULL;
+ break;
+ }
+ return perf_config;
+}
+
+static void
+show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+ __u64 config = info->perf_event.event.config;
+ __u32 type = info->perf_event.event.type;
+ const char *perf_type, *perf_config;
+
+ perf_type = perf_event_name(perf_type_name, type);
+ if (perf_type)
+ jsonw_string_field(wtr, "event_type", perf_type);
+ else
+ jsonw_uint_field(wtr, "event_type", type);
+
+ perf_config = perf_config_str(type, config);
+ if (perf_config)
+ jsonw_string_field(wtr, "event_config", perf_config);
+ else
+ jsonw_uint_field(wtr, "event_config", config);
+
+ if (type == PERF_TYPE_HW_CACHE && perf_config)
+ free((void *)perf_config);
+}
+
+static int show_link_close_json(int fd, struct bpf_link_info *info)
+{
+ struct bpf_prog_info prog_info;
+ const char *prog_type_str;
+ int err;
+
+ jsonw_start_object(json_wtr);
+
+ show_link_header_json(info, json_wtr);
+
+ switch (info->type) {
+ case BPF_LINK_TYPE_RAW_TRACEPOINT:
+ jsonw_string_field(json_wtr, "tp_name",
+ u64_to_ptr(info->raw_tracepoint.tp_name));
+ break;
+ case BPF_LINK_TYPE_TRACING:
+ err = get_prog_info(info->prog_id, &prog_info);
+ if (err)
+ return err;
+
+ prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (prog_type_str)
+ jsonw_string_field(json_wtr, "prog_type", prog_type_str);
+ else
+ jsonw_uint_field(json_wtr, "prog_type", prog_info.type);
+
+ show_link_attach_type_json(info->tracing.attach_type,
+ json_wtr);
+ jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id);
+ jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id);
+ break;
+ case BPF_LINK_TYPE_CGROUP:
+ jsonw_lluint_field(json_wtr, "cgroup_id",
+ info->cgroup.cgroup_id);
+ show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
+ break;
+ case BPF_LINK_TYPE_ITER:
+ show_iter_json(info, json_wtr);
+ break;
+ case BPF_LINK_TYPE_NETNS:
+ jsonw_uint_field(json_wtr, "netns_ino",
+ info->netns.netns_ino);
+ show_link_attach_type_json(info->netns.attach_type, json_wtr);
+ break;
+ case BPF_LINK_TYPE_NETFILTER:
+ netfilter_dump_json(info, json_wtr);
+ break;
+ case BPF_LINK_TYPE_TCX:
+ show_link_ifindex_json(info->tcx.ifindex, json_wtr);
+ show_link_attach_type_json(info->tcx.attach_type, json_wtr);
+ break;
+ case BPF_LINK_TYPE_XDP:
+ show_link_ifindex_json(info->xdp.ifindex, json_wtr);
+ break;
+ case BPF_LINK_TYPE_STRUCT_OPS:
+ jsonw_uint_field(json_wtr, "map_id",
+ info->struct_ops.map_id);
+ break;
+ case BPF_LINK_TYPE_KPROBE_MULTI:
+ show_kprobe_multi_json(info, json_wtr);
+ break;
+ case BPF_LINK_TYPE_PERF_EVENT:
+ switch (info->perf_event.type) {
+ case BPF_PERF_EVENT_EVENT:
+ show_perf_event_event_json(info, json_wtr);
+ break;
+ case BPF_PERF_EVENT_TRACEPOINT:
+ show_perf_event_tracepoint_json(info, json_wtr);
+ break;
+ case BPF_PERF_EVENT_KPROBE:
+ case BPF_PERF_EVENT_KRETPROBE:
+ show_perf_event_kprobe_json(info, json_wtr);
+ break;
+ case BPF_PERF_EVENT_UPROBE:
+ case BPF_PERF_EVENT_URETPROBE:
+ show_perf_event_uprobe_json(info, json_wtr);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (!hashmap__empty(link_table)) {
+ struct hashmap_entry *entry;
+
+ jsonw_name(json_wtr, "pinned");
+ jsonw_start_array(json_wtr);
+ hashmap__for_each_key_entry(link_table, entry, info->id)
+ jsonw_string(json_wtr, entry->pvalue);
+ jsonw_end_array(json_wtr);
+ }
+
+ emit_obj_refs_json(refs_table, info->id, json_wtr);
+
+ jsonw_end_object(json_wtr);
+
+ return 0;
+}
+
+static void show_link_header_plain(struct bpf_link_info *info)
+{
+ const char *link_type_str;
+
+ printf("%u: ", info->id);
+ link_type_str = libbpf_bpf_link_type_str(info->type);
+ if (link_type_str)
+ printf("%s ", link_type_str);
+ else
+ printf("type %u ", info->type);
+
+ if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
+ printf("map %u ", info->struct_ops.map_id);
+ else
+ printf("prog %u ", info->prog_id);
+}
+
+static void show_link_attach_type_plain(__u32 attach_type)
+{
+ const char *attach_type_str;
+
+ attach_type_str = libbpf_bpf_attach_type_str(attach_type);
+ if (attach_type_str)
+ printf("attach_type %s ", attach_type_str);
+ else
+ printf("attach_type %u ", attach_type);
+}
+
+static void show_link_ifindex_plain(__u32 ifindex)
+{
+ char devname[IF_NAMESIZE * 2] = "(unknown)";
+ char tmpname[IF_NAMESIZE];
+ char *ret = NULL;
+
+ if (ifindex)
+ ret = if_indextoname(ifindex, tmpname);
+ else
+ snprintf(devname, sizeof(devname), "(detached)");
+ if (ret)
+ snprintf(devname, sizeof(devname), "%s(%d)",
+ tmpname, ifindex);
+ printf("ifindex %s ", devname);
+}
+
+static void show_iter_plain(struct bpf_link_info *info)
+{
+ const char *target_name = u64_to_ptr(info->iter.target_name);
+
+ printf("target_name %s ", target_name);
+
+ if (is_iter_map_target(target_name))
+ printf("map_id %u ", info->iter.map.map_id);
+ else if (is_iter_task_target(target_name)) {
+ if (info->iter.task.tid)
+ printf("tid %u ", info->iter.task.tid);
+ else if (info->iter.task.pid)
+ printf("pid %u ", info->iter.task.pid);
+ }
+
+ if (is_iter_cgroup_target(target_name)) {
+ printf("cgroup_id %llu ", info->iter.cgroup.cgroup_id);
+ printf("order %s ",
+ cgroup_order_string(info->iter.cgroup.order));
+ }
+}
+
+static const char * const pf2name[] = {
+ [NFPROTO_INET] = "inet",
+ [NFPROTO_IPV4] = "ip",
+ [NFPROTO_ARP] = "arp",
+ [NFPROTO_NETDEV] = "netdev",
+ [NFPROTO_BRIDGE] = "bridge",
+ [NFPROTO_IPV6] = "ip6",
+};
+
+static const char * const inethook2name[] = {
+ [NF_INET_PRE_ROUTING] = "prerouting",
+ [NF_INET_LOCAL_IN] = "input",
+ [NF_INET_FORWARD] = "forward",
+ [NF_INET_LOCAL_OUT] = "output",
+ [NF_INET_POST_ROUTING] = "postrouting",
+};
+
+static const char * const arphook2name[] = {
+ [NF_ARP_IN] = "input",
+ [NF_ARP_OUT] = "output",
+};
+
+void netfilter_dump_plain(const struct bpf_link_info *info)
+{
+ const char *hookname = NULL, *pfname = NULL;
+ unsigned int hook = info->netfilter.hooknum;
+ unsigned int pf = info->netfilter.pf;
+
+ if (pf < ARRAY_SIZE(pf2name))
+ pfname = pf2name[pf];
+
+ switch (pf) {
+ case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */
+ case NFPROTO_IPV4:
+ case NFPROTO_IPV6:
+ case NFPROTO_INET:
+ if (hook < ARRAY_SIZE(inethook2name))
+ hookname = inethook2name[hook];
+ break;
+	case NFPROTO_ARP:
+		if (hook < ARRAY_SIZE(arphook2name))
+			hookname = arphook2name[hook];
+		break;
+ default:
+ break;
+ }
+
+ if (pfname)
+ printf("\n\t%s", pfname);
+ else
+ printf("\n\tpf: %d", pf);
+
+ if (hookname)
+ printf(" %s", hookname);
+ else
+ printf(", hook %u,", hook);
+
+ printf(" prio %d", info->netfilter.priority);
+
+ if (info->netfilter.flags)
+ printf(" flags 0x%x", info->netfilter.flags);
+}
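+
+/* Example netfilter_dump_plain() output for an IPv4 prerouting hook at
+ * priority -128 (illustrative values):
+ *
+ *	ip prerouting prio -128
+ */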
+
+static void show_kprobe_multi_plain(struct bpf_link_info *info)
+{
+ __u32 i, j = 0;
+ __u64 *addrs;
+
+ if (!info->kprobe_multi.count)
+ return;
+
+ if (info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN)
+ printf("\n\tkretprobe.multi ");
+ else
+ printf("\n\tkprobe.multi ");
+ printf("func_cnt %u ", info->kprobe_multi.count);
+ addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs);
+ qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64);
+
+	/* Load the kernel symbol table only once, for all addresses. */
+ if (!dd.sym_count)
+ kernel_syms_load(&dd);
+ if (!dd.sym_count)
+ return;
+
+ printf("\n\t%-16s %s", "addr", "func [module]");
+ for (i = 0; i < dd.sym_count; i++) {
+ if (dd.sym_mapping[i].address != addrs[j])
+ continue;
+ printf("\n\t%016lx %s",
+ dd.sym_mapping[i].address, dd.sym_mapping[i].name);
+ if (dd.sym_mapping[i].module[0] != '\0')
+ printf(" [%s] ", dd.sym_mapping[i].module);
+ else
+ printf(" ");
+
+		if (++j == info->kprobe_multi.count)
+ break;
+ }
+}
+
+static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
+{
+ const char *buf;
+
+ buf = u64_to_ptr(info->perf_event.kprobe.func_name);
+ if (buf[0] == '\0' && !info->perf_event.kprobe.addr)
+ return;
+
+ if (info->perf_event.type == BPF_PERF_EVENT_KRETPROBE)
+ printf("\n\tkretprobe ");
+ else
+ printf("\n\tkprobe ");
+ if (info->perf_event.kprobe.addr)
+ printf("%llx ", info->perf_event.kprobe.addr);
+ printf("%s", buf);
+ if (info->perf_event.kprobe.offset)
+ printf("+%#x", info->perf_event.kprobe.offset);
+ printf(" ");
+}
+
+static void show_perf_event_uprobe_plain(struct bpf_link_info *info)
+{
+ const char *buf;
+
+ buf = u64_to_ptr(info->perf_event.uprobe.file_name);
+ if (buf[0] == '\0')
+ return;
+
+ if (info->perf_event.type == BPF_PERF_EVENT_URETPROBE)
+ printf("\n\turetprobe ");
+ else
+ printf("\n\tuprobe ");
+ printf("%s+%#x ", buf, info->perf_event.uprobe.offset);
+}
+
+static void show_perf_event_tracepoint_plain(struct bpf_link_info *info)
+{
+ const char *buf;
+
+ buf = u64_to_ptr(info->perf_event.tracepoint.tp_name);
+ if (buf[0] == '\0')
+ return;
+
+ printf("\n\ttracepoint %s ", buf);
+}
+
+static void show_perf_event_event_plain(struct bpf_link_info *info)
+{
+ __u64 config = info->perf_event.event.config;
+ __u32 type = info->perf_event.event.type;
+ const char *perf_type, *perf_config;
+
+ printf("\n\tevent ");
+ perf_type = perf_event_name(perf_type_name, type);
+ if (perf_type)
+ printf("%s:", perf_type);
+ else
+		printf("%u:", type);
+
+ perf_config = perf_config_str(type, config);
+ if (perf_config)
+ printf("%s ", perf_config);
+ else
+ printf("%llu ", config);
+
+ if (type == PERF_TYPE_HW_CACHE && perf_config)
+ free((void *)perf_config);
+}
+
+static int show_link_close_plain(int fd, struct bpf_link_info *info)
+{
+ struct bpf_prog_info prog_info;
+ const char *prog_type_str;
+ int err;
+
+ show_link_header_plain(info);
+
+ switch (info->type) {
+ case BPF_LINK_TYPE_RAW_TRACEPOINT:
+ printf("\n\ttp '%s' ",
+ (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
+ break;
+ case BPF_LINK_TYPE_TRACING:
+ err = get_prog_info(info->prog_id, &prog_info);
+ if (err)
+ return err;
+
+ prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (prog_type_str)
+ printf("\n\tprog_type %s ", prog_type_str);
+ else
+ printf("\n\tprog_type %u ", prog_info.type);
+
+ show_link_attach_type_plain(info->tracing.attach_type);
+ if (info->tracing.target_obj_id || info->tracing.target_btf_id)
+ printf("\n\ttarget_obj_id %u target_btf_id %u ",
+ info->tracing.target_obj_id,
+ info->tracing.target_btf_id);
+ break;
+ case BPF_LINK_TYPE_CGROUP:
+		printf("\n\tcgroup_id %llu ", info->cgroup.cgroup_id);
+ show_link_attach_type_plain(info->cgroup.attach_type);
+ break;
+ case BPF_LINK_TYPE_ITER:
+ show_iter_plain(info);
+ break;
+ case BPF_LINK_TYPE_NETNS:
+ printf("\n\tnetns_ino %u ", info->netns.netns_ino);
+ show_link_attach_type_plain(info->netns.attach_type);
+ break;
+ case BPF_LINK_TYPE_NETFILTER:
+ netfilter_dump_plain(info);
+ break;
+ case BPF_LINK_TYPE_TCX:
+ printf("\n\t");
+ show_link_ifindex_plain(info->tcx.ifindex);
+ show_link_attach_type_plain(info->tcx.attach_type);
+ break;
+ case BPF_LINK_TYPE_XDP:
+ printf("\n\t");
+ show_link_ifindex_plain(info->xdp.ifindex);
+ break;
+ case BPF_LINK_TYPE_KPROBE_MULTI:
+ show_kprobe_multi_plain(info);
+ break;
+ case BPF_LINK_TYPE_PERF_EVENT:
+ switch (info->perf_event.type) {
+ case BPF_PERF_EVENT_EVENT:
+ show_perf_event_event_plain(info);
+ break;
+ case BPF_PERF_EVENT_TRACEPOINT:
+ show_perf_event_tracepoint_plain(info);
+ break;
+ case BPF_PERF_EVENT_KPROBE:
+ case BPF_PERF_EVENT_KRETPROBE:
+ show_perf_event_kprobe_plain(info);
+ break;
+ case BPF_PERF_EVENT_UPROBE:
+ case BPF_PERF_EVENT_URETPROBE:
+ show_perf_event_uprobe_plain(info);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (!hashmap__empty(link_table)) {
+ struct hashmap_entry *entry;
+
+ hashmap__for_each_key_entry(link_table, entry, info->id)
+ printf("\n\tpinned %s", (char *)entry->pvalue);
+ }
+ emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
+
+ printf("\n");
+
+ return 0;
+}
+
+static int do_show_link(int fd)
+{
+ struct bpf_link_info info;
+ __u32 len = sizeof(info);
+ __u64 *addrs = NULL;
+ char buf[PATH_MAX];
+ int count;
+ int err;
+
+ memset(&info, 0, sizeof(info));
+ buf[0] = '\0';
+again:
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get link info: %s",
+ strerror(errno));
+ close(fd);
+ return err;
+ }
+ if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
+ !info.raw_tracepoint.tp_name) {
+ info.raw_tracepoint.tp_name = ptr_to_u64(&buf);
+ info.raw_tracepoint.tp_name_len = sizeof(buf);
+ goto again;
+ }
+ if (info.type == BPF_LINK_TYPE_ITER &&
+ !info.iter.target_name) {
+ info.iter.target_name = ptr_to_u64(&buf);
+ info.iter.target_name_len = sizeof(buf);
+ goto again;
+ }
+ if (info.type == BPF_LINK_TYPE_KPROBE_MULTI &&
+ !info.kprobe_multi.addrs) {
+ count = info.kprobe_multi.count;
+ if (count) {
+ addrs = calloc(count, sizeof(__u64));
+ if (!addrs) {
+ p_err("mem alloc failed");
+ close(fd);
+ return -ENOMEM;
+ }
+ info.kprobe_multi.addrs = ptr_to_u64(addrs);
+ goto again;
+ }
+ }
+ if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
+ switch (info.perf_event.type) {
+ case BPF_PERF_EVENT_TRACEPOINT:
+ if (!info.perf_event.tracepoint.tp_name) {
+ info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
+ info.perf_event.tracepoint.name_len = sizeof(buf);
+ goto again;
+ }
+ break;
+ case BPF_PERF_EVENT_KPROBE:
+ case BPF_PERF_EVENT_KRETPROBE:
+ if (!info.perf_event.kprobe.func_name) {
+ info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
+ info.perf_event.kprobe.name_len = sizeof(buf);
+ goto again;
+ }
+ break;
+ case BPF_PERF_EVENT_UPROBE:
+ case BPF_PERF_EVENT_URETPROBE:
+ if (!info.perf_event.uprobe.file_name) {
+ info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
+ info.perf_event.uprobe.name_len = sizeof(buf);
+ goto again;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (json_output)
+ show_link_close_json(fd, &info);
+ else
+ show_link_close_plain(fd, &info);
+
+ if (addrs)
+ free(addrs);
+ close(fd);
+ return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+ __u32 id = 0;
+ int err, fd;
+
+ if (show_pinned) {
+ link_table = hashmap__new(hash_fn_for_key_as_id,
+ equal_fn_for_key_as_id, NULL);
+ if (IS_ERR(link_table)) {
+ p_err("failed to create hashmap for pinned paths");
+ return -1;
+ }
+ build_pinned_obj_table(link_table, BPF_OBJ_LINK);
+ }
+ build_obj_refs_table(&refs_table, BPF_OBJ_LINK);
+
+ if (argc == 2) {
+ fd = link_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return fd;
+ do_show_link(fd);
+ goto out;
+ }
+
+ if (argc)
+ return BAD_ARG();
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ while (true) {
+ err = bpf_link_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT)
+ break;
+ p_err("can't get next link: %s%s", strerror(errno),
+ errno == EINVAL ? " -- kernel too old?" : "");
+ break;
+ }
+
+ fd = bpf_link_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
+ p_err("can't get link by id (%u): %s",
+ id, strerror(errno));
+ break;
+ }
+
+ err = do_show_link(fd);
+ if (err)
+ break;
+ }
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ delete_obj_refs_table(refs_table);
+
+ if (show_pinned)
+ delete_pinned_obj_table(link_table);
+
+out:
+ if (dd.sym_count)
+ kernel_syms_destroy(&dd);
+ return errno == ENOENT ? 0 : -1;
+}
+
+static int do_pin(int argc, char **argv)
+{
+ int err;
+
+ err = do_pin_any(argc, argv, link_parse_fd);
+ if (!err && json_output)
+ jsonw_null(json_wtr);
+ return err;
+}
+
+static int do_detach(int argc, char **argv)
+{
+ int err, fd;
+
+ if (argc != 2) {
+		p_err("link specifier is invalid or missing");
+ return 1;
+ }
+
+ fd = link_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return 1;
+
+ err = bpf_link_detach(fd);
+ if (err)
+ err = -errno;
+ close(fd);
+ if (err) {
+ p_err("failed link detach: %s", strerror(-err));
+ return 1;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %1$s %2$s { show | list } [LINK]\n"
+ " %1$s %2$s pin LINK FILE\n"
+ " %1$s %2$s detach LINK\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " " HELP_SPEC_LINK "\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-f|--bpffs} | {-n|--nomount} }\n"
+ "",
+ bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "help", do_help },
+ { "pin", do_pin },
+ { "detach", do_detach },
+ { 0 }
+};
+
+int do_link(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
new file mode 100644
index 0000000000..08d0ac543c
--- /dev/null
+++ b/tools/bpf/bpftool/main.c
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <linux/bpf.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/hashmap.h>
+#include <bpf/libbpf.h>
+
+#include "main.h"
+
+#define BATCH_LINE_LEN_MAX 65536
+#define BATCH_ARG_NB_MAX 4096
+
+const char *bin_name;
+static int last_argc;
+static char **last_argv;
+static int (*last_do_help)(int argc, char **argv);
+json_writer_t *json_wtr;
+bool pretty_output;
+bool json_output;
+bool show_pinned;
+bool block_mount;
+bool verifier_logs;
+bool relaxed_maps;
+bool use_loader;
+struct btf *base_btf;
+struct hashmap *refs_table;
+
+static void __noreturn clean_and_exit(int i)
+{
+ if (json_output)
+ jsonw_destroy(&json_wtr);
+
+ exit(i);
+}
+
+void usage(void)
+{
+ last_do_help(last_argc - 1, last_argv + 1);
+
+ clean_and_exit(-1);
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %s [OPTIONS] OBJECT { COMMAND | help }\n"
+ " %s batch file FILE\n"
+ " %s version\n"
+ "\n"
+ " OBJECT := { prog | map | link | cgroup | perf | net | feature | btf | gen | struct_ops | iter }\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-V|--version} }\n"
+ "",
+ bin_name, bin_name, bin_name);
+
+ return 0;
+}
+
+static int do_batch(int argc, char **argv);
+static int do_version(int argc, char **argv);
+
+static const struct cmd commands[] = {
+ { "help", do_help },
+ { "batch", do_batch },
+ { "prog", do_prog },
+ { "map", do_map },
+ { "link", do_link },
+ { "cgroup", do_cgroup },
+ { "perf", do_perf },
+ { "net", do_net },
+ { "feature", do_feature },
+ { "btf", do_btf },
+ { "gen", do_gen },
+ { "struct_ops", do_struct_ops },
+ { "iter", do_iter },
+ { "version", do_version },
+ { 0 }
+};
+
+#ifndef BPFTOOL_VERSION
+/* bpftool's major and minor version numbers are aligned on libbpf's. There is
+ * an offset of 6 for the version number, because bpftool's version was higher
+ * than libbpf's when we adopted this scheme. The patch number remains at 0
+ * for now. Set BPFTOOL_VERSION to override.
+ */
+#define BPFTOOL_MAJOR_VERSION (LIBBPF_MAJOR_VERSION + 6)
+#define BPFTOOL_MINOR_VERSION LIBBPF_MINOR_VERSION
+#define BPFTOOL_PATCH_VERSION 0
+#endif
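+
+/* With this scheme, libbpf 1.2, for instance, corresponds to bpftool
+ * v7.2.0.
+ */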
+
+static void
+print_feature(const char *feature, bool state, unsigned int *nb_features)
+{
+ if (state) {
+ printf("%s %s", *nb_features ? "," : "", feature);
+ *nb_features = *nb_features + 1;
+ }
+}
+
+static int do_version(int argc, char **argv)
+{
+#ifdef HAVE_LIBBFD_SUPPORT
+ const bool has_libbfd = true;
+#else
+ const bool has_libbfd = false;
+#endif
+#ifdef HAVE_LLVM_SUPPORT
+ const bool has_llvm = true;
+#else
+ const bool has_llvm = false;
+#endif
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+ const bool has_skeletons = false;
+#else
+ const bool has_skeletons = true;
+#endif
+ bool bootstrap = false;
+ int i;
+
+ for (i = 0; commands[i].cmd; i++) {
+ if (!strcmp(commands[i].cmd, "prog")) {
+ /* Assume we run a bootstrap version if "bpftool prog"
+ * is not available.
+ */
+ bootstrap = !commands[i].func;
+ break;
+ }
+ }
+
+ if (json_output) {
+ jsonw_start_object(json_wtr); /* root object */
+
+ jsonw_name(json_wtr, "version");
+#ifdef BPFTOOL_VERSION
+ jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
+#else
+ jsonw_printf(json_wtr, "\"%d.%d.%d\"", BPFTOOL_MAJOR_VERSION,
+ BPFTOOL_MINOR_VERSION, BPFTOOL_PATCH_VERSION);
+#endif
+ jsonw_name(json_wtr, "libbpf_version");
+ jsonw_printf(json_wtr, "\"%d.%d\"",
+ libbpf_major_version(), libbpf_minor_version());
+
+ jsonw_name(json_wtr, "features");
+ jsonw_start_object(json_wtr); /* features */
+ jsonw_bool_field(json_wtr, "libbfd", has_libbfd);
+ jsonw_bool_field(json_wtr, "llvm", has_llvm);
+ jsonw_bool_field(json_wtr, "skeletons", has_skeletons);
+ jsonw_bool_field(json_wtr, "bootstrap", bootstrap);
+ jsonw_end_object(json_wtr); /* features */
+
+ jsonw_end_object(json_wtr); /* root object */
+ } else {
+ unsigned int nb_features = 0;
+
+#ifdef BPFTOOL_VERSION
+ printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
+#else
+ printf("%s v%d.%d.%d\n", bin_name, BPFTOOL_MAJOR_VERSION,
+ BPFTOOL_MINOR_VERSION, BPFTOOL_PATCH_VERSION);
+#endif
+ printf("using libbpf %s\n", libbpf_version_string());
+ printf("features:");
+ print_feature("libbfd", has_libbfd, &nb_features);
+ print_feature("llvm", has_llvm, &nb_features);
+ print_feature("skeletons", has_skeletons, &nb_features);
+ print_feature("bootstrap", bootstrap, &nb_features);
+ printf("\n");
+ }
+ return 0;
+}
+
+int cmd_select(const struct cmd *cmds, int argc, char **argv,
+ int (*help)(int argc, char **argv))
+{
+ unsigned int i;
+
+ last_argc = argc;
+ last_argv = argv;
+ last_do_help = help;
+
+ if (argc < 1 && cmds[0].func)
+ return cmds[0].func(argc, argv);
+
+ for (i = 0; cmds[i].cmd; i++) {
+ if (is_prefix(*argv, cmds[i].cmd)) {
+ if (!cmds[i].func) {
+ p_err("command '%s' is not supported in bootstrap mode",
+ cmds[i].cmd);
+ return -1;
+ }
+ return cmds[i].func(argc - 1, argv + 1);
+ }
+ }
+
+ help(argc - 1, argv + 1);
+
+ return -1;
+}
+
+bool is_prefix(const char *pfx, const char *str)
+{
+ if (!pfx)
+ return false;
+ if (strlen(str) < strlen(pfx))
+ return false;
+
+ return !memcmp(str, pfx, strlen(pfx));
+}
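+
+/* is_prefix("sh", "show") is true: this is what allows abbreviated
+ * keywords, e.g. "bpftool p s" for "bpftool prog show".
+ */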
+
+/* Last argument MUST be NULL pointer */
+int detect_common_prefix(const char *arg, ...)
+{
+ unsigned int count = 0;
+ const char *ref;
+ char msg[256];
+ va_list ap;
+
+ snprintf(msg, sizeof(msg), "ambiguous prefix: '%s' could be '", arg);
+ va_start(ap, arg);
+ while ((ref = va_arg(ap, const char *))) {
+ if (!is_prefix(arg, ref))
+ continue;
+ count++;
+ if (count > 1)
+ strncat(msg, "' or '", sizeof(msg) - strlen(msg) - 1);
+ strncat(msg, ref, sizeof(msg) - strlen(msg) - 1);
+ }
+ va_end(ap);
+ strncat(msg, "'", sizeof(msg) - strlen(msg) - 1);
+
+ if (count >= 2) {
+ p_err("%s", msg);
+ return -1;
+ }
+
+ return 0;
+}
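+
+/* Example (with hypothetical keywords): given *argv == "du",
+ *
+ *	detect_common_prefix(*argv, "dump", "dup", NULL)
+ *
+ * fails with: ambiguous prefix: 'du' could be 'dump' or 'dup'.
+ */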
+
+void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep)
+{
+ unsigned char *data = arg;
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ const char *pfx = "";
+
+ if (!i)
+ /* nothing */;
+ else if (!(i % 16))
+ fprintf(f, "\n");
+ else if (!(i % 8))
+ fprintf(f, " ");
+ else
+ pfx = sep;
+
+ fprintf(f, "%s%02hhx", i ? pfx : "", data[i]);
+ }
+}
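+
+/* fprint_hex() separates bytes with "sep", replacing the separator with
+ * a space every 8 bytes and a newline every 16. Four bytes { 1, 2, 3, 4 }
+ * with sep " " thus print as "01 02 03 04".
+ */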
+
+/* Split command line into argument vector. */
+static int make_args(char *line, char *n_argv[], int maxargs, int cmd_nb)
+{
+ static const char ws[] = " \t\r\n";
+ char *cp = line;
+ int n_argc = 0;
+
+ while (*cp) {
+ /* Skip leading whitespace. */
+ cp += strspn(cp, ws);
+
+ if (*cp == '\0')
+ break;
+
+ if (n_argc >= (maxargs - 1)) {
+ p_err("too many arguments to command %d", cmd_nb);
+ return -1;
+ }
+
+ /* Word begins with quote. */
+ if (*cp == '\'' || *cp == '"') {
+ char quote = *cp++;
+
+ n_argv[n_argc++] = cp;
+ /* Find ending quote. */
+ cp = strchr(cp, quote);
+ if (!cp) {
+ p_err("unterminated quoted string in command %d",
+ cmd_nb);
+ return -1;
+ }
+ } else {
+ n_argv[n_argc++] = cp;
+
+ /* Find end of word. */
+ cp += strcspn(cp, ws);
+ if (*cp == '\0')
+ break;
+ }
+
+ /* Separate words. */
+ *cp++ = 0;
+ }
+ n_argv[n_argc] = NULL;
+
+ return n_argc;
+}
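+
+/* For example, make_args() splits the line:
+ *
+ *	map update id 5 key 0 0 0 0 value '1 2 3 4'
+ *
+ * into eleven arguments, the quoted word forming the single argument
+ * "1 2 3 4".
+ */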
+
+static int do_batch(int argc, char **argv)
+{
+ char buf[BATCH_LINE_LEN_MAX], contline[BATCH_LINE_LEN_MAX];
+ char *n_argv[BATCH_ARG_NB_MAX];
+ unsigned int lines = 0;
+ int n_argc;
+ FILE *fp;
+ char *cp;
+ int err = 0;
+ int i;
+
+ if (argc < 2) {
+ p_err("too few parameters for batch");
+ return -1;
+ } else if (argc > 2) {
+ p_err("too many parameters for batch");
+ return -1;
+ } else if (!is_prefix(*argv, "file")) {
+ p_err("expected 'file', got: %s", *argv);
+ return -1;
+ }
+ NEXT_ARG();
+
+ if (!strcmp(*argv, "-"))
+ fp = stdin;
+ else
+ fp = fopen(*argv, "r");
+ if (!fp) {
+ p_err("Can't open file (%s): %s", *argv, strerror(errno));
+ return -1;
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ while (fgets(buf, sizeof(buf), fp)) {
+ cp = strchr(buf, '#');
+ if (cp)
+ *cp = '\0';
+
+ if (strlen(buf) == sizeof(buf) - 1) {
+ errno = E2BIG;
+ break;
+ }
+
+ /* Append continuation lines if any (coming after a line ending
+ * with '\' in the batch file).
+ */
+ while ((cp = strstr(buf, "\\\n")) != NULL) {
+ if (!fgets(contline, sizeof(contline), fp) ||
+ strlen(contline) == 0) {
+ p_err("missing continuation line on command %d",
+ lines);
+ err = -1;
+ goto err_close;
+ }
+
+ cp = strchr(contline, '#');
+ if (cp)
+ *cp = '\0';
+
+ if (strlen(buf) + strlen(contline) + 1 > sizeof(buf)) {
+ p_err("command %d is too long", lines);
+ err = -1;
+ goto err_close;
+ }
+ buf[strlen(buf) - 2] = '\0';
+ strcat(buf, contline);
+ }
+
+ n_argc = make_args(buf, n_argv, BATCH_ARG_NB_MAX, lines);
+ if (!n_argc)
+ continue;
+ if (n_argc < 0) {
+ err = n_argc;
+ goto err_close;
+ }
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_name(json_wtr, "command");
+ jsonw_start_array(json_wtr);
+ for (i = 0; i < n_argc; i++)
+ jsonw_string(json_wtr, n_argv[i]);
+ jsonw_end_array(json_wtr);
+ jsonw_name(json_wtr, "output");
+ }
+
+ err = cmd_select(commands, n_argc, n_argv, do_help);
+
+ if (json_output)
+ jsonw_end_object(json_wtr);
+
+ if (err)
+ goto err_close;
+
+ lines++;
+ }
+
+ if (errno && errno != ENOENT) {
+ p_err("reading batch file failed: %s", strerror(errno));
+ err = -1;
+ } else {
+ if (!json_output)
+			printf("processed %u commands\n", lines);
+ }
+err_close:
+ if (fp != stdin)
+ fclose(fp);
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ return err;
+}
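+
+/* A batch file holds one bpftool command per line; '#' starts a comment
+ * and a trailing '\' continues a command on the next line. A minimal
+ * example (object IDs are illustrative):
+ *
+ *	# show all programs, then dump map 42
+ *	prog show
+ *	map dump \
+ *		id 42
+ */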
+
+int main(int argc, char **argv)
+{
+ static const struct option options[] = {
+ { "json", no_argument, NULL, 'j' },
+ { "help", no_argument, NULL, 'h' },
+ { "pretty", no_argument, NULL, 'p' },
+ { "version", no_argument, NULL, 'V' },
+ { "bpffs", no_argument, NULL, 'f' },
+ { "mapcompat", no_argument, NULL, 'm' },
+ { "nomount", no_argument, NULL, 'n' },
+ { "debug", no_argument, NULL, 'd' },
+ { "use-loader", no_argument, NULL, 'L' },
+ { "base-btf", required_argument, NULL, 'B' },
+ { 0 }
+ };
+ bool version_requested = false;
+ int opt, ret;
+
+ setlinebuf(stdout);
+
+#ifdef USE_LIBCAP
+	/* Libcap < 2.63 hooks before main() to compute the number of
+	 * capabilities of the running kernel, and in doing so calls prctl(),
+	 * which may fail and set errno to a non-zero value.
+	 * Reset errno to make sure this does not interfere with batch mode.
+	 */
+ errno = 0;
+#endif
+
+ last_do_help = do_help;
+ pretty_output = false;
+ json_output = false;
+ show_pinned = false;
+ block_mount = false;
+ bin_name = "bpftool";
+
+ opterr = 0;
+ while ((opt = getopt_long(argc, argv, "VhpjfLmndB:l",
+ options, NULL)) >= 0) {
+ switch (opt) {
+ case 'V':
+ version_requested = true;
+ break;
+ case 'h':
+ return do_help(argc, argv);
+ case 'p':
+ pretty_output = true;
+ /* fall through */
+ case 'j':
+ if (!json_output) {
+ json_wtr = jsonw_new(stdout);
+ if (!json_wtr) {
+ p_err("failed to create JSON writer");
+ return -1;
+ }
+ json_output = true;
+ }
+ jsonw_pretty(json_wtr, pretty_output);
+ break;
+ case 'f':
+ show_pinned = true;
+ break;
+ case 'm':
+ relaxed_maps = true;
+ break;
+ case 'n':
+ block_mount = true;
+ break;
+ case 'd':
+ libbpf_set_print(print_all_levels);
+ verifier_logs = true;
+ break;
+ case 'B':
+ base_btf = btf__parse(optarg, NULL);
+ if (!base_btf) {
+				p_err("failed to parse base BTF at '%s': %d",
+				      optarg, -errno);
+ return -1;
+ }
+ break;
+ case 'L':
+ use_loader = true;
+ break;
+ default:
+ p_err("unrecognized option '%s'", argv[optind - 1]);
+ if (json_output)
+ clean_and_exit(-1);
+ else
+ usage();
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
+ if (argc < 0)
+ usage();
+
+ if (version_requested)
+ return do_version(argc, argv);
+
+ ret = cmd_select(commands, argc, argv, do_help);
+
+ if (json_output)
+ jsonw_destroy(&json_wtr);
+
+ btf__free(base_btf);
+
+ return ret;
+}
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
new file mode 100644
index 0000000000..b8bb08d10d
--- /dev/null
+++ b/tools/bpf/bpftool/main.h
@@ -0,0 +1,273 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+
+#ifndef __BPF_TOOL_H
+#define __BPF_TOOL_H
+
+/* BFD and kernel.h both define GCC_VERSION, differently */
+#undef GCC_VERSION
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/bpf.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+
+#include <bpf/hashmap.h>
+#include <bpf/libbpf.h>
+
+#include "json_writer.h"
+
+/* Make sure we do not use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64)(unsigned long)ptr;
+}
+
+static inline void *u64_to_ptr(__u64 ptr)
+{
+ return (void *)(unsigned long)ptr;
+}
+
+#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
+#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
+#define BAD_ARG() ({ p_err("what is '%s'?", *argv); -1; })
+#define GET_ARG() ({ argc--; *argv++; })
+#define REQ_ARGS(cnt) \
+ ({ \
+ int _cnt = (cnt); \
+ bool _res; \
+ \
+ if (argc < _cnt) { \
+ p_err("'%s' needs at least %d arguments, %d found", \
+ argv[-1], _cnt, argc); \
+ _res = false; \
+ } else { \
+ _res = true; \
+ } \
+ _res; \
+ })
+
+#define ERR_MAX_LEN 1024
+
+#define BPF_TAG_FMT "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx"
+
+#define HELP_SPEC_PROGRAM \
+ "PROG := { id PROG_ID | pinned FILE | tag PROG_TAG | name PROG_NAME }"
+#define HELP_SPEC_OPTIONS \
+ "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug}"
+#define HELP_SPEC_MAP \
+ "MAP := { id MAP_ID | pinned FILE | name MAP_NAME }"
+#define HELP_SPEC_LINK \
+ "LINK := { id LINK_ID | pinned FILE }"
+
+/* keep in sync with the definition in skeleton/pid_iter.bpf.c */
+enum bpf_obj_type {
+ BPF_OBJ_UNKNOWN,
+ BPF_OBJ_PROG,
+ BPF_OBJ_MAP,
+ BPF_OBJ_LINK,
+ BPF_OBJ_BTF,
+};
+
+extern const char *bin_name;
+
+extern json_writer_t *json_wtr;
+extern bool json_output;
+extern bool show_pinned;
+extern bool show_pids;
+extern bool block_mount;
+extern bool verifier_logs;
+extern bool relaxed_maps;
+extern bool use_loader;
+extern struct btf *base_btf;
+extern struct hashmap *refs_table;
+
+void __printf(1, 2) p_err(const char *fmt, ...);
+void __printf(1, 2) p_info(const char *fmt, ...);
+
+bool is_prefix(const char *pfx, const char *str);
+int detect_common_prefix(const char *arg, ...);
+void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
+void usage(void) __noreturn;
+
+void set_max_rlimit(void);
+
+int mount_tracefs(const char *target);
+
+struct obj_ref {
+ int pid;
+ char comm[16];
+};
+
+struct obj_refs {
+ int ref_cnt;
+ bool has_bpf_cookie;
+ struct obj_ref *refs;
+ __u64 bpf_cookie;
+};
+
+struct btf;
+struct bpf_line_info;
+
+int build_pinned_obj_table(struct hashmap *table,
+ enum bpf_obj_type type);
+void delete_pinned_obj_table(struct hashmap *table);
+__weak int build_obj_refs_table(struct hashmap **table,
+ enum bpf_obj_type type);
+__weak void delete_obj_refs_table(struct hashmap *table);
+__weak void emit_obj_refs_json(struct hashmap *table, __u32 id,
+ json_writer_t *json_wtr);
+__weak void emit_obj_refs_plain(struct hashmap *table, __u32 id,
+ const char *prefix);
+void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
+void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
+
+struct cmd {
+ const char *cmd;
+ int (*func)(int argc, char **argv);
+};
+
+int cmd_select(const struct cmd *cmds, int argc, char **argv,
+ int (*help)(int argc, char **argv));
+
+#define MAX_PROG_FULL_NAME 128
+void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
+ char *name_buff, size_t buff_len);
+
+int get_fd_type(int fd);
+const char *get_fd_type_name(enum bpf_obj_type type);
+char *get_fdinfo(int fd, const char *key);
+int open_obj_pinned(const char *path, bool quiet);
+int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type);
+int mount_bpffs_for_pin(const char *name, bool is_dir);
+int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
+int do_pin_fd(int fd, const char *name);
+
+/* commands available in bootstrap mode */
+int do_gen(int argc, char **argv);
+int do_btf(int argc, char **argv);
+
+/* non-bootstrap only commands */
+int do_prog(int argc, char **argv) __weak;
+int do_map(int argc, char **argv) __weak;
+int do_link(int argc, char **argv) __weak;
+int do_event_pipe(int argc, char **argv) __weak;
+int do_cgroup(int argc, char **argv) __weak;
+int do_perf(int argc, char **argv) __weak;
+int do_net(int argc, char **argv) __weak;
+int do_tracelog(int argc, char **argv) __weak;
+int do_feature(int argc, char **argv) __weak;
+int do_struct_ops(int argc, char **argv) __weak;
+int do_iter(int argc, char **argv) __weak;
+
+int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
+int prog_parse_fd(int *argc, char ***argv);
+int prog_parse_fds(int *argc, char ***argv, int **fds);
+int map_parse_fd(int *argc, char ***argv);
+int map_parse_fds(int *argc, char ***argv, int **fds);
+int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
+ __u32 *info_len);
+
+struct bpf_prog_linfo;
+#if defined(HAVE_LLVM_SUPPORT) || defined(HAVE_LIBBFD_SUPPORT)
+int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum);
+int disasm_init(void);
+#else
+static inline
+int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum)
+{
+ return 0;
+}
+static inline int disasm_init(void)
+{
+ p_err("No JIT disassembly support");
+ return -1;
+}
+#endif
+void print_data_json(uint8_t *data, size_t len);
+void print_hex_data_json(uint8_t *data, size_t len);
+
+unsigned int get_page_size(void);
+unsigned int get_possible_cpus(void);
+const char *
+ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt);
+
+struct btf_dumper {
+ const struct btf *btf;
+ json_writer_t *jw;
+ bool is_plain_text;
+ bool prog_id_as_func_ptr;
+};
+
+/* btf_dumper_type - print data along with type information
+ * @d: an instance containing context for dumping types
+ * @type_id: index in btf->types array. this points to the type to be dumped
+ * @data: pointer to the actual data, i.e. the values to be printed
+ *
+ * Returns zero on success and negative error code otherwise
+ */
+int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
+ const void *data);
+void btf_dumper_type_only(const struct btf *btf, __u32 func_type_id,
+ char *func_only, int size);
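+
+/* Typical btf_dumper_type() usage, assuming the caller already set up
+ * 'btf' and a json_writer 'jw' (see print_entry_json() in map.c for a
+ * real instance):
+ *
+ *	struct btf_dumper d = { .btf = btf, .jw = jw,
+ *				.is_plain_text = false };
+ *
+ *	err = btf_dumper_type(&d, info->btf_value_type_id, value);
+ */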
+
+void btf_dump_linfo_plain(const struct btf *btf,
+ const struct bpf_line_info *linfo,
+ const char *prefix, bool linum);
+void btf_dump_linfo_json(const struct btf *btf,
+ const struct bpf_line_info *linfo, bool linum);
+void btf_dump_linfo_dotlabel(const struct btf *btf,
+ const struct bpf_line_info *linfo, bool linum);
+
+struct nlattr;
+struct ifinfomsg;
+struct tcmsg;
+int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb);
+int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,
+ const char *devname, int ifindex);
+
+int print_all_levels(__maybe_unused enum libbpf_print_level level,
+ const char *format, va_list args);
+
+size_t hash_fn_for_key_as_id(long key, void *ctx);
+bool equal_fn_for_key_as_id(long k1, long k2, void *ctx);
+
+/* bpf_attach_type_input_str - convert the provided attach type value into a
+ * textual representation that we accept for input purposes.
+ *
+ * This function is similar in nature to libbpf_bpf_attach_type_str, but
+ * recognizes some attach type names that have been used by the program in the
+ * past and which do not follow the string inference scheme that libbpf uses.
+ * These textual representations should only be used for user input.
+ *
+ * @t: The attach type
+ * Returns a pointer to a static string identifying the attach type. NULL is
+ * returned for unknown bpf_attach_type values.
+ */
+const char *bpf_attach_type_input_str(enum bpf_attach_type t);
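+
+/* For example, this function maps BPF_CGROUP_INET_INGRESS to the
+ * historical input name "ingress", where libbpf_bpf_attach_type_str()
+ * would produce "cgroup_inet_ingress".
+ */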
+
+static inline bool hashmap__empty(struct hashmap *map)
+{
+ return map ? hashmap__size(map) == 0 : true;
+}
+
+int pathname_concat(char *buf, int buf_sz, const char *path,
+ const char *name);
+
+/* print netfilter bpf_link info */
+void netfilter_dump_plain(const struct bpf_link_info *info);
+void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr);
+#endif
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
new file mode 100644
index 0000000000..f98f7bbea2
--- /dev/null
+++ b/tools/bpf/bpftool/map.c
@@ -0,0 +1,1499 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <net/if.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/hashmap.h>
+
+#include "json_writer.h"
+#include "main.h"
+
+static struct hashmap *map_table;
+
+static bool map_is_per_cpu(__u32 type)
+{
+ return type == BPF_MAP_TYPE_PERCPU_HASH ||
+ type == BPF_MAP_TYPE_PERCPU_ARRAY ||
+ type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
+ type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE;
+}
+
+static bool map_is_map_of_maps(__u32 type)
+{
+ return type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
+ type == BPF_MAP_TYPE_HASH_OF_MAPS;
+}
+
+static bool map_is_map_of_progs(__u32 type)
+{
+ return type == BPF_MAP_TYPE_PROG_ARRAY;
+}
+
+static int map_type_from_str(const char *type)
+{
+ const char *map_type_str;
+ unsigned int i;
+
+ for (i = 0; ; i++) {
+ map_type_str = libbpf_bpf_map_type_str(i);
+ if (!map_type_str)
+ break;
+
+ /* Don't allow prefixing in case of possible future shadowing */
+ if (!strcmp(map_type_str, type))
+ return i;
+ }
+ return -1;
+}
+
+static void *alloc_value(struct bpf_map_info *info)
+{
+ if (map_is_per_cpu(info->type))
+ return malloc(round_up(info->value_size, 8) *
+ get_possible_cpus());
+ else
+ return malloc(info->value_size);
+}
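+
+/* Per-CPU values are laid out as one 8-byte-aligned slot per possible
+ * CPU: a 5-byte value on a 4-CPU system, for instance, takes
+ * round_up(5, 8) * 4 = 32 bytes.
+ */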
+
+static int do_dump_btf(const struct btf_dumper *d,
+ struct bpf_map_info *map_info, void *key,
+ void *value)
+{
+ __u32 value_id;
+ int ret = 0;
+
+ /* start of key-value pair */
+ jsonw_start_object(d->jw);
+
+ if (map_info->btf_key_type_id) {
+ jsonw_name(d->jw, "key");
+
+ ret = btf_dumper_type(d, map_info->btf_key_type_id, key);
+ if (ret)
+ goto err_end_obj;
+ }
+
+ value_id = map_info->btf_vmlinux_value_type_id ?
+ : map_info->btf_value_type_id;
+
+ if (!map_is_per_cpu(map_info->type)) {
+ jsonw_name(d->jw, "value");
+ ret = btf_dumper_type(d, value_id, value);
+ } else {
+ unsigned int i, n, step;
+
+ jsonw_name(d->jw, "values");
+ jsonw_start_array(d->jw);
+ n = get_possible_cpus();
+ step = round_up(map_info->value_size, 8);
+ for (i = 0; i < n; i++) {
+ jsonw_start_object(d->jw);
+ jsonw_int_field(d->jw, "cpu", i);
+ jsonw_name(d->jw, "value");
+ ret = btf_dumper_type(d, value_id, value + i * step);
+ jsonw_end_object(d->jw);
+ if (ret)
+ break;
+ }
+ jsonw_end_array(d->jw);
+ }
+
+err_end_obj:
+ /* end of key-value pair */
+ jsonw_end_object(d->jw);
+
+ return ret;
+}
+
+static json_writer_t *get_btf_writer(void)
+{
+ json_writer_t *jw = jsonw_new(stdout);
+
+ if (!jw)
+ return NULL;
+ jsonw_pretty(jw, true);
+
+ return jw;
+}
+
+static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
+ unsigned char *value, struct btf *btf)
+{
+ jsonw_start_object(json_wtr);
+
+ if (!map_is_per_cpu(info->type)) {
+ jsonw_name(json_wtr, "key");
+ print_hex_data_json(key, info->key_size);
+ jsonw_name(json_wtr, "value");
+ print_hex_data_json(value, info->value_size);
+ if (map_is_map_of_maps(info->type))
+ jsonw_uint_field(json_wtr, "inner_map_id",
+ *(unsigned int *)value);
+ if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = json_wtr,
+ .is_plain_text = false,
+ };
+
+ jsonw_name(json_wtr, "formatted");
+ do_dump_btf(&d, info, key, value);
+ }
+ } else {
+ unsigned int i, n, step;
+
+ n = get_possible_cpus();
+ step = round_up(info->value_size, 8);
+
+ jsonw_name(json_wtr, "key");
+ print_hex_data_json(key, info->key_size);
+
+ jsonw_name(json_wtr, "values");
+ jsonw_start_array(json_wtr);
+ for (i = 0; i < n; i++) {
+ jsonw_start_object(json_wtr);
+
+ jsonw_int_field(json_wtr, "cpu", i);
+
+ jsonw_name(json_wtr, "value");
+ print_hex_data_json(value + i * step,
+ info->value_size);
+
+ jsonw_end_object(json_wtr);
+ }
+ jsonw_end_array(json_wtr);
+ if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = json_wtr,
+ .is_plain_text = false,
+ };
+
+ jsonw_name(json_wtr, "formatted");
+ do_dump_btf(&d, info, key, value);
+ }
+ }
+
+ jsonw_end_object(json_wtr);
+}
+
+static void
+print_entry_error_msg(struct bpf_map_info *info, unsigned char *key,
+ const char *error_msg)
+{
+ int msg_size = strlen(error_msg);
+ bool single_line, break_names;
+
+ break_names = info->key_size > 16 || msg_size > 16;
+ single_line = info->key_size + msg_size <= 24 && !break_names;
+
+ printf("key:%c", break_names ? '\n' : ' ');
+ fprint_hex(stdout, key, info->key_size, " ");
+
+ printf(single_line ? " " : "\n");
+
+ printf("value:%c%s", break_names ? '\n' : ' ', error_msg);
+
+ printf("\n");
+}
+
+static void
+print_entry_error(struct bpf_map_info *map_info, void *key, int lookup_errno)
+{
+ /* For prog_array maps or arrays of maps, failure to lookup the value
+ * means there is no entry for that key. Do not print an error message
+ * in that case.
+ */
+ if ((map_is_map_of_maps(map_info->type) ||
+ map_is_map_of_progs(map_info->type)) && lookup_errno == ENOENT)
+ return;
+
+ if (json_output) {
+ jsonw_start_object(json_wtr); /* entry */
+ jsonw_name(json_wtr, "key");
+ print_hex_data_json(key, map_info->key_size);
+ jsonw_name(json_wtr, "value");
+ jsonw_start_object(json_wtr); /* error */
+ jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
+ jsonw_end_object(json_wtr); /* error */
+ jsonw_end_object(json_wtr); /* entry */
+ } else {
+ const char *msg = NULL;
+
+ if (lookup_errno == ENOENT)
+ msg = "<no entry>";
+ else if (lookup_errno == ENOSPC &&
+ map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
+ msg = "<cannot read>";
+
+ print_entry_error_msg(map_info, key,
+ msg ? : strerror(lookup_errno));
+ }
+}
+
+static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
+ unsigned char *value)
+{
+ if (!map_is_per_cpu(info->type)) {
+ bool single_line, break_names;
+
+ break_names = info->key_size > 16 || info->value_size > 16;
+ single_line = info->key_size + info->value_size <= 24 &&
+ !break_names;
+
+ if (info->key_size) {
+ printf("key:%c", break_names ? '\n' : ' ');
+ fprint_hex(stdout, key, info->key_size, " ");
+
+ printf(single_line ? " " : "\n");
+ }
+
+ if (info->value_size) {
+ if (map_is_map_of_maps(info->type)) {
+ printf("inner_map_id:%c", break_names ? '\n' : ' ');
+ printf("%u ", *(unsigned int *)value);
+ } else {
+ printf("value:%c", break_names ? '\n' : ' ');
+ fprint_hex(stdout, value, info->value_size, " ");
+ }
+ }
+
+ printf("\n");
+ } else {
+ unsigned int i, n, step;
+
+ n = get_possible_cpus();
+ step = round_up(info->value_size, 8);
+
+ if (info->key_size) {
+ printf("key:\n");
+ fprint_hex(stdout, key, info->key_size, " ");
+ printf("\n");
+ }
+ if (info->value_size) {
+ for (i = 0; i < n; i++) {
+ printf("value (CPU %02d):%c",
+ i, info->value_size > 16 ? '\n' : ' ');
+ fprint_hex(stdout, value + i * step,
+ info->value_size, " ");
+ printf("\n");
+ }
+ }
+ }
+}
+
+static char **parse_bytes(char **argv, const char *name, unsigned char *val,
+ unsigned int n)
+{
+ unsigned int i = 0, base = 0;
+ char *endptr;
+
+ if (is_prefix(*argv, "hex")) {
+ base = 16;
+ argv++;
+ }
+
+ while (i < n && argv[i]) {
+ val[i] = strtoul(argv[i], &endptr, base);
+ if (*endptr) {
+ p_err("error parsing byte: %s", argv[i]);
+ return NULL;
+ }
+ i++;
+ }
+
+ if (i != n) {
+		p_err("%s expected %u bytes, got %u", name, n, i);
+ return NULL;
+ }
+
+ return argv + i;
+}
+
+/* On per-CPU maps we must copy the provided value to all per-CPU slots. */
+static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
+{
+ unsigned int i, n, step;
+
+ if (!map_is_per_cpu(info->type))
+ return;
+
+ n = get_possible_cpus();
+ step = round_up(info->value_size, 8);
+ for (i = 1; i < n; i++)
+ memcpy(value + i * step, value, info->value_size);
+}
+
+static int parse_elem(char **argv, struct bpf_map_info *info,
+ void *key, void *value, __u32 key_size, __u32 value_size,
+ __u32 *flags, __u32 **value_fd)
+{
+ if (!*argv) {
+ if (!key && !value)
+ return 0;
+ p_err("did not find %s", key ? "key" : "value");
+ return -1;
+ }
+
+ if (is_prefix(*argv, "key")) {
+ if (!key) {
+ if (key_size)
+ p_err("duplicate key");
+ else
+ p_err("unnecessary key");
+ return -1;
+ }
+
+ argv = parse_bytes(argv + 1, "key", key, key_size);
+ if (!argv)
+ return -1;
+
+ return parse_elem(argv, info, NULL, value, key_size, value_size,
+ flags, value_fd);
+ } else if (is_prefix(*argv, "value")) {
+ int fd;
+
+ if (!value) {
+ if (value_size)
+ p_err("duplicate value");
+ else
+ p_err("unnecessary value");
+ return -1;
+ }
+
+ argv++;
+
+ if (map_is_map_of_maps(info->type)) {
+ int argc = 2;
+
+ if (value_size != 4) {
+ p_err("value smaller than 4B for map in map?");
+ return -1;
+ }
+ if (!argv[0] || !argv[1]) {
+ p_err("not enough value arguments for map in map");
+ return -1;
+ }
+
+ fd = map_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return -1;
+
+ *value_fd = value;
+ **value_fd = fd;
+ } else if (map_is_map_of_progs(info->type)) {
+ int argc = 2;
+
+ if (value_size != 4) {
+ p_err("value smaller than 4B for map of progs?");
+ return -1;
+ }
+ if (!argv[0] || !argv[1]) {
+ p_err("not enough value arguments for map of progs");
+ return -1;
+ }
+ if (is_prefix(*argv, "id"))
+			p_info("Warning: updating program array via MAP_ID, make sure this map is kept open\n"
+			       "        by some process or pinned, otherwise the update will be lost");
+
+ fd = prog_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return -1;
+
+ *value_fd = value;
+ **value_fd = fd;
+ } else {
+ argv = parse_bytes(argv, "value", value, value_size);
+ if (!argv)
+ return -1;
+
+ fill_per_cpu_value(info, value);
+ }
+
+ return parse_elem(argv, info, key, NULL, key_size, value_size,
+ flags, NULL);
+ } else if (is_prefix(*argv, "any") || is_prefix(*argv, "noexist") ||
+ is_prefix(*argv, "exist")) {
+ if (!flags) {
+ p_err("flags specified multiple times: %s", *argv);
+ return -1;
+ }
+
+ if (is_prefix(*argv, "any"))
+ *flags = BPF_ANY;
+ else if (is_prefix(*argv, "noexist"))
+ *flags = BPF_NOEXIST;
+ else if (is_prefix(*argv, "exist"))
+ *flags = BPF_EXIST;
+
+ return parse_elem(argv + 1, info, key, value, key_size,
+ value_size, NULL, value_fd);
+ }
+
+ p_err("expected key or value, got: %s", *argv);
+ return -1;
+}
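+
+/* parse_elem() consumes command-line elements such as:
+ *
+ *	key 1 0 0 0 value hex 0a 0b 0c 0d any
+ *
+ * filling the key and value byte arrays and setting *flags to BPF_ANY.
+ */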
+
+static void show_map_header_json(struct bpf_map_info *info, json_writer_t *wtr)
+{
+ const char *map_type_str;
+
+ jsonw_uint_field(wtr, "id", info->id);
+ map_type_str = libbpf_bpf_map_type_str(info->type);
+ if (map_type_str)
+ jsonw_string_field(wtr, "type", map_type_str);
+ else
+ jsonw_uint_field(wtr, "type", info->type);
+
+ if (*info->name)
+ jsonw_string_field(wtr, "name", info->name);
+
+ jsonw_name(wtr, "flags");
+ jsonw_printf(wtr, "%d", info->map_flags);
+}
+
+static int show_map_close_json(int fd, struct bpf_map_info *info)
+{
+ char *memlock, *frozen_str;
+ int frozen = 0;
+
+ memlock = get_fdinfo(fd, "memlock");
+ frozen_str = get_fdinfo(fd, "frozen");
+
+ jsonw_start_object(json_wtr);
+
+ show_map_header_json(info, json_wtr);
+
+ print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
+
+ jsonw_uint_field(json_wtr, "bytes_key", info->key_size);
+ jsonw_uint_field(json_wtr, "bytes_value", info->value_size);
+ jsonw_uint_field(json_wtr, "max_entries", info->max_entries);
+
+ if (memlock)
+ jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
+ free(memlock);
+
+ if (info->type == BPF_MAP_TYPE_PROG_ARRAY) {
+ char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
+ char *owner_jited = get_fdinfo(fd, "owner_jited");
+
+ if (owner_prog_type) {
+ unsigned int prog_type = atoi(owner_prog_type);
+ const char *prog_type_str;
+
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ if (prog_type_str)
+ jsonw_string_field(json_wtr, "owner_prog_type",
+ prog_type_str);
+ else
+ jsonw_uint_field(json_wtr, "owner_prog_type",
+ prog_type);
+ }
+ if (owner_jited)
+ jsonw_bool_field(json_wtr, "owner_jited",
+ !!atoi(owner_jited));
+
+ free(owner_prog_type);
+ free(owner_jited);
+ }
+ close(fd);
+
+ if (frozen_str) {
+ frozen = atoi(frozen_str);
+ free(frozen_str);
+ }
+ jsonw_int_field(json_wtr, "frozen", frozen);
+
+ if (info->btf_id)
+ jsonw_int_field(json_wtr, "btf_id", info->btf_id);
+
+ if (!hashmap__empty(map_table)) {
+ struct hashmap_entry *entry;
+
+ jsonw_name(json_wtr, "pinned");
+ jsonw_start_array(json_wtr);
+ hashmap__for_each_key_entry(map_table, entry, info->id)
+ jsonw_string(json_wtr, entry->pvalue);
+ jsonw_end_array(json_wtr);
+ }
+
+ emit_obj_refs_json(refs_table, info->id, json_wtr);
+
+ jsonw_end_object(json_wtr);
+
+ return 0;
+}
+
+static void show_map_header_plain(struct bpf_map_info *info)
+{
+ const char *map_type_str;
+
+ printf("%u: ", info->id);
+
+ map_type_str = libbpf_bpf_map_type_str(info->type);
+ if (map_type_str)
+ printf("%s ", map_type_str);
+ else
+ printf("type %u ", info->type);
+
+ if (*info->name)
+ printf("name %s ", info->name);
+
+ printf("flags 0x%x", info->map_flags);
+ print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
+ printf("\n");
+}
+
+static int show_map_close_plain(int fd, struct bpf_map_info *info)
+{
+ char *memlock, *frozen_str;
+ int frozen = 0;
+
+ memlock = get_fdinfo(fd, "memlock");
+ frozen_str = get_fdinfo(fd, "frozen");
+
+ show_map_header_plain(info);
+ printf("\tkey %uB value %uB max_entries %u",
+ info->key_size, info->value_size, info->max_entries);
+
+ if (memlock)
+ printf(" memlock %sB", memlock);
+ free(memlock);
+
+ if (info->type == BPF_MAP_TYPE_PROG_ARRAY) {
+ char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
+ char *owner_jited = get_fdinfo(fd, "owner_jited");
+
+ if (owner_prog_type || owner_jited)
+ printf("\n\t");
+ if (owner_prog_type) {
+ unsigned int prog_type = atoi(owner_prog_type);
+ const char *prog_type_str;
+
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ if (prog_type_str)
+ printf("owner_prog_type %s ", prog_type_str);
+ else
+ printf("owner_prog_type %d ", prog_type);
+ }
+ if (owner_jited)
+ printf("owner%s jited",
+ atoi(owner_jited) ? "" : " not");
+
+ free(owner_prog_type);
+ free(owner_jited);
+ }
+ close(fd);
+
+ if (!hashmap__empty(map_table)) {
+ struct hashmap_entry *entry;
+
+ hashmap__for_each_key_entry(map_table, entry, info->id)
+ printf("\n\tpinned %s", (char *)entry->pvalue);
+ }
+
+ if (frozen_str) {
+ frozen = atoi(frozen_str);
+ free(frozen_str);
+ }
+
+ if (info->btf_id || frozen)
+ printf("\n\t");
+
+ if (info->btf_id)
+ printf("btf_id %d", info->btf_id);
+
+ if (frozen)
+ printf("%sfrozen", info->btf_id ? " " : "");
+
+ emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
+
+ printf("\n");
+ return 0;
+}
+
+static int do_show_subset(int argc, char **argv)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ int *fds = NULL;
+ int nb_fds, i;
+ int err = -1;
+
+ fds = malloc(sizeof(int));
+ if (!fds) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+ nb_fds = map_parse_fds(&argc, &argv, &fds);
+ if (nb_fds < 1)
+ goto exit_free;
+
+ if (json_output && nb_fds > 1)
+ jsonw_start_array(json_wtr); /* root array */
+ for (i = 0; i < nb_fds; i++) {
+ err = bpf_map_get_info_by_fd(fds[i], &info, &len);
+ if (err) {
+ p_err("can't get map info: %s",
+ strerror(errno));
+ for (; i < nb_fds; i++)
+ close(fds[i]);
+ break;
+ }
+
+ if (json_output)
+ show_map_close_json(fds[i], &info);
+ else
+ show_map_close_plain(fds[i], &info);
+
+ close(fds[i]);
+ }
+ if (json_output && nb_fds > 1)
+ jsonw_end_array(json_wtr); /* root array */
+
+exit_free:
+ free(fds);
+ return err;
+}
+
+static int do_show(int argc, char **argv)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ __u32 id = 0;
+ int err;
+ int fd;
+
+ if (show_pinned) {
+ map_table = hashmap__new(hash_fn_for_key_as_id,
+ equal_fn_for_key_as_id, NULL);
+ if (IS_ERR(map_table)) {
+ p_err("failed to create hashmap for pinned paths");
+ return -1;
+ }
+ build_pinned_obj_table(map_table, BPF_OBJ_MAP);
+ }
+ build_obj_refs_table(&refs_table, BPF_OBJ_MAP);
+
+ if (argc == 2)
+ return do_show_subset(argc, argv);
+
+ if (argc)
+ return BAD_ARG();
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ while (true) {
+ err = bpf_map_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT)
+ break;
+ p_err("can't get next map: %s%s", strerror(errno),
+ errno == EINVAL ? " -- kernel too old?" : "");
+ break;
+ }
+
+ fd = bpf_map_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
+ p_err("can't get map by id (%u): %s",
+ id, strerror(errno));
+ break;
+ }
+
+ err = bpf_map_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get map info: %s", strerror(errno));
+ close(fd);
+ break;
+ }
+
+ if (json_output)
+ show_map_close_json(fd, &info);
+ else
+ show_map_close_plain(fd, &info);
+ }
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ delete_obj_refs_table(refs_table);
+
+ if (show_pinned)
+ delete_pinned_obj_table(map_table);
+
+ return errno == ENOENT ? 0 : -1;
+}
+
+static int dump_map_elem(int fd, void *key, void *value,
+ struct bpf_map_info *map_info, struct btf *btf,
+ json_writer_t *btf_wtr)
+{
+ if (bpf_map_lookup_elem(fd, key, value)) {
+ print_entry_error(map_info, key, errno);
+ return -1;
+ }
+
+ if (json_output) {
+ print_entry_json(map_info, key, value, btf);
+ } else if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ do_dump_btf(&d, map_info, key, value);
+ } else {
+ print_entry_plain(map_info, key, value);
+ }
+
+ return 0;
+}
+
+static int maps_have_btf(int *fds, int nb_fds)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ int err, i;
+
+ for (i = 0; i < nb_fds; i++) {
+ err = bpf_map_get_info_by_fd(fds[i], &info, &len);
+ if (err) {
+ p_err("can't get map info: %s", strerror(errno));
+ return -1;
+ }
+
+ if (!info.btf_id)
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct btf *btf_vmlinux;
+
+static int get_map_kv_btf(const struct bpf_map_info *info, struct btf **btf)
+{
+ int err = 0;
+
+ if (info->btf_vmlinux_value_type_id) {
+ if (!btf_vmlinux) {
+ btf_vmlinux = libbpf_find_kernel_btf();
+ if (!btf_vmlinux) {
+ p_err("failed to get kernel btf");
+ return -errno;
+ }
+ }
+ *btf = btf_vmlinux;
+ } else if (info->btf_value_type_id) {
+ *btf = btf__load_from_kernel_by_id(info->btf_id);
+ if (!*btf) {
+ err = -errno;
+ p_err("failed to get btf");
+ }
+ } else {
+ *btf = NULL;
+ }
+
+ return err;
+}
+
+static void free_map_kv_btf(struct btf *btf)
+{
+ if (btf != btf_vmlinux)
+ btf__free(btf);
+}
+
+static int
+map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
+ bool show_header)
+{
+ void *key, *value, *prev_key;
+ unsigned int num_elems = 0;
+ struct btf *btf = NULL;
+ int err;
+
+ key = malloc(info->key_size);
+ value = alloc_value(info);
+ if (!key || !value) {
+ p_err("mem alloc failed");
+ err = -1;
+ goto exit_free;
+ }
+
+ prev_key = NULL;
+
+ if (wtr) {
+ err = get_map_kv_btf(info, &btf);
+ if (err) {
+ goto exit_free;
+ }
+
+ if (show_header) {
+ jsonw_start_object(wtr); /* map object */
+ show_map_header_json(info, wtr);
+ jsonw_name(wtr, "elements");
+ }
+ jsonw_start_array(wtr); /* elements */
+ } else if (show_header) {
+ show_map_header_plain(info);
+ }
+
+ if (info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
+ info->value_size != 8) {
+ const char *map_type_str;
+
+ map_type_str = libbpf_bpf_map_type_str(info->type);
+ p_info("Warning: cannot read values from %s map with value_size != 8",
+ map_type_str);
+ }
+ while (true) {
+ err = bpf_map_get_next_key(fd, prev_key, key);
+ if (err) {
+ if (errno == ENOENT)
+ err = 0;
+ break;
+ }
+ if (!dump_map_elem(fd, key, value, info, btf, wtr))
+ num_elems++;
+ prev_key = key;
+ }
+
+ if (wtr) {
+ jsonw_end_array(wtr); /* elements */
+ if (show_header)
+ jsonw_end_object(wtr); /* map object */
+ } else {
+ printf("Found %u element%s\n", num_elems,
+ num_elems != 1 ? "s" : "");
+ }
+
+exit_free:
+ free(key);
+ free(value);
+ close(fd);
+ free_map_kv_btf(btf);
+
+ return err;
+}
+
+static int do_dump(int argc, char **argv)
+{
+ json_writer_t *wtr = NULL, *btf_wtr = NULL;
+ struct bpf_map_info info = {};
+ int nb_fds, i = 0;
+ __u32 len = sizeof(info);
+ int *fds = NULL;
+ int err = -1;
+
+ if (argc != 2)
+ usage();
+
+ fds = malloc(sizeof(int));
+ if (!fds) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+ nb_fds = map_parse_fds(&argc, &argv, &fds);
+ if (nb_fds < 1)
+ goto exit_free;
+
+ if (json_output) {
+ wtr = json_wtr;
+ } else {
+ int do_plain_btf;
+
+ do_plain_btf = maps_have_btf(fds, nb_fds);
+ if (do_plain_btf < 0)
+ goto exit_close;
+
+ if (do_plain_btf) {
+ btf_wtr = get_btf_writer();
+ wtr = btf_wtr;
+ if (!btf_wtr)
+ p_info("failed to create json writer for btf. falling back to plain output");
+ }
+ }
+
+ if (wtr && nb_fds > 1)
+ jsonw_start_array(wtr); /* root array */
+ for (i = 0; i < nb_fds; i++) {
+ if (bpf_map_get_info_by_fd(fds[i], &info, &len)) {
+ p_err("can't get map info: %s", strerror(errno));
+ break;
+ }
+ err = map_dump(fds[i], &info, wtr, nb_fds > 1);
+ if (!wtr && i != nb_fds - 1)
+ printf("\n");
+
+ if (err)
+ break;
+ close(fds[i]);
+ }
+ if (wtr && nb_fds > 1)
+ jsonw_end_array(wtr); /* root array */
+
+ if (btf_wtr)
+ jsonw_destroy(&btf_wtr);
+exit_close:
+ for (; i < nb_fds; i++)
+ close(fds[i]);
+exit_free:
+ free(fds);
+ btf__free(btf_vmlinux);
+ return err;
+}
+
+static int alloc_key_value(struct bpf_map_info *info, void **key, void **value)
+{
+ *key = NULL;
+ *value = NULL;
+
+ if (info->key_size) {
+ *key = malloc(info->key_size);
+ if (!*key) {
+ p_err("key mem alloc failed");
+ return -1;
+ }
+ }
+
+ if (info->value_size) {
+ *value = alloc_value(info);
+ if (!*value) {
+ p_err("value mem alloc failed");
+ free(*key);
+ *key = NULL;
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int do_update(int argc, char **argv)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ __u32 *value_fd = NULL;
+ __u32 flags = BPF_ANY;
+ void *key, *value;
+ int fd, err;
+
+ if (argc < 2)
+ usage();
+
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ if (fd < 0)
+ return -1;
+
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
+ goto exit_free;
+
+ err = parse_elem(argv, &info, key, value, info.key_size,
+ info.value_size, &flags, &value_fd);
+ if (err)
+ goto exit_free;
+
+ err = bpf_map_update_elem(fd, key, value, flags);
+ if (err) {
+ p_err("update failed: %s", strerror(errno));
+ goto exit_free;
+ }
+
+exit_free:
+ if (value_fd)
+ close(*value_fd);
+ free(key);
+ free(value);
+ close(fd);
+
+ if (!err && json_output)
+ jsonw_null(json_wtr);
+ return err;
+}
+
+static void print_key_value(struct bpf_map_info *info, void *key,
+ void *value)
+{
+ json_writer_t *btf_wtr;
+ struct btf *btf;
+
+ if (get_map_kv_btf(info, &btf))
+ return;
+
+ if (json_output) {
+ print_entry_json(info, key, value, btf);
+ } else if (btf) {
+		/* If we get here, json_wtr has not been initialised,
+		 * so create a separate writer for BTF.
+		 */
+ btf_wtr = get_btf_writer();
+ if (!btf_wtr) {
+ p_info("failed to create json writer for btf. falling back to plain output");
+ btf__free(btf);
+ btf = NULL;
+ print_entry_plain(info, key, value);
+ } else {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ do_dump_btf(&d, info, key, value);
+ jsonw_destroy(&btf_wtr);
+ }
+ } else {
+ print_entry_plain(info, key, value);
+ }
+ btf__free(btf);
+}
+
+static int do_lookup(int argc, char **argv)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ void *key, *value;
+ int err;
+ int fd;
+
+ if (argc < 2)
+ usage();
+
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ if (fd < 0)
+ return -1;
+
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
+ goto exit_free;
+
+ err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
+ if (err)
+ goto exit_free;
+
+ err = bpf_map_lookup_elem(fd, key, value);
+ if (err) {
+ if (errno == ENOENT) {
+ if (json_output) {
+ jsonw_null(json_wtr);
+ } else {
+ printf("key:\n");
+ fprint_hex(stdout, key, info.key_size, " ");
+ printf("\n\nNot found\n");
+ }
+ } else {
+ p_err("lookup failed: %s", strerror(errno));
+ }
+
+ goto exit_free;
+ }
+
+ /* here means bpf_map_lookup_elem() succeeded */
+ print_key_value(&info, key, value);
+
+exit_free:
+ free(key);
+ free(value);
+ close(fd);
+
+ return err;
+}
+
+static int do_getnext(int argc, char **argv)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ void *key, *nextkey;
+ int err;
+ int fd;
+
+ if (argc < 2)
+ usage();
+
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ if (fd < 0)
+ return -1;
+
+ key = malloc(info.key_size);
+ nextkey = malloc(info.key_size);
+ if (!key || !nextkey) {
+ p_err("mem alloc failed");
+ err = -1;
+ goto exit_free;
+ }
+
+ if (argc) {
+ err = parse_elem(argv, &info, key, NULL, info.key_size, 0,
+ NULL, NULL);
+ if (err)
+ goto exit_free;
+ } else {
+ free(key);
+ key = NULL;
+ }
+
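+	/* With a NULL key, the kernel returns the first key in the map. */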
+ err = bpf_map_get_next_key(fd, key, nextkey);
+ if (err) {
+ p_err("can't get next key: %s", strerror(errno));
+ goto exit_free;
+ }
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ if (key) {
+ jsonw_name(json_wtr, "key");
+ print_hex_data_json(key, info.key_size);
+ } else {
+ jsonw_null_field(json_wtr, "key");
+ }
+ jsonw_name(json_wtr, "next_key");
+ print_hex_data_json(nextkey, info.key_size);
+ jsonw_end_object(json_wtr);
+ } else {
+ if (key) {
+ printf("key:\n");
+ fprint_hex(stdout, key, info.key_size, " ");
+ printf("\n");
+ } else {
+ printf("key: None\n");
+ }
+ printf("next key:\n");
+ fprint_hex(stdout, nextkey, info.key_size, " ");
+ printf("\n");
+ }
+
+exit_free:
+ free(nextkey);
+ free(key);
+ close(fd);
+
+ return err;
+}
+
+static int do_delete(int argc, char **argv)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ void *key;
+ int err;
+ int fd;
+
+ if (argc < 2)
+ usage();
+
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ if (fd < 0)
+ return -1;
+
+ key = malloc(info.key_size);
+ if (!key) {
+ p_err("mem alloc failed");
+ err = -1;
+ goto exit_free;
+ }
+
+ err = parse_elem(argv, &info, key, NULL, info.key_size, 0, NULL, NULL);
+ if (err)
+ goto exit_free;
+
+ err = bpf_map_delete_elem(fd, key);
+ if (err)
+ p_err("delete failed: %s", strerror(errno));
+
+exit_free:
+ free(key);
+ close(fd);
+
+ if (!err && json_output)
+ jsonw_null(json_wtr);
+ return err;
+}
+
+static int do_pin(int argc, char **argv)
+{
+ int err;
+
+ err = do_pin_any(argc, argv, map_parse_fd);
+ if (!err && json_output)
+ jsonw_null(json_wtr);
+ return err;
+}
+
+static int do_create(int argc, char **argv)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, attr);
+ enum bpf_map_type map_type = BPF_MAP_TYPE_UNSPEC;
+ __u32 key_size = 0, value_size = 0, max_entries = 0;
+ const char *map_name = NULL;
+ const char *pinfile;
+ int err = -1, fd;
+
+ if (!REQ_ARGS(7))
+ return -1;
+ pinfile = GET_ARG();
+
+ while (argc) {
+ if (!REQ_ARGS(2))
+ return -1;
+
+ if (is_prefix(*argv, "type")) {
+ NEXT_ARG();
+
+ if (map_type) {
+ p_err("map type already specified");
+ goto exit;
+ }
+
+ map_type = map_type_from_str(*argv);
+ if ((int)map_type < 0) {
+ p_err("unrecognized map type: %s", *argv);
+ goto exit;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "name")) {
+ NEXT_ARG();
+ map_name = GET_ARG();
+ } else if (is_prefix(*argv, "key")) {
+ if (parse_u32_arg(&argc, &argv, &key_size,
+ "key size"))
+ goto exit;
+ } else if (is_prefix(*argv, "value")) {
+ if (parse_u32_arg(&argc, &argv, &value_size,
+ "value size"))
+ goto exit;
+ } else if (is_prefix(*argv, "entries")) {
+ if (parse_u32_arg(&argc, &argv, &max_entries,
+ "max entries"))
+ goto exit;
+ } else if (is_prefix(*argv, "flags")) {
+ if (parse_u32_arg(&argc, &argv, &attr.map_flags,
+ "flags"))
+ goto exit;
+ } else if (is_prefix(*argv, "dev")) {
+ p_info("Warning: 'bpftool map create [...] dev <ifname>' syntax is deprecated.\n"
+			       "Going forward, please use 'offload_dev <ifname>' to request hardware offload for the map.");
+ goto offload_dev;
+ } else if (is_prefix(*argv, "offload_dev")) {
+offload_dev:
+ NEXT_ARG();
+
+ if (attr.map_ifindex) {
+ p_err("offload device already specified");
+ goto exit;
+ }
+
+ attr.map_ifindex = if_nametoindex(*argv);
+ if (!attr.map_ifindex) {
+ p_err("unrecognized netdevice '%s': %s",
+ *argv, strerror(errno));
+ goto exit;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "inner_map")) {
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ int inner_map_fd;
+
+ NEXT_ARG();
+ if (!REQ_ARGS(2))
+ usage();
+ inner_map_fd = map_parse_fd_and_info(&argc, &argv,
+ &info, &len);
+ if (inner_map_fd < 0)
+ return -1;
+ attr.inner_map_fd = inner_map_fd;
+ } else {
+ p_err("unknown arg %s", *argv);
+ goto exit;
+ }
+ }
+
+ if (!map_name) {
+ p_err("map name not specified");
+ goto exit;
+ }
+
+ set_max_rlimit();
+
+ fd = bpf_map_create(map_type, map_name, key_size, value_size, max_entries, &attr);
+ if (fd < 0) {
+ p_err("map create failed: %s", strerror(errno));
+ goto exit;
+ }
+
+ err = do_pin_fd(fd, pinfile);
+ close(fd);
+ if (err)
+ goto exit;
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+exit:
+ if (attr.inner_map_fd > 0)
+ close(attr.inner_map_fd);
+
+ return err;
+}
+
+static int do_pop_dequeue(int argc, char **argv)
+{
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ void *key, *value;
+ int err;
+ int fd;
+
+ if (argc < 2)
+ usage();
+
+ fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
+ if (fd < 0)
+ return -1;
+
+ err = alloc_key_value(&info, &key, &value);
+ if (err)
+ goto exit_free;
+
+ err = bpf_map_lookup_and_delete_elem(fd, key, value);
+ if (err) {
+ if (errno == ENOENT) {
+ if (json_output)
+ jsonw_null(json_wtr);
+ else
+ printf("Error: empty map\n");
+ } else {
+ p_err("pop failed: %s", strerror(errno));
+ }
+
+ goto exit_free;
+ }
+
+ print_key_value(&info, key, value);
+
+exit_free:
+ free(key);
+ free(value);
+ close(fd);
+
+ return err;
+}
+
+static int do_freeze(int argc, char **argv)
+{
+ int err, fd;
+
+ if (!REQ_ARGS(2))
+ return -1;
+
+ fd = map_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return -1;
+
+ if (argc) {
+ close(fd);
+ return BAD_ARG();
+ }
+
+ err = bpf_map_freeze(fd);
+ close(fd);
+ if (err) {
+ p_err("failed to freeze map: %s", strerror(errno));
+ return err;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %1$s %2$s { show | list } [MAP]\n"
+ " %1$s %2$s create FILE type TYPE key KEY_SIZE value VALUE_SIZE \\\n"
+ " entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
+ " [inner_map MAP] [offload_dev NAME]\n"
+ " %1$s %2$s dump MAP\n"
+ " %1$s %2$s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
+ " %1$s %2$s lookup MAP [key DATA]\n"
+ " %1$s %2$s getnext MAP [key DATA]\n"
+ " %1$s %2$s delete MAP key DATA\n"
+ " %1$s %2$s pin MAP FILE\n"
+ " %1$s %2$s event_pipe MAP [cpu N index M]\n"
+ " %1$s %2$s peek MAP\n"
+ " %1$s %2$s push MAP value VALUE\n"
+ " %1$s %2$s pop MAP\n"
+ " %1$s %2$s enqueue MAP value VALUE\n"
+ " %1$s %2$s dequeue MAP\n"
+ " %1$s %2$s freeze MAP\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " " HELP_SPEC_MAP "\n"
+ " DATA := { [hex] BYTES }\n"
+ " " HELP_SPEC_PROGRAM "\n"
+ " VALUE := { DATA | MAP | PROG }\n"
+ " UPDATE_FLAGS := { any | exist | noexist }\n"
+ " TYPE := { hash | array | prog_array | perf_event_array | percpu_hash |\n"
+ " percpu_array | stack_trace | cgroup_array | lru_hash |\n"
+ " lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n"
+ " devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
+ " cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
+ " queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n"
+ " task_storage | bloom_filter | user_ringbuf | cgrp_storage }\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-f|--bpffs} | {-n|--nomount} }\n"
+ "",
+ bin_name, argv[-2]);
+
+ return 0;
+}
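+
+/* Illustrative invocations (paths, sizes and keys below are examples only):
+ *
+ *   bpftool map create /sys/fs/bpf/my_hash type hash key 4 value 8 \
+ *           entries 1024 name my_hash
+ *   bpftool map update pinned /sys/fs/bpf/my_hash \
+ *           key hex 01 00 00 00 value hex 02 00 00 00 00 00 00 00
+ *   bpftool map dump pinned /sys/fs/bpf/my_hash
+ */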
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "help", do_help },
+ { "dump", do_dump },
+ { "update", do_update },
+ { "lookup", do_lookup },
+ { "getnext", do_getnext },
+ { "delete", do_delete },
+ { "pin", do_pin },
+ { "event_pipe", do_event_pipe },
+ { "create", do_create },
+ { "peek", do_lookup },
+ { "push", do_update },
+ { "enqueue", do_update },
+ { "pop", do_pop_dequeue },
+ { "dequeue", do_pop_dequeue },
+ { "freeze", do_freeze },
+ { 0 }
+};
+
+int do_map(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
new file mode 100644
index 0000000000..21d7d447e1
--- /dev/null
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+#include <errno.h>
+#include <fcntl.h>
+#include <bpf/libbpf.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+
+#include <bpf/bpf.h>
+
+#include "main.h"
+
+#define MMAP_PAGE_CNT 16
+
+static volatile bool stop;
+
+struct perf_event_sample {
+ struct perf_event_header header;
+ __u64 time;
+ __u32 size;
+ unsigned char data[];
+};
+
+struct perf_event_lost {
+ struct perf_event_header header;
+ __u64 id;
+ __u64 lost;
+};
+
+static void int_exit(int signo)
+{
+ fprintf(stderr, "Stopping...\n");
+ stop = true;
+}
+
+struct event_pipe_ctx {
+ bool all_cpus;
+ int cpu;
+ int idx;
+};
+
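+/* Callback invoked by perf_buffer__poll() for each ring-buffer record. A
+ * record is either a sample or a "lost events" notification; header.type
+ * decides which of the two container_of() views below is valid.
+ */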
+static enum bpf_perf_event_ret
+print_bpf_output(void *private_data, int cpu, struct perf_event_header *event)
+{
+ struct perf_event_sample *e = container_of(event,
+ struct perf_event_sample,
+ header);
+ struct perf_event_lost *lost = container_of(event,
+ struct perf_event_lost,
+ header);
+ struct event_pipe_ctx *ctx = private_data;
+ int idx = ctx->all_cpus ? cpu : ctx->idx;
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ jsonw_name(json_wtr, "type");
+ jsonw_uint(json_wtr, e->header.type);
+ jsonw_name(json_wtr, "cpu");
+ jsonw_uint(json_wtr, cpu);
+ jsonw_name(json_wtr, "index");
+ jsonw_uint(json_wtr, idx);
+ if (e->header.type == PERF_RECORD_SAMPLE) {
+ jsonw_name(json_wtr, "timestamp");
+ jsonw_uint(json_wtr, e->time);
+ jsonw_name(json_wtr, "data");
+ print_data_json(e->data, e->size);
+ } else if (e->header.type == PERF_RECORD_LOST) {
+ jsonw_name(json_wtr, "lost");
+ jsonw_start_object(json_wtr);
+ jsonw_name(json_wtr, "id");
+ jsonw_uint(json_wtr, lost->id);
+ jsonw_name(json_wtr, "count");
+ jsonw_uint(json_wtr, lost->lost);
+ jsonw_end_object(json_wtr);
+ }
+ jsonw_end_object(json_wtr);
+ } else {
+ if (e->header.type == PERF_RECORD_SAMPLE) {
+ printf("== @%lld.%09lld CPU: %d index: %d =====\n",
+ e->time / 1000000000ULL, e->time % 1000000000ULL,
+ cpu, idx);
+ fprint_hex(stdout, e->data, e->size, " ");
+ printf("\n");
+ } else if (e->header.type == PERF_RECORD_LOST) {
+ printf("lost %lld events\n", lost->lost);
+ } else {
+ printf("unknown event type=%d size=%d\n",
+ e->header.type, e->header.size);
+ }
+ }
+
+ return LIBBPF_PERF_EVENT_CONT;
+}
+
+int do_event_pipe(int argc, char **argv)
+{
+ struct perf_event_attr perf_attr = {
+ .sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_TIME,
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_BPF_OUTPUT,
+ .sample_period = 1,
+ .wakeup_events = 1,
+ };
+ struct bpf_map_info map_info = {};
+ LIBBPF_OPTS(perf_buffer_raw_opts, opts);
+ struct event_pipe_ctx ctx = {
+ .all_cpus = true,
+ .cpu = -1,
+ .idx = -1,
+ };
+ struct perf_buffer *pb;
+ __u32 map_info_len;
+ int err, map_fd;
+
+ map_info_len = sizeof(map_info);
+ map_fd = map_parse_fd_and_info(&argc, &argv, &map_info, &map_info_len);
+ if (map_fd < 0)
+ return -1;
+
+ if (map_info.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
+ p_err("map is not a perf event array");
+ goto err_close_map;
+ }
+
+ while (argc) {
+ if (argc < 2) {
+ BAD_ARG();
+ goto err_close_map;
+ }
+
+ if (is_prefix(*argv, "cpu")) {
+ char *endptr;
+
+ NEXT_ARG();
+ ctx.cpu = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as CPU ID", *argv);
+ goto err_close_map;
+ }
+
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "index")) {
+ char *endptr;
+
+ NEXT_ARG();
+ ctx.idx = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as index", *argv);
+ goto err_close_map;
+ }
+
+ NEXT_ARG();
+ } else {
+ BAD_ARG();
+ goto err_close_map;
+ }
+
+ ctx.all_cpus = false;
+ }
+
+ if (!ctx.all_cpus) {
+ if (ctx.idx == -1 || ctx.cpu == -1) {
+ p_err("cpu and index must be specified together");
+ goto err_close_map;
+ }
+ } else {
+ ctx.cpu = 0;
+ ctx.idx = 0;
+ }
+
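+	/* With cpu_cnt == 0, libbpf opens one buffer per online CPU and uses
+	 * the CPU number as the map key; otherwise a single buffer is opened
+	 * for the requested cpu/index pair.
+	 */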
+ opts.cpu_cnt = ctx.all_cpus ? 0 : 1;
+ opts.cpus = &ctx.cpu;
+ opts.map_keys = &ctx.idx;
+ pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &perf_attr,
+ print_bpf_output, &ctx, &opts);
+ if (!pb) {
+ p_err("failed to create perf buffer: %s (%d)",
+ strerror(errno), errno);
+ goto err_close_map;
+ }
+
+ signal(SIGINT, int_exit);
+ signal(SIGHUP, int_exit);
+ signal(SIGTERM, int_exit);
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+
+ while (!stop) {
+ err = perf_buffer__poll(pb, 200);
+ if (err < 0 && err != -EINTR) {
+ p_err("perf buffer polling failed: %s (%d)",
+ strerror(errno), errno);
+ goto err_close_pb;
+ }
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ perf_buffer__free(pb);
+ close(map_fd);
+
+ return 0;
+
+err_close_pb:
+ perf_buffer__free(pb);
+err_close_map:
+ close(map_fd);
+ return -1;
+}
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
new file mode 100644
index 0000000000..66a8ce8ae0
--- /dev/null
+++ b/tools/bpf/bpftool/net.c
@@ -0,0 +1,953 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (C) 2018 Facebook
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <net/if.h>
+#include <linux/rtnetlink.h>
+#include <linux/socket.h>
+#include <linux/tc_act/tc_bpf.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "bpf/nlattr.h"
+#include "main.h"
+#include "netlink_dumper.h"
+
+#ifndef SOL_NETLINK
+#define SOL_NETLINK 270
+#endif
+
+struct ip_devname_ifindex {
+ char devname[64];
+ int ifindex;
+};
+
+struct bpf_netdev_t {
+ struct ip_devname_ifindex *devices;
+ int used_len;
+ int array_len;
+ int filter_idx;
+};
+
+struct tc_kind_handle {
+ char kind[64];
+ int handle;
+};
+
+struct bpf_tcinfo_t {
+ struct tc_kind_handle *handle_array;
+ int used_len;
+ int array_len;
+ bool is_qdisc;
+};
+
+struct bpf_filter_t {
+ const char *kind;
+ const char *devname;
+ int ifindex;
+};
+
+struct bpf_attach_info {
+ __u32 flow_dissector_id;
+};
+
+enum net_attach_type {
+ NET_ATTACH_TYPE_XDP,
+ NET_ATTACH_TYPE_XDP_GENERIC,
+ NET_ATTACH_TYPE_XDP_DRIVER,
+ NET_ATTACH_TYPE_XDP_OFFLOAD,
+};
+
+static const char * const attach_type_strings[] = {
+ [NET_ATTACH_TYPE_XDP] = "xdp",
+ [NET_ATTACH_TYPE_XDP_GENERIC] = "xdpgeneric",
+ [NET_ATTACH_TYPE_XDP_DRIVER] = "xdpdrv",
+ [NET_ATTACH_TYPE_XDP_OFFLOAD] = "xdpoffload",
+};
+
+static const char * const attach_loc_strings[] = {
+ [BPF_TCX_INGRESS] = "tcx/ingress",
+ [BPF_TCX_EGRESS] = "tcx/egress",
+};
+
+const size_t net_attach_type_size = ARRAY_SIZE(attach_type_strings);
+
+static enum net_attach_type parse_attach_type(const char *str)
+{
+ enum net_attach_type type;
+
+ for (type = 0; type < net_attach_type_size; type++) {
+ if (attach_type_strings[type] &&
+ is_prefix(str, attach_type_strings[type]))
+ return type;
+ }
+
+ return net_attach_type_size;
+}
+
+typedef int (*dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
+
+typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, dump_nlmsg_t, void *cookie);
+
+static int netlink_open(__u32 *nl_pid)
+{
+ struct sockaddr_nl sa;
+ socklen_t addrlen;
+ int one = 1, ret;
+ int sock;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.nl_family = AF_NETLINK;
+
+ sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock < 0)
+ return -errno;
+
+ if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
+ &one, sizeof(one)) < 0) {
+ p_err("Netlink error reporting not supported");
+ }
+
+ if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
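+	/* Retrieve the kernel-assigned address so that replies can be matched
+	 * against nl_pid in netlink_recv().
+	 */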
+ addrlen = sizeof(sa);
+ if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ if (addrlen != sizeof(sa)) {
+ ret = -LIBBPF_ERRNO__INTERNAL;
+ goto cleanup;
+ }
+
+ *nl_pid = sa.nl_pid;
+ return sock;
+
+cleanup:
+ close(sock);
+ return ret;
+}
+
+static int netlink_recv(int sock, __u32 nl_pid, __u32 seq,
+ __dump_nlmsg_t _fn, dump_nlmsg_t fn,
+ void *cookie)
+{
+ bool multipart = true;
+ struct nlmsgerr *err;
+ struct nlmsghdr *nh;
+ char buf[4096];
+ int len, ret;
+
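+	/* A dump reply may span several messages: NLM_F_MULTI keeps the loop
+	 * going until NLMSG_DONE (or an error) terminates it.
+	 */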
+ while (multipart) {
+ multipart = false;
+ len = recv(sock, buf, sizeof(buf), 0);
+ if (len < 0) {
+ ret = -errno;
+ goto done;
+ }
+
+ if (len == 0)
+ break;
+
+ for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, (unsigned int)len);
+ nh = NLMSG_NEXT(nh, len)) {
+ if (nh->nlmsg_pid != nl_pid) {
+ ret = -LIBBPF_ERRNO__WRNGPID;
+ goto done;
+ }
+ if (nh->nlmsg_seq != seq) {
+ ret = -LIBBPF_ERRNO__INVSEQ;
+ goto done;
+ }
+ if (nh->nlmsg_flags & NLM_F_MULTI)
+ multipart = true;
+ switch (nh->nlmsg_type) {
+ case NLMSG_ERROR:
+ err = (struct nlmsgerr *)NLMSG_DATA(nh);
+ if (!err->error)
+ continue;
+ ret = err->error;
+ libbpf_nla_dump_errormsg(nh);
+ goto done;
+ case NLMSG_DONE:
+ return 0;
+ default:
+ break;
+ }
+ if (_fn) {
+ ret = _fn(nh, fn, cookie);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ ret = 0;
+done:
+ return ret;
+}
+
+static int __dump_class_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_class_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_class_nlmsg(cookie, t, tb);
+}
+
+static int netlink_get_class(int sock, unsigned int nl_pid, int ifindex,
+ dump_nlmsg_t dump_class_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETTCLASS,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_class_nlmsg,
+ dump_class_nlmsg, cookie);
+}
+
+static int __dump_qdisc_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_qdisc_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_qdisc_nlmsg(cookie, t, tb);
+}
+
+static int netlink_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
+ dump_nlmsg_t dump_qdisc_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETQDISC,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_qdisc_nlmsg,
+ dump_qdisc_nlmsg, cookie);
+}
+
+static int __dump_filter_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_filter_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_filter_nlmsg(cookie, t, tb);
+}
+
+static int netlink_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
+ dump_nlmsg_t dump_filter_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETTFILTER,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ .t.tcm_parent = handle,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_filter_nlmsg,
+ dump_filter_nlmsg, cookie);
+}
+
+static int __dump_link_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_link_nlmsg, void *cookie)
+{
+ struct nlattr *tb[IFLA_MAX + 1], *attr;
+ struct ifinfomsg *ifi = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi));
+ attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi)));
+ if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_link_nlmsg(cookie, ifi, tb);
+}
+
+static int netlink_get_link(int sock, unsigned int nl_pid,
+ dump_nlmsg_t dump_link_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct ifinfomsg ifm;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlh.nlmsg_type = RTM_GETLINK,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .ifm.ifi_family = AF_PACKET,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg,
+ dump_link_nlmsg, cookie);
+}
+
+static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
+{
+ struct bpf_netdev_t *netinfo = cookie;
+ struct ifinfomsg *ifinfo = msg;
+
+ if (netinfo->filter_idx > 0 && netinfo->filter_idx != ifinfo->ifi_index)
+ return 0;
+
+ if (netinfo->used_len == netinfo->array_len) {
+ netinfo->devices = realloc(netinfo->devices,
+ (netinfo->array_len + 16) *
+ sizeof(struct ip_devname_ifindex));
+ if (!netinfo->devices)
+ return -ENOMEM;
+
+ netinfo->array_len += 16;
+ }
+ netinfo->devices[netinfo->used_len].ifindex = ifinfo->ifi_index;
+ snprintf(netinfo->devices[netinfo->used_len].devname,
+ sizeof(netinfo->devices[netinfo->used_len].devname),
+ "%s",
+ tb[IFLA_IFNAME]
+ ? libbpf_nla_getattr_str(tb[IFLA_IFNAME])
+ : "");
+ netinfo->used_len++;
+
+ return do_xdp_dump(ifinfo, tb);
+}
+
+static int dump_class_qdisc_nlmsg(void *cookie, void *msg, struct nlattr **tb)
+{
+ struct bpf_tcinfo_t *tcinfo = cookie;
+ struct tcmsg *info = msg;
+
+ if (tcinfo->is_qdisc) {
+ /* skip clsact qdisc */
+ if (tb[TCA_KIND] &&
+ strcmp(libbpf_nla_data(tb[TCA_KIND]), "clsact") == 0)
+ return 0;
+ if (info->tcm_handle == 0)
+ return 0;
+ }
+
+ if (tcinfo->used_len == tcinfo->array_len) {
+ tcinfo->handle_array = realloc(tcinfo->handle_array,
+ (tcinfo->array_len + 16) * sizeof(struct tc_kind_handle));
+ if (!tcinfo->handle_array)
+ return -ENOMEM;
+
+ tcinfo->array_len += 16;
+ }
+ tcinfo->handle_array[tcinfo->used_len].handle = info->tcm_handle;
+ snprintf(tcinfo->handle_array[tcinfo->used_len].kind,
+ sizeof(tcinfo->handle_array[tcinfo->used_len].kind),
+ "%s",
+ tb[TCA_KIND]
+ ? libbpf_nla_getattr_str(tb[TCA_KIND])
+ : "unknown");
+ tcinfo->used_len++;
+
+ return 0;
+}
+
+static int dump_filter_nlmsg(void *cookie, void *msg, struct nlattr **tb)
+{
+ const struct bpf_filter_t *filter_info = cookie;
+
+ return do_filter_dump((struct tcmsg *)msg, tb, filter_info->kind,
+ filter_info->devname, filter_info->ifindex);
+}
+
+static int __show_dev_tc_bpf_name(__u32 id, char *name, size_t len)
+{
+ struct bpf_prog_info info = {};
+ __u32 ilen = sizeof(info);
+ int fd, ret;
+
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0)
+ return fd;
+ ret = bpf_obj_get_info_by_fd(fd, &info, &ilen);
+ if (ret < 0)
+ goto out;
+ ret = -ENOENT;
+ if (info.name[0]) {
+ get_prog_full_name(&info, fd, name, len);
+ ret = 0;
+ }
+out:
+ close(fd);
+ return ret;
+}
+
+static void __show_dev_tc_bpf(const struct ip_devname_ifindex *dev,
+ const enum bpf_attach_type loc)
+{
+ __u32 prog_flags[64] = {}, link_flags[64] = {}, i, j;
+ __u32 prog_ids[64] = {}, link_ids[64] = {};
+ LIBBPF_OPTS(bpf_prog_query_opts, optq);
+ char prog_name[MAX_PROG_FULL_NAME];
+ int ret;
+
+ optq.prog_ids = prog_ids;
+ optq.prog_attach_flags = prog_flags;
+ optq.link_ids = link_ids;
+ optq.link_attach_flags = link_flags;
+ optq.count = ARRAY_SIZE(prog_ids);
+
+ ret = bpf_prog_query_opts(dev->ifindex, loc, &optq);
+ if (ret)
+ return;
+ for (i = 0; i < optq.count; i++) {
+ NET_START_OBJECT;
+ NET_DUMP_STR("devname", "%s", dev->devname);
+ NET_DUMP_UINT("ifindex", "(%u)", dev->ifindex);
+ NET_DUMP_STR("kind", " %s", attach_loc_strings[loc]);
+ ret = __show_dev_tc_bpf_name(prog_ids[i], prog_name,
+ sizeof(prog_name));
+ if (!ret)
+ NET_DUMP_STR("name", " %s", prog_name);
+ NET_DUMP_UINT("prog_id", " prog_id %u ", prog_ids[i]);
+ if (prog_flags[i] || json_output) {
+ NET_START_ARRAY("prog_flags", "%s ");
+ for (j = 0; prog_flags[i] && j < 32; j++) {
+ if (!(prog_flags[i] & (1 << j)))
+ continue;
+ NET_DUMP_UINT_ONLY(1 << j);
+ }
+ NET_END_ARRAY("");
+ }
+ if (link_ids[i] || json_output) {
+ NET_DUMP_UINT("link_id", "link_id %u ", link_ids[i]);
+ if (link_flags[i] || json_output) {
+ NET_START_ARRAY("link_flags", "%s ");
+ for (j = 0; link_flags[i] && j < 32; j++) {
+ if (!(link_flags[i] & (1 << j)))
+ continue;
+ NET_DUMP_UINT_ONLY(1 << j);
+ }
+ NET_END_ARRAY("");
+ }
+ }
+ NET_END_OBJECT_FINAL;
+ }
+}
+
+static void show_dev_tc_bpf(struct ip_devname_ifindex *dev)
+{
+ __show_dev_tc_bpf(dev, BPF_TCX_INGRESS);
+ __show_dev_tc_bpf(dev, BPF_TCX_EGRESS);
+}
+
+static int show_dev_tc_bpf_classic(int sock, unsigned int nl_pid,
+ struct ip_devname_ifindex *dev)
+{
+ struct bpf_filter_t filter_info;
+ struct bpf_tcinfo_t tcinfo;
+ int i, handle, ret = 0;
+
+ tcinfo.handle_array = NULL;
+ tcinfo.used_len = 0;
+ tcinfo.array_len = 0;
+
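+	/* Collect filter parents in two passes: traffic classes first, then
+	 * qdiscs (the callback skips clsact and zero-handle qdiscs).
+	 */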
+ tcinfo.is_qdisc = false;
+ ret = netlink_get_class(sock, nl_pid, dev->ifindex,
+ dump_class_qdisc_nlmsg, &tcinfo);
+ if (ret)
+ goto out;
+
+ tcinfo.is_qdisc = true;
+ ret = netlink_get_qdisc(sock, nl_pid, dev->ifindex,
+ dump_class_qdisc_nlmsg, &tcinfo);
+ if (ret)
+ goto out;
+
+ filter_info.devname = dev->devname;
+ filter_info.ifindex = dev->ifindex;
+ for (i = 0; i < tcinfo.used_len; i++) {
+ filter_info.kind = tcinfo.handle_array[i].kind;
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex,
+ tcinfo.handle_array[i].handle,
+ dump_filter_nlmsg, &filter_info);
+ if (ret)
+ goto out;
+ }
+
+ /* root, ingress and egress handle */
+ handle = TC_H_ROOT;
+ filter_info.kind = "root";
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
+ if (ret)
+ goto out;
+
+ handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
+ filter_info.kind = "clsact/ingress";
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
+ if (ret)
+ goto out;
+
+ handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS);
+ filter_info.kind = "clsact/egress";
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
+ if (ret)
+ goto out;
+
+out:
+ free(tcinfo.handle_array);
+ return 0;
+}
+
+static int query_flow_dissector(struct bpf_attach_info *attach_info)
+{
+ __u32 attach_flags;
+ __u32 prog_ids[1];
+ __u32 prog_cnt;
+ int err;
+ int fd;
+
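+	/* Flow dissector programs attach to a network namespace, so query the
+	 * current netns through its /proc handle.
+	 */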
+ fd = open("/proc/self/ns/net", O_RDONLY);
+ if (fd < 0) {
+ p_err("can't open /proc/self/ns/net: %s",
+ strerror(errno));
+ return -1;
+ }
+ prog_cnt = ARRAY_SIZE(prog_ids);
+ err = bpf_prog_query(fd, BPF_FLOW_DISSECTOR, 0,
+ &attach_flags, prog_ids, &prog_cnt);
+ close(fd);
+ if (err) {
+ if (errno == EINVAL) {
+			/* Older kernels don't support querying
+ * flow dissector programs.
+ */
+ errno = 0;
+ return 0;
+ }
+ p_err("can't query prog: %s", strerror(errno));
+ return -1;
+ }
+
+ if (prog_cnt == 1)
+ attach_info->flow_dissector_id = prog_ids[0];
+
+ return 0;
+}
+
+static int net_parse_dev(int *argc, char ***argv)
+{
+ int ifindex;
+
+ if (is_prefix(**argv, "dev")) {
+ NEXT_ARGP();
+
+ ifindex = if_nametoindex(**argv);
+ if (!ifindex)
+ p_err("invalid devname %s", **argv);
+
+ NEXT_ARGP();
+ } else {
+ p_err("expected 'dev', got: '%s'?", **argv);
+ return -1;
+ }
+
+ return ifindex;
+}
+
+static int do_attach_detach_xdp(int progfd, enum net_attach_type attach_type,
+ int ifindex, bool overwrite)
+{
+ __u32 flags = 0;
+
+ if (!overwrite)
+ flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
+ if (attach_type == NET_ATTACH_TYPE_XDP_GENERIC)
+ flags |= XDP_FLAGS_SKB_MODE;
+ if (attach_type == NET_ATTACH_TYPE_XDP_DRIVER)
+ flags |= XDP_FLAGS_DRV_MODE;
+ if (attach_type == NET_ATTACH_TYPE_XDP_OFFLOAD)
+ flags |= XDP_FLAGS_HW_MODE;
+
+ return bpf_xdp_attach(ifindex, progfd, flags, NULL);
+}
+
+static int do_attach(int argc, char **argv)
+{
+ enum net_attach_type attach_type;
+ int progfd, ifindex, err = 0;
+ bool overwrite = false;
+
+ /* parse attach args */
+ if (!REQ_ARGS(5))
+ return -EINVAL;
+
+ attach_type = parse_attach_type(*argv);
+ if (attach_type == net_attach_type_size) {
+ p_err("invalid net attach/detach type: %s", *argv);
+ return -EINVAL;
+ }
+ NEXT_ARG();
+
+ progfd = prog_parse_fd(&argc, &argv);
+ if (progfd < 0)
+ return -EINVAL;
+
+ ifindex = net_parse_dev(&argc, &argv);
+ if (ifindex < 1) {
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ if (argc) {
+ if (is_prefix(*argv, "overwrite")) {
+ overwrite = true;
+ } else {
+ p_err("expected 'overwrite', got: '%s'?", *argv);
+ err = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+ /* attach xdp prog */
+ if (is_prefix("xdp", attach_type_strings[attach_type]))
+ err = do_attach_detach_xdp(progfd, attach_type, ifindex,
+ overwrite);
+ if (err) {
+ p_err("interface %s attach failed: %s",
+ attach_type_strings[attach_type], strerror(-err));
+ goto cleanup;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+cleanup:
+ close(progfd);
+ return err;
+}
+
+static int do_detach(int argc, char **argv)
+{
+ enum net_attach_type attach_type;
+ int progfd, ifindex, err = 0;
+
+ /* parse detach args */
+ if (!REQ_ARGS(3))
+ return -EINVAL;
+
+ attach_type = parse_attach_type(*argv);
+ if (attach_type == net_attach_type_size) {
+ p_err("invalid net attach/detach type: %s", *argv);
+ return -EINVAL;
+ }
+ NEXT_ARG();
+
+ ifindex = net_parse_dev(&argc, &argv);
+ if (ifindex < 1)
+ return -EINVAL;
+
+ /* detach xdp prog */
+ progfd = -1;
+ if (is_prefix("xdp", attach_type_strings[attach_type]))
+		err = do_attach_detach_xdp(progfd, attach_type, ifindex, false);
+
+ if (err < 0) {
+ p_err("interface %s detach failed: %s",
+ attach_type_strings[attach_type], strerror(-err));
+ return err;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ return 0;
+}
+
+static int netfilter_link_compar(const void *a, const void *b)
+{
+ const struct bpf_link_info *nfa = a;
+ const struct bpf_link_info *nfb = b;
+ int delta;
+
+ delta = nfa->netfilter.pf - nfb->netfilter.pf;
+ if (delta)
+ return delta;
+
+ delta = nfa->netfilter.hooknum - nfb->netfilter.hooknum;
+ if (delta)
+ return delta;
+
+ if (nfa->netfilter.priority < nfb->netfilter.priority)
+ return -1;
+ if (nfa->netfilter.priority > nfb->netfilter.priority)
+ return 1;
+
+ return nfa->netfilter.flags - nfb->netfilter.flags;
+}
+
+static void show_link_netfilter(void)
+{
+ unsigned int nf_link_len = 0, nf_link_count = 0;
+ struct bpf_link_info *nf_link_info = NULL;
+ __u32 id = 0;
+
+ while (true) {
+ struct bpf_link_info info;
+ int fd, err;
+ __u32 len;
+
+ err = bpf_link_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT)
+ break;
+ p_err("can't get next link: %s (id %d)", strerror(errno), id);
+ break;
+ }
+
+ fd = bpf_link_get_fd_by_id(id);
+ if (fd < 0) {
+ p_err("can't get link by id (%u): %s", id, strerror(errno));
+ continue;
+ }
+
+ memset(&info, 0, sizeof(info));
+ len = sizeof(info);
+
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+
+ close(fd);
+
+ if (err) {
+ p_err("can't get link info for fd %d: %s", fd, strerror(errno));
+ continue;
+ }
+
+ if (info.type != BPF_LINK_TYPE_NETFILTER)
+ continue;
+
+ if (nf_link_count >= nf_link_len) {
+ static const unsigned int max_link_count = INT_MAX / sizeof(info);
+ struct bpf_link_info *expand;
+
+ if (nf_link_count > max_link_count) {
+				p_err("cannot handle more than %u links", max_link_count);
+ break;
+ }
+
+ nf_link_len += 16;
+
+ expand = realloc(nf_link_info, nf_link_len * sizeof(info));
+ if (!expand) {
+ p_err("realloc: %s", strerror(errno));
+ break;
+ }
+
+ nf_link_info = expand;
+ }
+
+ nf_link_info[nf_link_count] = info;
+ nf_link_count++;
+ }
+
+ qsort(nf_link_info, nf_link_count, sizeof(*nf_link_info), netfilter_link_compar);
+
+ for (id = 0; id < nf_link_count; id++) {
+ NET_START_OBJECT;
+ if (json_output)
+ netfilter_dump_json(&nf_link_info[id], json_wtr);
+ else
+ netfilter_dump_plain(&nf_link_info[id]);
+
+ NET_DUMP_UINT("id", " prog_id %u", nf_link_info[id].prog_id);
+ NET_END_OBJECT;
+ }
+
+ free(nf_link_info);
+}
+
+static int do_show(int argc, char **argv)
+{
+ struct bpf_attach_info attach_info = {};
+ int i, sock, ret, filter_idx = -1;
+ struct bpf_netdev_t dev_array;
+ unsigned int nl_pid = 0;
+ char err_buf[256];
+
+ if (argc == 2) {
+ filter_idx = net_parse_dev(&argc, &argv);
+ if (filter_idx < 1)
+ return -1;
+ } else if (argc != 0) {
+ usage();
+ }
+
+ ret = query_flow_dissector(&attach_info);
+ if (ret)
+ return -1;
+
+ sock = netlink_open(&nl_pid);
+ if (sock < 0) {
+ fprintf(stderr, "failed to open netlink sock\n");
+ return -1;
+ }
+
+ dev_array.devices = NULL;
+ dev_array.used_len = 0;
+ dev_array.array_len = 0;
+ dev_array.filter_idx = filter_idx;
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ NET_START_OBJECT;
+ NET_START_ARRAY("xdp", "%s:\n");
+ ret = netlink_get_link(sock, nl_pid, dump_link_nlmsg, &dev_array);
+ NET_END_ARRAY("\n");
+
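+	/* The link dump above also filled dev_array; walk it for TC(X)
+	 * attachments.
+	 */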
+ if (!ret) {
+ NET_START_ARRAY("tc", "%s:\n");
+ for (i = 0; i < dev_array.used_len; i++) {
+ show_dev_tc_bpf(&dev_array.devices[i]);
+ ret = show_dev_tc_bpf_classic(sock, nl_pid,
+ &dev_array.devices[i]);
+ if (ret)
+ break;
+ }
+ NET_END_ARRAY("\n");
+ }
+
+ NET_START_ARRAY("flow_dissector", "%s:\n");
+ if (attach_info.flow_dissector_id > 0)
+ NET_DUMP_UINT("id", "id %u", attach_info.flow_dissector_id);
+ NET_END_ARRAY("\n");
+
+ NET_START_ARRAY("netfilter", "%s:\n");
+ show_link_netfilter();
+ NET_END_ARRAY("\n");
+
+ NET_END_OBJECT;
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ if (ret) {
+ if (json_output)
+ jsonw_null(json_wtr);
+ libbpf_strerror(ret, err_buf, sizeof(err_buf));
+ fprintf(stderr, "Error: %s\n", err_buf);
+ }
+ free(dev_array.devices);
+ close(sock);
+ return ret;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %1$s %2$s { show | list } [dev <devname>]\n"
+ " %1$s %2$s attach ATTACH_TYPE PROG dev <devname> [ overwrite ]\n"
+ " %1$s %2$s detach ATTACH_TYPE dev <devname>\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " " HELP_SPEC_PROGRAM "\n"
+ " ATTACH_TYPE := { xdp | xdpgeneric | xdpdrv | xdpoffload }\n"
+ " " HELP_SPEC_OPTIONS " }\n"
+ "\n"
+ "Note: Only xdp, tcx, tc, flow_dissector and netfilter attachments\n"
+ " are currently supported.\n"
+ " For progs attached to cgroups, use \"bpftool cgroup\"\n"
+ " to dump program attachments. For program types\n"
+ " sk_{filter,skb,msg,reuseport} and lwt/seg6, please\n"
+ " consult iproute2.\n"
+ "",
+ bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "attach", do_attach },
+ { "detach", do_detach },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_net(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/netlink_dumper.c b/tools/bpf/bpftool/netlink_dumper.c
new file mode 100644
index 0000000000..5f65140b00
--- /dev/null
+++ b/tools/bpf/bpftool/netlink_dumper.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (C) 2018 Facebook
+
+#include <stdlib.h>
+#include <string.h>
+#include <bpf/libbpf.h>
+#include <linux/rtnetlink.h>
+#include <linux/tc_act/tc_bpf.h>
+
+#include "bpf/nlattr.h"
+#include "main.h"
+#include "netlink_dumper.h"
+
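+/* In XDP multi-attachment mode each program is reported as its own JSON
+ * object inside the "multi_attachments" array; new_json_object tells this
+ * helper whether to open and close that per-program object.
+ */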
+static void xdp_dump_prog_id(struct nlattr **tb, int attr,
+ const char *mode,
+ bool new_json_object)
+{
+ if (!tb[attr])
+ return;
+
+ if (new_json_object)
+ NET_START_OBJECT
+ NET_DUMP_STR("mode", " %s", mode);
+ NET_DUMP_UINT("id", " id %u", libbpf_nla_getattr_u32(tb[attr]))
+ if (new_json_object)
+ NET_END_OBJECT
+}
+
+static int do_xdp_dump_one(struct nlattr *attr, unsigned int ifindex,
+ const char *name)
+{
+ struct nlattr *tb[IFLA_XDP_MAX + 1];
+ unsigned char mode;
+
+ if (libbpf_nla_parse_nested(tb, IFLA_XDP_MAX, attr, NULL) < 0)
+ return -1;
+
+ if (!tb[IFLA_XDP_ATTACHED])
+ return 0;
+
+ mode = libbpf_nla_getattr_u8(tb[IFLA_XDP_ATTACHED]);
+ if (mode == XDP_ATTACHED_NONE)
+ return 0;
+
+ NET_START_OBJECT;
+ if (name)
+ NET_DUMP_STR("devname", "%s", name);
+	NET_DUMP_UINT("ifindex", "(%u)", ifindex);
+
+ if (mode == XDP_ATTACHED_MULTI) {
+ if (json_output) {
+ jsonw_name(json_wtr, "multi_attachments");
+ jsonw_start_array(json_wtr);
+ }
+ xdp_dump_prog_id(tb, IFLA_XDP_SKB_PROG_ID, "generic", true);
+ xdp_dump_prog_id(tb, IFLA_XDP_DRV_PROG_ID, "driver", true);
+ xdp_dump_prog_id(tb, IFLA_XDP_HW_PROG_ID, "offload", true);
+ if (json_output)
+ jsonw_end_array(json_wtr);
+ } else if (mode == XDP_ATTACHED_DRV) {
+ xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "driver", false);
+ } else if (mode == XDP_ATTACHED_SKB) {
+ xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "generic", false);
+ } else if (mode == XDP_ATTACHED_HW) {
+ xdp_dump_prog_id(tb, IFLA_XDP_PROG_ID, "offload", false);
+ }
+
+ NET_END_OBJECT_FINAL;
+ return 0;
+}
+
+int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb)
+{
+ if (!tb[IFLA_XDP])
+ return 0;
+
+ return do_xdp_dump_one(tb[IFLA_XDP], ifinfo->ifi_index,
+ libbpf_nla_getattr_str(tb[IFLA_IFNAME]));
+}
+
+static int do_bpf_dump_one_act(struct nlattr *attr)
+{
+ struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+
+ if (libbpf_nla_parse_nested(tb, TCA_ACT_BPF_MAX, attr, NULL) < 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ if (!tb[TCA_ACT_BPF_PARMS])
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ NET_START_OBJECT_NESTED2;
+ if (tb[TCA_ACT_BPF_NAME])
+ NET_DUMP_STR("name", "%s",
+ libbpf_nla_getattr_str(tb[TCA_ACT_BPF_NAME]));
+ if (tb[TCA_ACT_BPF_ID])
+ NET_DUMP_UINT("id", " id %u",
+ libbpf_nla_getattr_u32(tb[TCA_ACT_BPF_ID]));
+ NET_END_OBJECT_NESTED;
+ return 0;
+}
+
+static int do_dump_one_act(struct nlattr *attr)
+{
+ struct nlattr *tb[TCA_ACT_MAX + 1];
+
+ if (!attr)
+ return 0;
+
+ if (libbpf_nla_parse_nested(tb, TCA_ACT_MAX, attr, NULL) < 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ if (tb[TCA_ACT_KIND] &&
+ strcmp(libbpf_nla_data(tb[TCA_ACT_KIND]), "bpf") == 0)
+ return do_bpf_dump_one_act(tb[TCA_ACT_OPTIONS]);
+
+ return 0;
+}
+
+static int do_bpf_act_dump(struct nlattr *attr)
+{
+ struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
+ int act, ret;
+
+ if (libbpf_nla_parse_nested(tb, TCA_ACT_MAX_PRIO, attr, NULL) < 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ NET_START_ARRAY("act", " %s [");
+ for (act = 0; act <= TCA_ACT_MAX_PRIO; act++) {
+ ret = do_dump_one_act(tb[act]);
+ if (ret)
+ break;
+ }
+ NET_END_ARRAY("] ");
+
+ return ret;
+}
+
+static int do_bpf_filter_dump(struct nlattr *attr)
+{
+ struct nlattr *tb[TCA_BPF_MAX + 1];
+ int ret;
+
+ if (libbpf_nla_parse_nested(tb, TCA_BPF_MAX, attr, NULL) < 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ if (tb[TCA_BPF_NAME])
+ NET_DUMP_STR("name", " %s",
+ libbpf_nla_getattr_str(tb[TCA_BPF_NAME]));
+ if (tb[TCA_BPF_ID])
+ NET_DUMP_UINT("id", " id %u",
+ libbpf_nla_getattr_u32(tb[TCA_BPF_ID]));
+ if (tb[TCA_BPF_ACT]) {
+ ret = do_bpf_act_dump(tb[TCA_BPF_ACT]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int do_filter_dump(struct tcmsg *info, struct nlattr **tb, const char *kind,
+ const char *devname, int ifindex)
+{
+ int ret = 0;
+
+	if (tb[TCA_KIND] && tb[TCA_OPTIONS] &&
+	    strcmp(libbpf_nla_data(tb[TCA_KIND]), "bpf") == 0) {
+ NET_START_OBJECT;
+ if (devname[0] != '\0')
+ NET_DUMP_STR("devname", "%s", devname);
+ NET_DUMP_UINT("ifindex", "(%u)", ifindex);
+ NET_DUMP_STR("kind", " %s", kind);
+ ret = do_bpf_filter_dump(tb[TCA_OPTIONS]);
+ NET_END_OBJECT_FINAL;
+ }
+
+ return ret;
+}
diff --git a/tools/bpf/bpftool/netlink_dumper.h b/tools/bpf/bpftool/netlink_dumper.h
new file mode 100644
index 0000000000..96318106fb
--- /dev/null
+++ b/tools/bpf/bpftool/netlink_dumper.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+// Copyright (C) 2018 Facebook
+
+#ifndef _NETLINK_DUMPER_H_
+#define _NETLINK_DUMPER_H_
+
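+/* These helpers emit either JSON (through json_wtr) or plain text, depending
+ * on the global json_output flag, so that dump code is written once for both
+ * output modes.
+ */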
+#define NET_START_OBJECT \
+{ \
+ if (json_output) \
+ jsonw_start_object(json_wtr); \
+}
+
+#define NET_START_OBJECT_NESTED(name) \
+{ \
+ if (json_output) { \
+ jsonw_name(json_wtr, name); \
+ jsonw_start_object(json_wtr); \
+ } else { \
+ fprintf(stdout, "%s {", name); \
+ } \
+}
+
+#define NET_START_OBJECT_NESTED2 \
+{ \
+ if (json_output) \
+ jsonw_start_object(json_wtr); \
+ else \
+ fprintf(stdout, "{"); \
+}
+
+#define NET_END_OBJECT_NESTED \
+{ \
+ if (json_output) \
+ jsonw_end_object(json_wtr); \
+ else \
+ fprintf(stdout, "}"); \
+}
+
+#define NET_END_OBJECT \
+{ \
+ if (json_output) \
+ jsonw_end_object(json_wtr); \
+}
+
+#define NET_END_OBJECT_FINAL \
+{ \
+ if (json_output) \
+ jsonw_end_object(json_wtr); \
+ else \
+ fprintf(stdout, "\n"); \
+}
+
+#define NET_START_ARRAY(name, fmt_str) \
+{ \
+ if (json_output) { \
+ jsonw_name(json_wtr, name); \
+ jsonw_start_array(json_wtr); \
+ } else { \
+ fprintf(stdout, fmt_str, name); \
+ } \
+}
+
+#define NET_END_ARRAY(endstr) \
+{ \
+ if (json_output) \
+ jsonw_end_array(json_wtr); \
+ else \
+ fprintf(stdout, "%s", endstr); \
+}
+
+#define NET_DUMP_UINT(name, fmt_str, val) \
+{ \
+ if (json_output) \
+ jsonw_uint_field(json_wtr, name, val); \
+ else \
+ fprintf(stdout, fmt_str, val); \
+}
+
+#define NET_DUMP_UINT_ONLY(str) \
+{ \
+ if (json_output) \
+ jsonw_uint(json_wtr, str); \
+ else \
+ fprintf(stdout, "%u ", str); \
+}
+
+#define NET_DUMP_STR(name, fmt_str, str) \
+{ \
+ if (json_output) \
+ jsonw_string_field(json_wtr, name, str);\
+ else \
+ fprintf(stdout, fmt_str, str); \
+}
+
+#define NET_DUMP_STR_ONLY(str) \
+{ \
+ if (json_output) \
+ jsonw_string(json_wtr, str); \
+ else \
+ fprintf(stdout, "%s ", str); \
+}
+
+#endif
diff --git a/tools/bpf/bpftool/perf.c b/tools/bpf/bpftool/perf.c
new file mode 100644
index 0000000000..80de2874da
--- /dev/null
+++ b/tools/bpf/bpftool/perf.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (C) 2018 Facebook
+// Author: Yonghong Song <yhs@fb.com>
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <dirent.h>
+
+#include <bpf/bpf.h>
+
+#include "main.h"
+
+/* 0: undecided, 1: supported, 2: not supported */
+static int perf_query_supported;
+static bool has_perf_query_support(void)
+{
+ __u64 probe_offset, probe_addr;
+ __u32 len, prog_id, fd_type;
+ char buf[256];
+ int fd;
+
+ if (perf_query_supported)
+ goto out;
+
+ fd = open("/", O_RDONLY);
+ if (fd < 0) {
+ p_err("perf_query_support: cannot open directory \"/\" (%s)",
+ strerror(errno));
+ goto out;
+ }
+
+	/* The following query will fail since there is no BPF attachment;
+	 * the expected errno is ENOTSUPP.
+	 */
+ errno = 0;
+ len = sizeof(buf);
+ bpf_task_fd_query(getpid(), fd, 0, buf, &len, &prog_id,
+ &fd_type, &probe_offset, &probe_addr);
+
+ if (errno == 524 /* ENOTSUPP */) {
+ perf_query_supported = 1;
+ goto close_fd;
+ }
+
+ perf_query_supported = 2;
+ p_err("perf_query_support: %s", strerror(errno));
+ fprintf(stderr,
+		"HINT: not running as root, or the kernel doesn't support TASK_FD_QUERY\n");
+
+close_fd:
+ close(fd);
+out:
+ return perf_query_supported == 1;
+}
+
+static void print_perf_json(int pid, int fd, __u32 prog_id, __u32 fd_type,
+ char *buf, __u64 probe_offset, __u64 probe_addr)
+{
+ jsonw_start_object(json_wtr);
+ jsonw_int_field(json_wtr, "pid", pid);
+ jsonw_int_field(json_wtr, "fd", fd);
+ jsonw_uint_field(json_wtr, "prog_id", prog_id);
+ switch (fd_type) {
+ case BPF_FD_TYPE_RAW_TRACEPOINT:
+ jsonw_string_field(json_wtr, "fd_type", "raw_tracepoint");
+ jsonw_string_field(json_wtr, "tracepoint", buf);
+ break;
+ case BPF_FD_TYPE_TRACEPOINT:
+ jsonw_string_field(json_wtr, "fd_type", "tracepoint");
+ jsonw_string_field(json_wtr, "tracepoint", buf);
+ break;
+ case BPF_FD_TYPE_KPROBE:
+ jsonw_string_field(json_wtr, "fd_type", "kprobe");
+ if (buf[0] != '\0') {
+ jsonw_string_field(json_wtr, "func", buf);
+ jsonw_lluint_field(json_wtr, "offset", probe_offset);
+ } else {
+ jsonw_lluint_field(json_wtr, "addr", probe_addr);
+ }
+ break;
+ case BPF_FD_TYPE_KRETPROBE:
+ jsonw_string_field(json_wtr, "fd_type", "kretprobe");
+ if (buf[0] != '\0') {
+ jsonw_string_field(json_wtr, "func", buf);
+ jsonw_lluint_field(json_wtr, "offset", probe_offset);
+ } else {
+ jsonw_lluint_field(json_wtr, "addr", probe_addr);
+ }
+ break;
+ case BPF_FD_TYPE_UPROBE:
+ jsonw_string_field(json_wtr, "fd_type", "uprobe");
+ jsonw_string_field(json_wtr, "filename", buf);
+ jsonw_lluint_field(json_wtr, "offset", probe_offset);
+ break;
+ case BPF_FD_TYPE_URETPROBE:
+ jsonw_string_field(json_wtr, "fd_type", "uretprobe");
+ jsonw_string_field(json_wtr, "filename", buf);
+ jsonw_lluint_field(json_wtr, "offset", probe_offset);
+ break;
+ default:
+ break;
+ }
+ jsonw_end_object(json_wtr);
+}
+
+static void print_perf_plain(int pid, int fd, __u32 prog_id, __u32 fd_type,
+ char *buf, __u64 probe_offset, __u64 probe_addr)
+{
+ printf("pid %d fd %d: prog_id %u ", pid, fd, prog_id);
+ switch (fd_type) {
+ case BPF_FD_TYPE_RAW_TRACEPOINT:
+ printf("raw_tracepoint %s\n", buf);
+ break;
+ case BPF_FD_TYPE_TRACEPOINT:
+ printf("tracepoint %s\n", buf);
+ break;
+ case BPF_FD_TYPE_KPROBE:
+ if (buf[0] != '\0')
+ printf("kprobe func %s offset %llu\n", buf,
+ probe_offset);
+ else
+ printf("kprobe addr %llu\n", probe_addr);
+ break;
+ case BPF_FD_TYPE_KRETPROBE:
+ if (buf[0] != '\0')
+ printf("kretprobe func %s offset %llu\n", buf,
+ probe_offset);
+ else
+ printf("kretprobe addr %llu\n", probe_addr);
+ break;
+ case BPF_FD_TYPE_UPROBE:
+ printf("uprobe filename %s offset %llu\n", buf, probe_offset);
+ break;
+ case BPF_FD_TYPE_URETPROBE:
+ printf("uretprobe filename %s offset %llu\n", buf,
+ probe_offset);
+ break;
+ default:
+ break;
+ }
+}
+
+static int show_proc(void)
+{
+ struct dirent *proc_de, *pid_fd_de;
+ __u64 probe_offset, probe_addr;
+ __u32 len, prog_id, fd_type;
+ DIR *proc, *pid_fd;
+ int err, pid, fd;
+ const char *pch;
+ char buf[4096];
+
+ proc = opendir("/proc");
+ if (!proc)
+ return -1;
+
+ while ((proc_de = readdir(proc))) {
+ pid = 0;
+ pch = proc_de->d_name;
+
+ /* pid should be all numbers */
+ while (isdigit(*pch)) {
+ pid = pid * 10 + *pch - '0';
+ pch++;
+ }
+ if (*pch != '\0')
+ continue;
+
+ err = snprintf(buf, sizeof(buf), "/proc/%s/fd", proc_de->d_name);
+ if (err < 0 || err >= (int)sizeof(buf))
+ continue;
+
+ pid_fd = opendir(buf);
+ if (!pid_fd)
+ continue;
+
+ while ((pid_fd_de = readdir(pid_fd))) {
+ fd = 0;
+ pch = pid_fd_de->d_name;
+
+ /* fd should be all numbers */
+ while (isdigit(*pch)) {
+ fd = fd * 10 + *pch - '0';
+ pch++;
+ }
+ if (*pch != '\0')
+ continue;
+
+ /* query (pid, fd) for potential perf events */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(pid, fd, 0, buf, &len,
+ &prog_id, &fd_type,
+ &probe_offset, &probe_addr);
+ if (err < 0)
+ continue;
+
+ if (json_output)
+ print_perf_json(pid, fd, prog_id, fd_type, buf,
+ probe_offset, probe_addr);
+ else
+ print_perf_plain(pid, fd, prog_id, fd_type, buf,
+ probe_offset, probe_addr);
+ }
+ closedir(pid_fd);
+ }
+ closedir(proc);
+ return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+ int err;
+
+ if (!has_perf_query_support())
+ return -1;
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ err = show_proc();
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ return err;
+}
+
+static int do_help(int argc, char **argv)
+{
+ fprintf(stderr,
+ "Usage: %1$s %2$s { show | list }\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " " HELP_SPEC_OPTIONS " }\n"
+ "",
+ bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_perf(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c
new file mode 100644
index 0000000000..00c77edb63
--- /dev/null
+++ b/tools/bpf/bpftool/pids.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2020 Facebook */
+#include <errno.h>
+#include <linux/err.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/hashmap.h>
+
+#include "main.h"
+#include "skeleton/pid_iter.h"
+
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+
+int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
+{
+ return -ENOTSUP;
+}
+void delete_obj_refs_table(struct hashmap *map) {}
+void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) {}
+void emit_obj_refs_json(struct hashmap *map, __u32 id, json_writer_t *json_writer) {}
+
+#else /* BPFTOOL_WITHOUT_SKELETONS */
+
+#include "pid_iter.skel.h"
+
+static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
+{
+ struct hashmap_entry *entry;
+ struct obj_refs *refs;
+ struct obj_ref *ref;
+ int err, i;
+ void *tmp;
+
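+	/* The hashmap is keyed by BPF object ID: if this ID is already
+	 * tracked, grow its refs array unless this PID is recorded already.
+	 */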
+ hashmap__for_each_key_entry(map, entry, e->id) {
+ refs = entry->pvalue;
+
+ for (i = 0; i < refs->ref_cnt; i++) {
+ if (refs->refs[i].pid == e->pid)
+ return;
+ }
+
+ tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref));
+ if (!tmp) {
+ p_err("failed to re-alloc memory for ID %u, PID %d, COMM %s...",
+ e->id, e->pid, e->comm);
+ return;
+ }
+ refs->refs = tmp;
+ ref = &refs->refs[refs->ref_cnt];
+ ref->pid = e->pid;
+ memcpy(ref->comm, e->comm, sizeof(ref->comm));
+ refs->ref_cnt++;
+
+ return;
+ }
+
+ /* new ref */
+ refs = calloc(1, sizeof(*refs));
+ if (!refs) {
+ p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
+ e->id, e->pid, e->comm);
+ return;
+ }
+
+ refs->refs = malloc(sizeof(*refs->refs));
+ if (!refs->refs) {
+ free(refs);
+ p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
+ e->id, e->pid, e->comm);
+ return;
+ }
+ ref = &refs->refs[0];
+ ref->pid = e->pid;
+ memcpy(ref->comm, e->comm, sizeof(ref->comm));
+ refs->ref_cnt = 1;
+ refs->has_bpf_cookie = e->has_bpf_cookie;
+ refs->bpf_cookie = e->bpf_cookie;
+
+ err = hashmap__append(map, e->id, refs);
+ if (err)
+ p_err("failed to append entry to hashmap for ID %u: %s",
+ e->id, strerror(errno));
+}
+
+static int __printf(2, 0)
+libbpf_print_none(__maybe_unused enum libbpf_print_level level,
+ __maybe_unused const char *format,
+ __maybe_unused va_list args)
+{
+ return 0;
+}
+
+int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
+{
+ struct pid_iter_entry *e;
+	char buf[4096 / sizeof(*e) * sizeof(*e)]; /* whole number of entries */
+ struct pid_iter_bpf *skel;
+ int err, ret, fd = -1, i;
+ libbpf_print_fn_t default_print;
+
+ *map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
+ if (IS_ERR(*map)) {
+ p_err("failed to create hashmap for PID references");
+ return -1;
+ }
+ set_max_rlimit();
+
+ skel = pid_iter_bpf__open();
+ if (!skel) {
+ p_err("failed to open PID iterator skeleton");
+ return -1;
+ }
+
+ skel->rodata->obj_type = type;
+
+ /* we don't want output polluted with libbpf errors if bpf_iter is not
+ * supported
+ */
+ default_print = libbpf_set_print(libbpf_print_none);
+ err = pid_iter_bpf__load(skel);
+ libbpf_set_print(default_print);
+ if (err) {
+ /* too bad, kernel doesn't support BPF iterators yet */
+ err = 0;
+ goto out;
+ }
+ err = pid_iter_bpf__attach(skel);
+ if (err) {
+ /* if we loaded above successfully, attach has to succeed */
+ p_err("failed to attach PID iterator: %d", err);
+ goto out;
+ }
+
+ fd = bpf_iter_create(bpf_link__fd(skel->links.iter));
+ if (fd < 0) {
+ err = -errno;
+ p_err("failed to create PID iterator session: %d", err);
+ goto out;
+ }
+
+ while (true) {
+ ret = read(fd, buf, sizeof(buf));
+ if (ret < 0) {
+ if (errno == EAGAIN)
+ continue;
+ err = -errno;
+ p_err("failed to read PID iterator output: %d", err);
+ goto out;
+ }
+ if (ret == 0)
+ break;
+ if (ret % sizeof(*e)) {
+ err = -EINVAL;
+ p_err("invalid PID iterator output format");
+ goto out;
+ }
+ ret /= sizeof(*e);
+
+ e = (void *)buf;
+		for (i = 0; i < ret; i++, e++)
+			add_ref(*map, e);
+ }
+ err = 0;
+out:
+ if (fd >= 0)
+ close(fd);
+ pid_iter_bpf__destroy(skel);
+ return err;
+}
+
+void delete_obj_refs_table(struct hashmap *map)
+{
+ struct hashmap_entry *entry;
+ size_t bkt;
+
+ if (!map)
+ return;
+
+ hashmap__for_each_entry(map, entry, bkt) {
+ struct obj_refs *refs = entry->pvalue;
+
+ free(refs->refs);
+ free(refs);
+ }
+
+ hashmap__free(map);
+}
+
+void emit_obj_refs_json(struct hashmap *map, __u32 id,
+ json_writer_t *json_writer)
+{
+ struct hashmap_entry *entry;
+
+ if (hashmap__empty(map))
+ return;
+
+ hashmap__for_each_key_entry(map, entry, id) {
+ struct obj_refs *refs = entry->pvalue;
+ int i;
+
+ if (refs->ref_cnt == 0)
+ break;
+
+ if (refs->has_bpf_cookie)
+ jsonw_lluint_field(json_writer, "bpf_cookie", refs->bpf_cookie);
+
+ jsonw_name(json_writer, "pids");
+ jsonw_start_array(json_writer);
+ for (i = 0; i < refs->ref_cnt; i++) {
+ struct obj_ref *ref = &refs->refs[i];
+
+ jsonw_start_object(json_writer);
+ jsonw_int_field(json_writer, "pid", ref->pid);
+ jsonw_string_field(json_writer, "comm", ref->comm);
+ jsonw_end_object(json_writer);
+ }
+ jsonw_end_array(json_writer);
+ break;
+ }
+}
+
+void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix)
+{
+ struct hashmap_entry *entry;
+
+ if (hashmap__empty(map))
+ return;
+
+ hashmap__for_each_key_entry(map, entry, id) {
+ struct obj_refs *refs = entry->pvalue;
+ int i;
+
+ if (refs->ref_cnt == 0)
+ break;
+
+ if (refs->has_bpf_cookie)
+ printf("\n\tbpf_cookie %llu", (unsigned long long) refs->bpf_cookie);
+
+ printf("%s", prefix);
+ for (i = 0; i < refs->ref_cnt; i++) {
+ struct obj_ref *ref = &refs->refs[i];
+
+ printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid);
+ }
+ break;
+ }
+}
+
+#endif /* BPFTOOL_WITHOUT_SKELETONS */
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
new file mode 100644
index 0000000000..8443a149dd
--- /dev/null
+++ b/tools/bpf/bpftool/prog.c
@@ -0,0 +1,2514 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <dirent.h>
+
+#include <linux/err.h>
+#include <linux/perf_event.h>
+#include <linux/sizes.h>
+
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/hashmap.h>
+#include <bpf/libbpf.h>
+#include <bpf/libbpf_internal.h>
+#include <bpf/skel_internal.h>
+
+#include "cfg.h"
+#include "main.h"
+#include "xlated_dumper.h"
+
+#define BPF_METADATA_PREFIX "bpf_metadata_"
+#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
+
+enum dump_mode {
+ DUMP_JITED,
+ DUMP_XLATED,
+};
+
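+/* Attach types accepted by "bpftool prog attach|detach" */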
+static const bool attach_types[] = {
+ [BPF_SK_SKB_STREAM_PARSER] = true,
+ [BPF_SK_SKB_STREAM_VERDICT] = true,
+ [BPF_SK_SKB_VERDICT] = true,
+ [BPF_SK_MSG_VERDICT] = true,
+ [BPF_FLOW_DISSECTOR] = true,
+ [__MAX_BPF_ATTACH_TYPE] = false,
+};
+
+/* Textual representations traditionally used by the program and kept around
+ * for the sake of backwards compatibility.
+ */
+static const char * const attach_type_strings[] = {
+ [BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
+ [BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
+ [BPF_SK_SKB_VERDICT] = "skb_verdict",
+ [BPF_SK_MSG_VERDICT] = "msg_verdict",
+ [__MAX_BPF_ATTACH_TYPE] = NULL,
+};
+
+static struct hashmap *prog_table;
+
+static enum bpf_attach_type parse_attach_type(const char *str)
+{
+ enum bpf_attach_type type;
+
+ for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ if (attach_types[type]) {
+ const char *attach_type_str;
+
+ attach_type_str = libbpf_bpf_attach_type_str(type);
+ if (!strcmp(str, attach_type_str))
+ return type;
+ }
+
+ if (attach_type_strings[type] &&
+ is_prefix(str, attach_type_strings[type]))
+ return type;
+ }
+
+ return __MAX_BPF_ATTACH_TYPE;
+}
+
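+/* Given an info struct whose array counts were filled in by a first
+ * bpf_prog_get_info_by_fd() call, grow a single buffer to hold all of the
+ * variable-length members and point the info fields at slices of it, so
+ * that a second call can fill in the actual data.
+ */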
+static int prep_prog_info(struct bpf_prog_info *const info, enum dump_mode mode,
+ void **info_data, size_t *const info_data_sz)
+{
+ struct bpf_prog_info holder = {};
+ size_t needed = 0;
+ void *ptr;
+
+ if (mode == DUMP_JITED) {
+ holder.jited_prog_len = info->jited_prog_len;
+ needed += info->jited_prog_len;
+ } else {
+ holder.xlated_prog_len = info->xlated_prog_len;
+ needed += info->xlated_prog_len;
+ }
+
+ holder.nr_jited_ksyms = info->nr_jited_ksyms;
+ needed += info->nr_jited_ksyms * sizeof(__u64);
+
+ holder.nr_jited_func_lens = info->nr_jited_func_lens;
+ needed += info->nr_jited_func_lens * sizeof(__u32);
+
+ holder.nr_func_info = info->nr_func_info;
+ holder.func_info_rec_size = info->func_info_rec_size;
+ needed += info->nr_func_info * info->func_info_rec_size;
+
+ holder.nr_line_info = info->nr_line_info;
+ holder.line_info_rec_size = info->line_info_rec_size;
+ needed += info->nr_line_info * info->line_info_rec_size;
+
+ holder.nr_jited_line_info = info->nr_jited_line_info;
+ holder.jited_line_info_rec_size = info->jited_line_info_rec_size;
+ needed += info->nr_jited_line_info * info->jited_line_info_rec_size;
+
+ if (needed > *info_data_sz) {
+ ptr = realloc(*info_data, needed);
+ if (!ptr)
+ return -1;
+
+ *info_data = ptr;
+ *info_data_sz = needed;
+ }
+ ptr = *info_data;
+
+ if (mode == DUMP_JITED) {
+ holder.jited_prog_insns = ptr_to_u64(ptr);
+ ptr += holder.jited_prog_len;
+ } else {
+ holder.xlated_prog_insns = ptr_to_u64(ptr);
+ ptr += holder.xlated_prog_len;
+ }
+
+ holder.jited_ksyms = ptr_to_u64(ptr);
+ ptr += holder.nr_jited_ksyms * sizeof(__u64);
+
+ holder.jited_func_lens = ptr_to_u64(ptr);
+ ptr += holder.nr_jited_func_lens * sizeof(__u32);
+
+ holder.func_info = ptr_to_u64(ptr);
+ ptr += holder.nr_func_info * holder.func_info_rec_size;
+
+ holder.line_info = ptr_to_u64(ptr);
+ ptr += holder.nr_line_info * holder.line_info_rec_size;
+
+ holder.jited_line_info = ptr_to_u64(ptr);
+ ptr += holder.nr_jited_line_info * holder.jited_line_info_rec_size;
+
+ *info = holder;
+ return 0;
+}
+
+static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
+{
+ struct timespec real_time_ts, boot_time_ts;
+ time_t wallclock_secs;
+ struct tm load_tm;
+
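+ /* reserve the last byte of the buffer for the terminating NUL */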
+ buf[--size] = '\0';
+
+ if (clock_gettime(CLOCK_REALTIME, &real_time_ts) ||
+ clock_gettime(CLOCK_BOOTTIME, &boot_time_ts)) {
+ perror("Can't read clocks");
+ snprintf(buf, size, "%llu", nsecs / 1000000000);
+ return;
+ }
+
+ wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
+ (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
+ 1000000000;
+
+ if (!localtime_r(&wallclock_secs, &load_tm)) {
+ snprintf(buf, size, "%llu", nsecs / 1000000000);
+ return;
+ }
+
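+ /* "%s" yields seconds since the Epoch for JSON; ISO 8601-like otherwise */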
+ if (json_output)
+ strftime(buf, size, "%s", &load_tm);
+ else
+ strftime(buf, size, "%FT%T%z", &load_tm);
+}
+
+static void show_prog_maps(int fd, __u32 num_maps)
+{
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+ __u32 map_ids[num_maps];
+ unsigned int i;
+ int err;
+
+ info.nr_map_ids = num_maps;
+ info.map_ids = ptr_to_u64(map_ids);
+
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
+ if (err || !info.nr_map_ids)
+ return;
+
+ if (json_output) {
+ jsonw_name(json_wtr, "map_ids");
+ jsonw_start_array(json_wtr);
+ for (i = 0; i < info.nr_map_ids; i++)
+ jsonw_uint(json_wtr, map_ids[i]);
+ jsonw_end_array(json_wtr);
+ } else {
+ printf(" map_ids ");
+ for (i = 0; i < info.nr_map_ids; i++)
+ printf("%u%s", map_ids[i],
+ i == info.nr_map_ids - 1 ? "" : ",");
+ }
+}
+
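+/* Look through the maps referenced by the program for a single-entry
+ * ".rodata" array map with BTF; return a malloc()-ed copy of its value,
+ * which may hold "bpf_metadata_"-prefixed variables. The caller frees it.
+ */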
+static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
+{
+ struct bpf_prog_info prog_info;
+ __u32 prog_info_len;
+ __u32 map_info_len;
+ void *value = NULL;
+ __u32 *map_ids;
+ int nr_maps;
+ int key = 0;
+ int map_fd;
+ int ret;
+ __u32 i;
+
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info_len = sizeof(prog_info);
+ ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret)
+ return NULL;
+
+ if (!prog_info.nr_map_ids)
+ return NULL;
+
+ map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
+ if (!map_ids)
+ return NULL;
+
+ nr_maps = prog_info.nr_map_ids;
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info.nr_map_ids = nr_maps;
+ prog_info.map_ids = ptr_to_u64(map_ids);
+ prog_info_len = sizeof(prog_info);
+
+ ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret)
+ goto free_map_ids;
+
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ map_fd = bpf_map_get_fd_by_id(map_ids[i]);
+ if (map_fd < 0)
+ goto free_map_ids;
+
+ memset(map_info, 0, sizeof(*map_info));
+ map_info_len = sizeof(*map_info);
+ ret = bpf_map_get_info_by_fd(map_fd, map_info, &map_info_len);
+ if (ret < 0) {
+ close(map_fd);
+ goto free_map_ids;
+ }
+
+ if (map_info->type != BPF_MAP_TYPE_ARRAY ||
+ map_info->key_size != sizeof(int) ||
+ map_info->max_entries != 1 ||
+ !map_info->btf_value_type_id ||
+ !strstr(map_info->name, ".rodata")) {
+ close(map_fd);
+ continue;
+ }
+
+ value = malloc(map_info->value_size);
+ if (!value) {
+ close(map_fd);
+ goto free_map_ids;
+ }
+
+ if (bpf_map_lookup_elem(map_fd, &key, value)) {
+ close(map_fd);
+ free(value);
+ value = NULL;
+ goto free_map_ids;
+ }
+
+ close(map_fd);
+ break;
+ }
+
+free_map_ids:
+ free(map_ids);
+ return value;
+}
+
+static bool has_metadata_prefix(const char *s)
+{
+ return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
+}
+
+static void show_prog_metadata(int fd, __u32 num_maps)
+{
+ const struct btf_type *t_datasec, *t_var;
+ struct bpf_map_info map_info;
+ struct btf_var_secinfo *vsi;
+ bool printed_header = false;
+ unsigned int i, vlen;
+ void *value = NULL;
+ const char *name;
+ struct btf *btf;
+ int err;
+
+ if (!num_maps)
+ return;
+
+ memset(&map_info, 0, sizeof(map_info));
+ value = find_metadata(fd, &map_info);
+ if (!value)
+ return;
+
+ btf = btf__load_from_kernel_by_id(map_info.btf_id);
+ if (!btf)
+ goto out_free;
+
+ t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
+ if (!btf_is_datasec(t_datasec))
+ goto out_free;
+
+ vlen = btf_vlen(t_datasec);
+ vsi = btf_var_secinfos(t_datasec);
+
+ /* We don't proceed to check the kinds of the elements of the DATASEC.
+ * The verifier enforces them to be BTF_KIND_VAR.
+ */
+
+ if (json_output) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = json_wtr,
+ .is_plain_text = false,
+ };
+
+ for (i = 0; i < vlen; i++, vsi++) {
+ t_var = btf__type_by_id(btf, vsi->type);
+ name = btf__name_by_offset(btf, t_var->name_off);
+
+ if (!has_metadata_prefix(name))
+ continue;
+
+ if (!printed_header) {
+ jsonw_name(json_wtr, "metadata");
+ jsonw_start_object(json_wtr);
+ printed_header = true;
+ }
+
+ jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
+ err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
+ if (err) {
+ p_err("btf dump failed: %d", err);
+ break;
+ }
+ }
+ if (printed_header)
+ jsonw_end_object(json_wtr);
+ } else {
+ json_writer_t *btf_wtr;
+ struct btf_dumper d = {
+ .btf = btf,
+ .is_plain_text = true,
+ };
+
+ for (i = 0; i < vlen; i++, vsi++) {
+ t_var = btf__type_by_id(btf, vsi->type);
+ name = btf__name_by_offset(btf, t_var->name_off);
+
+ if (!has_metadata_prefix(name))
+ continue;
+
+ if (!printed_header) {
+ printf("\tmetadata:");
+
+ btf_wtr = jsonw_new(stdout);
+ if (!btf_wtr) {
+ p_err("jsonw alloc failed");
+ goto out_free;
+ }
+ d.jw = btf_wtr;
+
+ printed_header = true;
+ }
+
+ printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
+
+ jsonw_reset(btf_wtr);
+ err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
+ if (err) {
+ p_err("btf dump failed: %d", err);
+ break;
+ }
+ }
+ if (printed_header)
+ jsonw_destroy(&btf_wtr);
+ }
+
+out_free:
+ btf__free(btf);
+ free(value);
+}
+
+static void print_prog_header_json(struct bpf_prog_info *info, int fd)
+{
+ const char *prog_type_str;
+ char prog_name[MAX_PROG_FULL_NAME];
+
+ jsonw_uint_field(json_wtr, "id", info->id);
+ prog_type_str = libbpf_bpf_prog_type_str(info->type);
+
+ if (prog_type_str)
+ jsonw_string_field(json_wtr, "type", prog_type_str);
+ else
+ jsonw_uint_field(json_wtr, "type", info->type);
+
+ if (*info->name) {
+ get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
+ jsonw_string_field(json_wtr, "name", prog_name);
+ }
+
+ jsonw_name(json_wtr, "tag");
+ jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
+ info->tag[0], info->tag[1], info->tag[2], info->tag[3],
+ info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
+
+ jsonw_bool_field(json_wtr, "gpl_compatible", info->gpl_compatible);
+ if (info->run_time_ns) {
+ jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
+ jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
+ }
+ if (info->recursion_misses)
+ jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
+}
+
+static void print_prog_json(struct bpf_prog_info *info, int fd)
+{
+ char *memlock;
+
+ jsonw_start_object(json_wtr);
+ print_prog_header_json(info, fd);
+ print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
+
+ if (info->load_time) {
+ char buf[32];
+
+ print_boot_time(info->load_time, buf, sizeof(buf));
+
+ /* Piggy-back on load_time: uid 0 is valid, so it can't mark "unset" */
+ jsonw_name(json_wtr, "loaded_at");
+ jsonw_printf(json_wtr, "%s", buf);
+ jsonw_uint_field(json_wtr, "uid", info->created_by_uid);
+ }
+
+ jsonw_uint_field(json_wtr, "bytes_xlated", info->xlated_prog_len);
+
+ if (info->jited_prog_len) {
+ jsonw_bool_field(json_wtr, "jited", true);
+ jsonw_uint_field(json_wtr, "bytes_jited", info->jited_prog_len);
+ } else {
+ jsonw_bool_field(json_wtr, "jited", false);
+ }
+
+ memlock = get_fdinfo(fd, "memlock");
+ if (memlock)
+ jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
+ free(memlock);
+
+ if (info->nr_map_ids)
+ show_prog_maps(fd, info->nr_map_ids);
+
+ if (info->btf_id)
+ jsonw_int_field(json_wtr, "btf_id", info->btf_id);
+
+ if (!hashmap__empty(prog_table)) {
+ struct hashmap_entry *entry;
+
+ jsonw_name(json_wtr, "pinned");
+ jsonw_start_array(json_wtr);
+ hashmap__for_each_key_entry(prog_table, entry, info->id)
+ jsonw_string(json_wtr, entry->pvalue);
+ jsonw_end_array(json_wtr);
+ }
+
+ emit_obj_refs_json(refs_table, info->id, json_wtr);
+
+ show_prog_metadata(fd, info->nr_map_ids);
+
+ jsonw_end_object(json_wtr);
+}
+
+static void print_prog_header_plain(struct bpf_prog_info *info, int fd)
+{
+ const char *prog_type_str;
+ char prog_name[MAX_PROG_FULL_NAME];
+
+ printf("%u: ", info->id);
+ prog_type_str = libbpf_bpf_prog_type_str(info->type);
+ if (prog_type_str)
+ printf("%s ", prog_type_str);
+ else
+ printf("type %u ", info->type);
+
+ if (*info->name) {
+ get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
+ printf("name %s ", prog_name);
+ }
+
+ printf("tag ");
+ fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
+ print_dev_plain(info->ifindex, info->netns_dev, info->netns_ino);
+ printf("%s", info->gpl_compatible ? " gpl" : "");
+ if (info->run_time_ns)
+ printf(" run_time_ns %lld run_cnt %lld",
+ info->run_time_ns, info->run_cnt);
+ if (info->recursion_misses)
+ printf(" recursion_misses %lld", info->recursion_misses);
+ printf("\n");
+}
+
+static void print_prog_plain(struct bpf_prog_info *info, int fd)
+{
+ char *memlock;
+
+ print_prog_header_plain(info, fd);
+
+ if (info->load_time) {
+ char buf[32];
+
+ print_boot_time(info->load_time, buf, sizeof(buf));
+
+ /* Piggy-back on load_time: uid 0 is valid, so it can't mark "unset" */
+ printf("\tloaded_at %s uid %u\n", buf, info->created_by_uid);
+ }
+
+ printf("\txlated %uB", info->xlated_prog_len);
+
+ if (info->jited_prog_len)
+ printf(" jited %uB", info->jited_prog_len);
+ else
+ printf(" not jited");
+
+ memlock = get_fdinfo(fd, "memlock");
+ if (memlock)
+ printf(" memlock %sB", memlock);
+ free(memlock);
+
+ if (info->nr_map_ids)
+ show_prog_maps(fd, info->nr_map_ids);
+
+ if (!hashmap__empty(prog_table)) {
+ struct hashmap_entry *entry;
+
+ hashmap__for_each_key_entry(prog_table, entry, info->id)
+ printf("\n\tpinned %s", (char *)entry->pvalue);
+ }
+
+ if (info->btf_id)
+ printf("\n\tbtf_id %d", info->btf_id);
+
+ emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
+
+ printf("\n");
+
+ show_prog_metadata(fd, info->nr_map_ids);
+}
+
+static int show_prog(int fd)
+{
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+ int err;
+
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get prog info: %s", strerror(errno));
+ return -1;
+ }
+
+ if (json_output)
+ print_prog_json(&info, fd);
+ else
+ print_prog_plain(&info, fd);
+
+ return 0;
+}
+
+static int do_show_subset(int argc, char **argv)
+{
+ int *fds = NULL;
+ int nb_fds, i;
+ int err = -1;
+
+ fds = malloc(sizeof(int));
+ if (!fds) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+ nb_fds = prog_parse_fds(&argc, &argv, &fds);
+ if (nb_fds < 1)
+ goto exit_free;
+
+ if (json_output && nb_fds > 1)
+ jsonw_start_array(json_wtr); /* root array */
+ for (i = 0; i < nb_fds; i++) {
+ err = show_prog(fds[i]);
+ if (err) {
+ for (; i < nb_fds; i++)
+ close(fds[i]);
+ break;
+ }
+ close(fds[i]);
+ }
+ if (json_output && nb_fds > 1)
+ jsonw_end_array(json_wtr); /* root array */
+
+exit_free:
+ free(fds);
+ return err;
+}
+
+static int do_show(int argc, char **argv)
+{
+ __u32 id = 0;
+ int err;
+ int fd;
+
+ if (show_pinned) {
+ prog_table = hashmap__new(hash_fn_for_key_as_id,
+ equal_fn_for_key_as_id, NULL);
+ if (IS_ERR(prog_table)) {
+ p_err("failed to create hashmap for pinned paths");
+ return -1;
+ }
+ build_pinned_obj_table(prog_table, BPF_OBJ_PROG);
+ }
+ build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
+
+ if (argc == 2)
+ return do_show_subset(argc, argv);
+
+ if (argc)
+ return BAD_ARG();
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ while (true) {
+ err = bpf_prog_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT) {
+ err = 0;
+ break;
+ }
+ p_err("can't get next program: %s%s", strerror(errno),
+ errno == EINVAL ? " -- kernel too old?" : "");
+ err = -1;
+ break;
+ }
+
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
+ p_err("can't get prog by id (%u): %s",
+ id, strerror(errno));
+ err = -1;
+ break;
+ }
+
+ err = show_prog(fd);
+ close(fd);
+ if (err)
+ break;
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ delete_obj_refs_table(refs_table);
+
+ if (show_pinned)
+ delete_pinned_obj_table(prog_table);
+
+ return err;
+}
+
+static int
+prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
+ char *filepath, bool opcodes, bool visual, bool linum)
+{
+ struct bpf_prog_linfo *prog_linfo = NULL;
+ const char *disasm_opt = NULL;
+ struct dump_data dd = {};
+ void *func_info = NULL;
+ struct btf *btf = NULL;
+ char func_sig[1024];
+ unsigned char *buf;
+ __u32 member_len;
+ int fd, err = -1;
+ ssize_t n;
+
+ if (mode == DUMP_JITED) {
+ if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
+ p_info("no instructions returned");
+ return -1;
+ }
+ buf = u64_to_ptr(info->jited_prog_insns);
+ member_len = info->jited_prog_len;
+ } else { /* DUMP_XLATED */
+ if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
+ p_err("error retrieving insn dump: kernel.kptr_restrict set?");
+ return -1;
+ }
+ buf = u64_to_ptr(info->xlated_prog_insns);
+ member_len = info->xlated_prog_len;
+ }
+
+ if (info->btf_id) {
+ btf = btf__load_from_kernel_by_id(info->btf_id);
+ if (!btf) {
+ p_err("failed to get btf");
+ return -1;
+ }
+ }
+
+ func_info = u64_to_ptr(info->func_info);
+
+ if (info->nr_line_info) {
+ prog_linfo = bpf_prog_linfo__new(info);
+ if (!prog_linfo)
+ p_info("error in processing bpf_line_info. continue without it.");
+ }
+
+ if (filepath) {
+ fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
+ if (fd < 0) {
+ p_err("can't open file %s: %s", filepath,
+ strerror(errno));
+ goto exit_free;
+ }
+
+ n = write(fd, buf, member_len);
+ close(fd);
+ if (n != (ssize_t)member_len) {
+ p_err("error writing output file: %s",
+ n < 0 ? strerror(errno) : "short write");
+ goto exit_free;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+ } else if (mode == DUMP_JITED) {
+ const char *name = NULL;
+
+ if (info->ifindex) {
+ name = ifindex_to_arch(info->ifindex, info->netns_dev,
+ info->netns_ino, &disasm_opt);
+ if (!name)
+ goto exit_free;
+ }
+
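+ /* Multi-function programs: dump each sub-program's JITed image
+ * separately, resolving symbol names through kallsyms when available.
+ */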
+ if (info->nr_jited_func_lens && info->jited_func_lens) {
+ struct kernel_sym *sym = NULL;
+ struct bpf_func_info *record;
+ char sym_name[SYM_MAX_NAME];
+ unsigned char *img = buf;
+ __u64 *ksyms = NULL;
+ __u32 *lens;
+ __u32 i;
+
+ if (info->nr_jited_ksyms) {
+ kernel_syms_load(&dd);
+ ksyms = u64_to_ptr(info->jited_ksyms);
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+
+ lens = u64_to_ptr(info->jited_func_lens);
+ for (i = 0; i < info->nr_jited_func_lens; i++) {
+ if (ksyms) {
+ sym = kernel_syms_search(&dd, ksyms[i]);
+ if (sym)
+ sprintf(sym_name, "%s", sym->name);
+ else
+ sprintf(sym_name, "0x%016llx", ksyms[i]);
+ } else {
+ strcpy(sym_name, "unknown");
+ }
+
+ if (func_info) {
+ record = func_info + i * info->func_info_rec_size;
+ btf_dumper_type_only(btf, record->type_id,
+ func_sig,
+ sizeof(func_sig));
+ }
+
+ if (json_output) {
+ jsonw_start_object(json_wtr);
+ if (func_info && func_sig[0] != '\0') {
+ jsonw_name(json_wtr, "proto");
+ jsonw_string(json_wtr, func_sig);
+ }
+ jsonw_name(json_wtr, "name");
+ jsonw_string(json_wtr, sym_name);
+ jsonw_name(json_wtr, "insns");
+ } else {
+ if (func_info && func_sig[0] != '\0')
+ printf("%s:\n", func_sig);
+ printf("%s:\n", sym_name);
+ }
+
+ if (disasm_print_insn(img, lens[i], opcodes,
+ name, disasm_opt, btf,
+ prog_linfo, ksyms[i], i,
+ linum))
+ goto exit_free;
+
+ img += lens[i];
+
+ if (json_output)
+ jsonw_end_object(json_wtr);
+ else
+ printf("\n");
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr);
+ } else {
+ if (disasm_print_insn(buf, member_len, opcodes, name,
+ disasm_opt, btf, NULL, 0, 0,
+ false))
+ goto exit_free;
+ }
+ } else {
+ kernel_syms_load(&dd);
+ dd.nr_jited_ksyms = info->nr_jited_ksyms;
+ dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
+ dd.btf = btf;
+ dd.func_info = func_info;
+ dd.finfo_rec_size = info->func_info_rec_size;
+ dd.prog_linfo = prog_linfo;
+
+ if (json_output)
+ dump_xlated_json(&dd, buf, member_len, opcodes, linum);
+ else if (visual)
+ dump_xlated_cfg(&dd, buf, member_len, opcodes, linum);
+ else
+ dump_xlated_plain(&dd, buf, member_len, opcodes, linum);
+ kernel_syms_destroy(&dd);
+ }
+
+ err = 0;
+
+exit_free:
+ btf__free(btf);
+ bpf_prog_linfo__free(prog_linfo);
+ return err;
+}
+
+static int do_dump(int argc, char **argv)
+{
+ struct bpf_prog_info info;
+ __u32 info_len = sizeof(info);
+ size_t info_data_sz = 0;
+ void *info_data = NULL;
+ char *filepath = NULL;
+ bool opcodes = false;
+ bool visual = false;
+ enum dump_mode mode;
+ bool linum = false;
+ int nb_fds, i = 0;
+ int *fds = NULL;
+ int err = -1;
+
+ if (is_prefix(*argv, "jited")) {
+ if (disasm_init())
+ return -1;
+ mode = DUMP_JITED;
+ } else if (is_prefix(*argv, "xlated")) {
+ mode = DUMP_XLATED;
+ } else {
+ p_err("expected 'xlated' or 'jited', got: %s", *argv);
+ return -1;
+ }
+ NEXT_ARG();
+
+ if (argc < 2)
+ usage();
+
+ fds = malloc(sizeof(int));
+ if (!fds) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+ nb_fds = prog_parse_fds(&argc, &argv, &fds);
+ if (nb_fds < 1)
+ goto exit_free;
+
+ while (argc) {
+ if (is_prefix(*argv, "file")) {
+ NEXT_ARG();
+ if (!argc) {
+ p_err("expected file path");
+ goto exit_close;
+ }
+ if (nb_fds > 1) {
+ p_err("several programs matched");
+ goto exit_close;
+ }
+
+ filepath = *argv;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "opcodes")) {
+ opcodes = true;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "visual")) {
+ if (nb_fds > 1) {
+ p_err("several programs matched");
+ goto exit_close;
+ }
+
+ visual = true;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "linum")) {
+ linum = true;
+ NEXT_ARG();
+ } else {
+ usage();
+ goto exit_close;
+ }
+ }
+
+ if (filepath && (opcodes || visual || linum)) {
+ p_err("'file' is not compatible with 'opcodes', 'visual', or 'linum'");
+ goto exit_close;
+ }
+ if (json_output && visual) {
+ p_err("'visual' is not compatible with JSON output");
+ goto exit_close;
+ }
+
+ if (json_output && nb_fds > 1)
+ jsonw_start_array(json_wtr); /* root array */
+ for (i = 0; i < nb_fds; i++) {
+ memset(&info, 0, sizeof(info));
+
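+ /* first call fetches only the counts of the variable-length arrays */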
+ err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
+ if (err) {
+ p_err("can't get prog info: %s", strerror(errno));
+ break;
+ }
+
+ err = prep_prog_info(&info, mode, &info_data, &info_data_sz);
+ if (err) {
+ p_err("can't grow prog info_data");
+ break;
+ }
+
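+ /* second call fills the buffers prepared by prep_prog_info() */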
+ err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
+ if (err) {
+ p_err("can't get prog info: %s", strerror(errno));
+ break;
+ }
+
+ if (json_output && nb_fds > 1) {
+ jsonw_start_object(json_wtr); /* prog object */
+ print_prog_header_json(&info, fds[i]);
+ jsonw_name(json_wtr, "insns");
+ } else if (nb_fds > 1) {
+ print_prog_header_plain(&info, fds[i]);
+ }
+
+ err = prog_dump(&info, mode, filepath, opcodes, visual, linum);
+
+ if (json_output && nb_fds > 1)
+ jsonw_end_object(json_wtr); /* prog object */
+ else if (i != nb_fds - 1 && nb_fds > 1)
+ printf("\n");
+
+ if (err)
+ break;
+ close(fds[i]);
+ }
+ if (json_output && nb_fds > 1)
+ jsonw_end_array(json_wtr); /* root array */
+
+exit_close:
+ for (; i < nb_fds; i++)
+ close(fds[i]);
+exit_free:
+ free(info_data);
+ free(fds);
+ return err;
+}
+
+static int do_pin(int argc, char **argv)
+{
+ int err;
+
+ err = do_pin_any(argc, argv, prog_parse_fd);
+ if (!err && json_output)
+ jsonw_null(json_wtr);
+ return err;
+}
+
+struct map_replace {
+ int idx;
+ int fd;
+ char *name;
+};
+
+static int map_replace_compar(const void *p1, const void *p2)
+{
+ const struct map_replace *a = p1, *b = p2;
+
+ return a->idx - b->idx;
+}
+
+static int parse_attach_detach_args(int argc, char **argv, int *progfd,
+ enum bpf_attach_type *attach_type,
+ int *mapfd)
+{
+ if (!REQ_ARGS(3))
+ return -EINVAL;
+
+ *progfd = prog_parse_fd(&argc, &argv);
+ if (*progfd < 0)
+ return *progfd;
+
+ *attach_type = parse_attach_type(*argv);
+ if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
+ p_err("invalid attach/detach type");
+ return -EINVAL;
+ }
+
+ if (*attach_type == BPF_FLOW_DISSECTOR) {
+ *mapfd = 0;
+ return 0;
+ }
+
+ NEXT_ARG();
+ if (!REQ_ARGS(2))
+ return -EINVAL;
+
+ *mapfd = map_parse_fd(&argc, &argv);
+ if (*mapfd < 0)
+ return *mapfd;
+
+ return 0;
+}
+
+static int do_attach(int argc, char **argv)
+{
+ enum bpf_attach_type attach_type;
+ int err, progfd;
+ int mapfd;
+
+ err = parse_attach_detach_args(argc, argv,
+ &progfd, &attach_type, &mapfd);
+ if (err)
+ return err;
+
+ err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
+ if (err) {
+ p_err("failed prog attach to map");
+ return -EINVAL;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+ return 0;
+}
+
+static int do_detach(int argc, char **argv)
+{
+ enum bpf_attach_type attach_type;
+ int err, progfd;
+ int mapfd;
+
+ err = parse_attach_detach_args(argc, argv,
+ &progfd, &attach_type, &mapfd);
+ if (err)
+ return err;
+
+ err = bpf_prog_detach2(progfd, mapfd, attach_type);
+ if (err) {
+ p_err("failed prog detach from map");
+ return -EINVAL;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+ return 0;
+}
+
+static int check_single_stdin(char *file_data_in, char *file_ctx_in)
+{
+ if (file_data_in && file_ctx_in &&
+ !strcmp(file_data_in, "-") && !strcmp(file_ctx_in, "-")) {
+ p_err("cannot use standard input for both data_in and ctx_in");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int get_run_data(const char *fname, void **data_ptr, unsigned int *size)
+{
+ size_t block_size = 256;
+ size_t buf_size = block_size;
+ size_t nb_read = 0;
+ void *tmp;
+ FILE *f;
+
+ if (!fname) {
+ *data_ptr = NULL;
+ *size = 0;
+ return 0;
+ }
+
+ if (!strcmp(fname, "-"))
+ f = stdin;
+ else
+ f = fopen(fname, "r");
+ if (!f) {
+ p_err("failed to open %s: %s", fname, strerror(errno));
+ return -1;
+ }
+
+ *data_ptr = malloc(block_size);
+ if (!*data_ptr) {
+ p_err("failed to allocate memory for data_in/ctx_in: %s",
+ strerror(errno));
+ goto err_fclose;
+ }
+
+ while ((nb_read += fread(*data_ptr + nb_read, 1, block_size, f))) {
+ if (feof(f))
+ break;
+ if (ferror(f)) {
+ p_err("failed to read data_in/ctx_in from %s: %s",
+ fname, strerror(errno));
+ goto err_free;
+ }
+ if (nb_read > buf_size - block_size) {
+ if (buf_size >= UINT32_MAX) {
+ p_err("data_in/ctx_in is too long (max: %u)",
+ UINT32_MAX);
+ goto err_free;
+ }
+ /* No space for fread()-ing next chunk; realloc() */
+ buf_size *= 2;
+ tmp = realloc(*data_ptr, buf_size);
+ if (!tmp) {
+ p_err("failed to reallocate data_in/ctx_in: %s",
+ strerror(errno));
+ goto err_free;
+ }
+ *data_ptr = tmp;
+ }
+ }
+ if (f != stdin)
+ fclose(f);
+
+ *size = nb_read;
+ return 0;
+
+err_free:
+ free(*data_ptr);
+ *data_ptr = NULL;
+err_fclose:
+ if (f != stdin)
+ fclose(f);
+ return -1;
+}
+
+static void hex_print(void *data, unsigned int size, FILE *f)
+{
+ size_t i, j;
+ char c;
+
+ for (i = 0; i < size; i += 16) {
+ /* Row offset */
+ fprintf(f, "%07zx\t", i);
+
+ /* Hexadecimal values */
+ for (j = i; j < i + 16 && j < size; j++)
+ fprintf(f, "%02x%s", *(uint8_t *)(data + j),
+ j % 2 ? " " : "");
+ for (; j < i + 16; j++)
+ fprintf(f, " %s", j % 2 ? " " : "");
+
+ /* ASCII values (if relevant), '.' otherwise */
+ fprintf(f, "| ");
+ for (j = i; j < i + 16 && j < size; j++) {
+ c = *(char *)(data + j);
+ if (c < ' ' || c > '~')
+ c = '.';
+ fprintf(f, "%c%s", c, j == i + 7 ? " " : "");
+ }
+
+ fprintf(f, "\n");
+ }
+}
+
+static int
+print_run_output(void *data, unsigned int size, const char *fname,
+ const char *json_key)
+{
+ size_t nb_written;
+ FILE *f;
+
+ if (!fname)
+ return 0;
+
+ if (!strcmp(fname, "-")) {
+ f = stdout;
+ if (json_output) {
+ jsonw_name(json_wtr, json_key);
+ print_data_json(data, size);
+ } else {
+ hex_print(data, size, f);
+ }
+ return 0;
+ }
+
+ f = fopen(fname, "w");
+ if (!f) {
+ p_err("failed to open %s: %s", fname, strerror(errno));
+ return -1;
+ }
+
+ nb_written = fwrite(data, 1, size, f);
+ fclose(f);
+ if (nb_written != size) {
+ p_err("failed to write output data/ctx: %s", strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int alloc_run_data(void **data_ptr, unsigned int size_out)
+{
+ *data_ptr = calloc(size_out, 1);
+ if (!*data_ptr) {
+ p_err("failed to allocate memory for output data/ctx: %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int do_run(int argc, char **argv)
+{
+ char *data_fname_in = NULL, *data_fname_out = NULL;
+ char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
+ const unsigned int default_size = SZ_32K;
+ void *data_in = NULL, *data_out = NULL;
+ void *ctx_in = NULL, *ctx_out = NULL;
+ unsigned int repeat = 1;
+ int fd, err;
+ LIBBPF_OPTS(bpf_test_run_opts, test_attr);
+
+ if (!REQ_ARGS(4))
+ return -1;
+
+ fd = prog_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return -1;
+
+ while (argc) {
+ if (detect_common_prefix(*argv, "data_in", "data_out",
+ "data_size_out", NULL))
+ return -1;
+ if (detect_common_prefix(*argv, "ctx_in", "ctx_out",
+ "ctx_size_out", NULL))
+ return -1;
+
+ if (is_prefix(*argv, "data_in")) {
+ NEXT_ARG();
+ if (!REQ_ARGS(1))
+ return -1;
+
+ data_fname_in = GET_ARG();
+ if (check_single_stdin(data_fname_in, ctx_fname_in))
+ return -1;
+ } else if (is_prefix(*argv, "data_out")) {
+ NEXT_ARG();
+ if (!REQ_ARGS(1))
+ return -1;
+
+ data_fname_out = GET_ARG();
+ } else if (is_prefix(*argv, "data_size_out")) {
+ char *endptr;
+
+ NEXT_ARG();
+ if (!REQ_ARGS(1))
+ return -1;
+
+ test_attr.data_size_out = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as output data size",
+ *argv);
+ return -1;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "ctx_in")) {
+ NEXT_ARG();
+ if (!REQ_ARGS(1))
+ return -1;
+
+ ctx_fname_in = GET_ARG();
+ if (check_single_stdin(data_fname_in, ctx_fname_in))
+ return -1;
+ } else if (is_prefix(*argv, "ctx_out")) {
+ NEXT_ARG();
+ if (!REQ_ARGS(1))
+ return -1;
+
+ ctx_fname_out = GET_ARG();
+ } else if (is_prefix(*argv, "ctx_size_out")) {
+ char *endptr;
+
+ NEXT_ARG();
+ if (!REQ_ARGS(1))
+ return -1;
+
+ test_attr.ctx_size_out = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as output context size",
+ *argv);
+ return -1;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "repeat")) {
+ char *endptr;
+
+ NEXT_ARG();
+ if (!REQ_ARGS(1))
+ return -1;
+
+ repeat = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as repeat number",
+ *argv);
+ return -1;
+ }
+ NEXT_ARG();
+ } else {
+ p_err("expected no more arguments, 'data_in', 'data_out', 'data_size_out', 'ctx_in', 'ctx_out', 'ctx_size_out' or 'repeat', got: '%s'?",
+ *argv);
+ return -1;
+ }
+ }
+
+ err = get_run_data(data_fname_in, &data_in, &test_attr.data_size_in);
+ if (err)
+ return -1;
+
+ if (data_in) {
+ if (!test_attr.data_size_out)
+ test_attr.data_size_out = default_size;
+ err = alloc_run_data(&data_out, test_attr.data_size_out);
+ if (err)
+ goto free_data_in;
+ }
+
+ err = get_run_data(ctx_fname_in, &ctx_in, &test_attr.ctx_size_in);
+ if (err)
+ goto free_data_out;
+
+ if (ctx_in) {
+ if (!test_attr.ctx_size_out)
+ test_attr.ctx_size_out = default_size;
+ err = alloc_run_data(&ctx_out, test_attr.ctx_size_out);
+ if (err)
+ goto free_ctx_in;
+ }
+
+ test_attr.repeat = repeat;
+ test_attr.data_in = data_in;
+ test_attr.data_out = data_out;
+ test_attr.ctx_in = ctx_in;
+ test_attr.ctx_out = ctx_out;
+
+ err = bpf_prog_test_run_opts(fd, &test_attr);
+ if (err) {
+ p_err("failed to run program: %s", strerror(errno));
+ goto free_ctx_out;
+ }
+
+ err = 0;
+
+ if (json_output)
+ jsonw_start_object(json_wtr); /* root */
+
+ /* Do not exit on errors occurring when printing output data/context,
+ * we still want to print return value and duration for program run.
+ */
+ if (test_attr.data_size_out)
+ err += print_run_output(test_attr.data_out,
+ test_attr.data_size_out,
+ data_fname_out, "data_out");
+ if (test_attr.ctx_size_out)
+ err += print_run_output(test_attr.ctx_out,
+ test_attr.ctx_size_out,
+ ctx_fname_out, "ctx_out");
+
+ if (json_output) {
+ jsonw_uint_field(json_wtr, "retval", test_attr.retval);
+ jsonw_uint_field(json_wtr, "duration", test_attr.duration);
+ jsonw_end_object(json_wtr); /* root */
+ } else {
+ fprintf(stdout, "Return value: %u, duration%s: %uns\n",
+ test_attr.retval,
+ repeat > 1 ? " (average)" : "", test_attr.duration);
+ }
+
+free_ctx_out:
+ free(ctx_out);
+free_ctx_in:
+ free(ctx_in);
+free_data_out:
+ free(data_out);
+free_data_in:
+ free(data_in);
+
+ return err;
+}
+
+static int
+get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
+ enum bpf_attach_type *expected_attach_type)
+{
+ libbpf_print_fn_t print_backup;
+ int ret;
+
+ ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
+ if (!ret)
+ return ret;
+
+ /* libbpf_prog_type_by_name() failed, let's re-run with debug level */
+ print_backup = libbpf_set_print(print_all_levels);
+ ret = libbpf_prog_type_by_name(name, prog_type, expected_attach_type);
+ libbpf_set_print(print_backup);
+
+ return ret;
+}
+
+static int
+auto_attach_program(struct bpf_program *prog, const char *path)
+{
+ struct bpf_link *link;
+ int err;
+
+ link = bpf_program__attach(prog);
+ if (!link) {
+ p_info("Program %s does not support autoattach, falling back to pinning",
+ bpf_program__name(prog));
+ return bpf_obj_pin(bpf_program__fd(prog), path);
+ }
+
+ err = bpf_link__pin(link, path);
+ bpf_link__destroy(link);
+ return err;
+}
+
+static int
+auto_attach_programs(struct bpf_object *obj, const char *path)
+{
+ struct bpf_program *prog;
+ char buf[PATH_MAX];
+ int err;
+
+ bpf_object__for_each_program(prog, obj) {
+ err = pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog));
+ if (err)
+ goto err_unpin_programs;
+
+ err = auto_attach_program(prog, buf);
+ if (err)
+ goto err_unpin_programs;
+ }
+
+ return 0;
+
+err_unpin_programs:
+ while ((prog = bpf_object__prev_program(obj, prog))) {
+ if (pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog)))
+ continue;
+
+ bpf_program__unpin(prog, buf);
+ }
+
+ return err;
+}
+
+static int load_with_options(int argc, char **argv, bool first_prog_only)
+{
+ enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
+ .relaxed_maps = relaxed_maps,
+ );
+ enum bpf_attach_type expected_attach_type;
+ struct map_replace *map_replace = NULL;
+ struct bpf_program *prog = NULL, *pos;
+ unsigned int old_map_fds = 0;
+ const char *pinmaps = NULL;
+ __u32 xdpmeta_ifindex = 0;
+ __u32 offload_ifindex = 0;
+ bool auto_attach = false;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ const char *pinfile;
+ unsigned int i, j;
+ const char *file;
+ int idx, err;
+
+ if (!REQ_ARGS(2))
+ return -1;
+ file = GET_ARG();
+ pinfile = GET_ARG();
+
+ while (argc) {
+ if (is_prefix(*argv, "type")) {
+ NEXT_ARG();
+
+ if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
+ p_err("program type already specified");
+ goto err_free_reuse_maps;
+ }
+ if (!REQ_ARGS(1))
+ goto err_free_reuse_maps;
+
+ err = libbpf_prog_type_by_name(*argv, &common_prog_type,
+ &expected_attach_type);
+ if (err < 0) {
+ /* Put a '/' at the end of type to appease libbpf */
+ char *type = malloc(strlen(*argv) + 2);
+
+ if (!type) {
+ p_err("mem alloc failed");
+ goto err_free_reuse_maps;
+ }
+ *type = 0;
+ strcat(type, *argv);
+ strcat(type, "/");
+
+ err = get_prog_type_by_name(type, &common_prog_type,
+ &expected_attach_type);
+ free(type);
+ if (err < 0)
+ goto err_free_reuse_maps;
+ }
+
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "map")) {
+ void *new_map_replace;
+ char *endptr, *name;
+ int fd;
+
+ NEXT_ARG();
+
+ if (!REQ_ARGS(4))
+ goto err_free_reuse_maps;
+
+ if (is_prefix(*argv, "idx")) {
+ NEXT_ARG();
+
+ idx = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as IDX", *argv);
+ goto err_free_reuse_maps;
+ }
+ name = NULL;
+ } else if (is_prefix(*argv, "name")) {
+ NEXT_ARG();
+
+ name = *argv;
+ idx = -1;
+ } else {
+ p_err("expected 'idx' or 'name', got: '%s'?",
+ *argv);
+ goto err_free_reuse_maps;
+ }
+ NEXT_ARG();
+
+ fd = map_parse_fd(&argc, &argv);
+ if (fd < 0)
+ goto err_free_reuse_maps;
+
+ new_map_replace = libbpf_reallocarray(map_replace,
+ old_map_fds + 1,
+ sizeof(*map_replace));
+ if (!new_map_replace) {
+ p_err("mem alloc failed");
+ goto err_free_reuse_maps;
+ }
+ map_replace = new_map_replace;
+
+ map_replace[old_map_fds].idx = idx;
+ map_replace[old_map_fds].name = name;
+ map_replace[old_map_fds].fd = fd;
+ old_map_fds++;
+ } else if (is_prefix(*argv, "dev")) {
+ p_info("Warning: 'bpftool prog load [...] dev <ifname>' syntax is deprecated.\n"
+ "Going further, please use 'offload_dev <ifname>' to offload program to device.\n"
+ "For applications using XDP hints only, use 'xdpmeta_dev <ifname>'.");
+ goto offload_dev;
+ } else if (is_prefix(*argv, "offload_dev")) {
+offload_dev:
+ NEXT_ARG();
+
+ if (offload_ifindex) {
+ p_err("offload_dev already specified");
+ goto err_free_reuse_maps;
+ } else if (xdpmeta_ifindex) {
+ p_err("xdpmeta_dev and offload_dev are mutually exclusive");
+ goto err_free_reuse_maps;
+ }
+ if (!REQ_ARGS(1))
+ goto err_free_reuse_maps;
+
+ offload_ifindex = if_nametoindex(*argv);
+ if (!offload_ifindex) {
+ p_err("unrecognized netdevice '%s': %s",
+ *argv, strerror(errno));
+ goto err_free_reuse_maps;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "xdpmeta_dev")) {
+ NEXT_ARG();
+
+ if (xdpmeta_ifindex) {
+ p_err("xdpmeta_dev already specified");
+ goto err_free_reuse_maps;
+ } else if (offload_ifindex) {
+ p_err("xdpmeta_dev and offload_dev are mutually exclusive");
+ goto err_free_reuse_maps;
+ }
+ if (!REQ_ARGS(1))
+ goto err_free_reuse_maps;
+
+ xdpmeta_ifindex = if_nametoindex(*argv);
+ if (!xdpmeta_ifindex) {
+ p_err("unrecognized netdevice '%s': %s",
+ *argv, strerror(errno));
+ goto err_free_reuse_maps;
+ }
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "pinmaps")) {
+ NEXT_ARG();
+
+ if (!REQ_ARGS(1))
+ goto err_free_reuse_maps;
+
+ pinmaps = GET_ARG();
+ } else if (is_prefix(*argv, "autoattach")) {
+ auto_attach = true;
+ NEXT_ARG();
+ } else {
+ p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
+ *argv);
+ goto err_free_reuse_maps;
+ }
+ }
+
+ set_max_rlimit();
+
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ open_opts.kernel_log_level = 1 + 2 + 4;
+
+ obj = bpf_object__open_file(file, &open_opts);
+ if (!obj) {
+ p_err("failed to open object file");
+ goto err_free_reuse_maps;
+ }
+
+ bpf_object__for_each_program(pos, obj) {
+ enum bpf_prog_type prog_type = common_prog_type;
+
+ if (prog_type == BPF_PROG_TYPE_UNSPEC) {
+ const char *sec_name = bpf_program__section_name(pos);
+
+ err = get_prog_type_by_name(sec_name, &prog_type,
+ &expected_attach_type);
+ if (err < 0)
+ goto err_close_obj;
+ }
+
+ if (prog_type == BPF_PROG_TYPE_XDP && xdpmeta_ifindex) {
+ bpf_program__set_flags(pos, BPF_F_XDP_DEV_BOUND_ONLY);
+ bpf_program__set_ifindex(pos, xdpmeta_ifindex);
+ } else {
+ bpf_program__set_ifindex(pos, offload_ifindex);
+ }
+ if (bpf_program__type(pos) != prog_type)
+ bpf_program__set_type(pos, prog_type);
+ bpf_program__set_expected_attach_type(pos, expected_attach_type);
+ }
+
+ qsort(map_replace, old_map_fds, sizeof(*map_replace),
+ map_replace_compar);
+
+ /* After the sort maps by name will be first on the list, because they
+ * have idx == -1. Resolve them.
+ */
+ j = 0;
+ while (j < old_map_fds && map_replace[j].name) {
+ i = 0;
+ bpf_object__for_each_map(map, obj) {
+ if (!strcmp(bpf_map__name(map), map_replace[j].name)) {
+ map_replace[j].idx = i;
+ break;
+ }
+ i++;
+ }
+ if (map_replace[j].idx == -1) {
+ p_err("unable to find map '%s'", map_replace[j].name);
+ goto err_close_obj;
+ }
+ j++;
+ }
+ /* Resort if any names were resolved */
+ if (j)
+ qsort(map_replace, old_map_fds, sizeof(*map_replace),
+ map_replace_compar);
+
+ /* Set ifindex and name reuse */
+ j = 0;
+ idx = 0;
+ bpf_object__for_each_map(map, obj) {
+ if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+ bpf_map__set_ifindex(map, offload_ifindex);
+
+ if (j < old_map_fds && idx == map_replace[j].idx) {
+ err = bpf_map__reuse_fd(map, map_replace[j++].fd);
+ if (err) {
+ p_err("unable to set up map reuse: %d", err);
+ goto err_close_obj;
+ }
+
+ /* Next reuse wants to apply to the same map */
+ if (j < old_map_fds && map_replace[j].idx == idx) {
+ p_err("replacement for map idx %d specified more than once",
+ idx);
+ goto err_close_obj;
+ }
+ }
+
+ idx++;
+ }
+ if (j < old_map_fds) {
+ p_err("map idx '%d' not used", map_replace[j].idx);
+ goto err_close_obj;
+ }
+
+ err = bpf_object__load(obj);
+ if (err) {
+ p_err("failed to load object file");
+ goto err_close_obj;
+ }
+
+ err = mount_bpffs_for_pin(pinfile, !first_prog_only);
+ if (err)
+ goto err_close_obj;
+
+ if (first_prog_only) {
+ prog = bpf_object__next_program(obj, NULL);
+ if (!prog) {
+ p_err("object file doesn't contain any bpf program");
+ goto err_close_obj;
+ }
+
+ if (auto_attach)
+ err = auto_attach_program(prog, pinfile);
+ else
+ err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
+ if (err) {
+ p_err("failed to pin program %s",
+ bpf_program__section_name(prog));
+ goto err_close_obj;
+ }
+ } else {
+ if (auto_attach)
+ err = auto_attach_programs(obj, pinfile);
+ else
+ err = bpf_object__pin_programs(obj, pinfile);
+ if (err) {
+ p_err("failed to pin all programs");
+ goto err_close_obj;
+ }
+ }
+
+ if (pinmaps) {
+ err = bpf_object__pin_maps(obj, pinmaps);
+ if (err) {
+ p_err("failed to pin all maps");
+ goto err_unpin;
+ }
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ bpf_object__close(obj);
+ for (i = 0; i < old_map_fds; i++)
+ close(map_replace[i].fd);
+ free(map_replace);
+
+ return 0;
+
+err_unpin:
+ if (first_prog_only)
+ unlink(pinfile);
+ else
+ bpf_object__unpin_programs(obj, pinfile);
+err_close_obj:
+ bpf_object__close(obj);
+err_free_reuse_maps:
+ for (i = 0; i < old_map_fds; i++)
+ close(map_replace[i].fd);
+ free(map_replace);
+ return -1;
+}
+
+static int count_open_fds(void)
+{
+ DIR *dp = opendir("/proc/self/fd");
+ struct dirent *de;
+ int cnt = -3; /* discount ".", ".." and the FD used by opendir() itself */
+
+ if (!dp)
+ return -1;
+
+ while ((de = readdir(dp)))
+ cnt++;
+
+ closedir(dp);
+ return cnt;
+}
+
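+/* Execute the light skeleton loader program produced by
+ * bpf_object__gen_loader() and report any file descriptors it leaks.
+ */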
+static int try_loader(struct gen_loader_opts *gen)
+{
+ struct bpf_load_and_run_opts opts = {};
+ struct bpf_loader_ctx *ctx;
+ int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
+ sizeof(struct bpf_prog_desc));
+ int log_buf_sz = (1u << 24) - 1;
+ int err, fds_before, fd_delta;
+ char *log_buf = NULL;
+
+ ctx = alloca(ctx_sz);
+ memset(ctx, 0, ctx_sz);
+ ctx->sz = ctx_sz;
+ if (verifier_logs) {
+ ctx->log_level = 1 + 2 + 4;
+ ctx->log_size = log_buf_sz;
+ log_buf = malloc(log_buf_sz);
+ if (!log_buf)
+ return -ENOMEM;
+ ctx->log_buf = (long) log_buf;
+ }
+ opts.ctx = ctx;
+ opts.data = gen->data;
+ opts.data_sz = gen->data_sz;
+ opts.insns = gen->insns;
+ opts.insns_sz = gen->insns_sz;
+ fds_before = count_open_fds();
+ err = bpf_load_and_run(&opts);
+ fd_delta = count_open_fds() - fds_before;
+ if (err < 0 || verifier_logs) {
+ fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
+ if (fd_delta && err < 0)
+ fprintf(stderr, "loader prog leaked %d FDs\n",
+ fd_delta);
+ }
+ free(log_buf);
+ return err;
+}
+
+static int do_loader(int argc, char **argv)
+{
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
+ DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
+ struct bpf_object *obj;
+ const char *file;
+ int err = 0;
+
+ if (!REQ_ARGS(1))
+ return -1;
+ file = GET_ARG();
+
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ open_opts.kernel_log_level = 1 + 2 + 4;
+
+ obj = bpf_object__open_file(file, &open_opts);
+ if (!obj) {
+ p_err("failed to open object file");
+ goto err_close_obj;
+ }
+
+ err = bpf_object__gen_loader(obj, &gen);
+ if (err)
+ goto err_close_obj;
+
+ err = bpf_object__load(obj);
+ if (err) {
+ p_err("failed to load object file");
+ goto err_close_obj;
+ }
+
+ if (verifier_logs) {
+ struct dump_data dd = {};
+
+ kernel_syms_load(&dd);
+ dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
+ kernel_syms_destroy(&dd);
+ }
+ err = try_loader(&gen);
+err_close_obj:
+ bpf_object__close(obj);
+ return err;
+}
+
+static int do_load(int argc, char **argv)
+{
+ if (use_loader)
+ return do_loader(argc, argv);
+ return load_with_options(argc, argv, true);
+}
+
+static int do_loadall(int argc, char **argv)
+{
+ return load_with_options(argc, argv, false);
+}
+
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+
+static int do_profile(int argc, char **argv)
+{
+ p_err("bpftool prog profile command is not supported. Please build bpftool with clang >= 10.0.0");
+ return 0;
+}
+
+#else /* BPFTOOL_WITHOUT_SKELETONS */
+
+#include "profiler.skel.h"
+
+struct profile_metric {
+ const char *name;
+ struct bpf_perf_event_value val;
+ struct perf_event_attr attr;
+ bool selected;
+
+ /* calculate ratios like instructions per cycle */
+ const int ratio_metric; /* 0 for N/A, else 1-based index of the denominator metric (1 = cycles, 2 = instructions) */
+ const char *ratio_desc;
+ const float ratio_mul;
+} metrics[] = {
+ {
+ .name = "cycles",
+ .attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .exclude_user = 1,
+ },
+ },
+ {
+ .name = "instructions",
+ .attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_INSTRUCTIONS,
+ .exclude_user = 1,
+ },
+ .ratio_metric = 1,
+ .ratio_desc = "insns per cycle",
+ .ratio_mul = 1.0,
+ },
+ {
+ .name = "l1d_loads",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_L1D |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
+ .exclude_user = 1,
+ },
+ },
+ {
+ .name = "llc_misses",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_LL |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ .exclude_user = 1
+ },
+ .ratio_metric = 2,
+ .ratio_desc = "LLC misses per million insns",
+ .ratio_mul = 1e6,
+ },
+ {
+ .name = "itlb_misses",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_ITLB |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ .exclude_user = 1
+ },
+ .ratio_metric = 2,
+ .ratio_desc = "itlb misses per million insns",
+ .ratio_mul = 1e6,
+ },
+ {
+ .name = "dtlb_misses",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_DTLB |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ .exclude_user = 1
+ },
+ .ratio_metric = 2,
+ .ratio_desc = "dtlb misses per million insns",
+ .ratio_mul = 1e6,
+ },
+};
+
+static __u64 profile_total_count;
+
+#define MAX_NUM_PROFILE_METRICS 4
+
+static int profile_parse_metrics(int argc, char **argv)
+{
+ unsigned int metric_cnt;
+ int selected_cnt = 0;
+ unsigned int i;
+
+ metric_cnt = ARRAY_SIZE(metrics);
+
+ while (argc > 0) {
+ for (i = 0; i < metric_cnt; i++) {
+ if (is_prefix(argv[0], metrics[i].name)) {
+ if (!metrics[i].selected)
+ selected_cnt++;
+ metrics[i].selected = true;
+ break;
+ }
+ }
+ if (i == metric_cnt) {
+ p_err("unknown metric %s", argv[0]);
+ return -1;
+ }
+ NEXT_ARG();
+ }
+ if (selected_cnt > MAX_NUM_PROFILE_METRICS) {
+ p_err("too many (%d) metrics, please specify no more than %d metrics at at time",
+ selected_cnt, MAX_NUM_PROFILE_METRICS);
+ return -1;
+ }
+ return selected_cnt;
+}
+
+static void profile_read_values(struct profiler_bpf *obj)
+{
+ __u32 m, cpu, num_cpu = obj->rodata->num_cpu;
+ int reading_map_fd, count_map_fd;
+ __u64 counts[num_cpu];
+ __u32 key = 0;
+ int err;
+
+ reading_map_fd = bpf_map__fd(obj->maps.accum_readings);
+ count_map_fd = bpf_map__fd(obj->maps.counts);
+ if (reading_map_fd < 0 || count_map_fd < 0) {
+ p_err("failed to get fd for map");
+ return;
+ }
+
+ err = bpf_map_lookup_elem(count_map_fd, &key, counts);
+ if (err) {
+ p_err("failed to read count_map: %s", strerror(errno));
+ return;
+ }
+
+ profile_total_count = 0;
+ for (cpu = 0; cpu < num_cpu; cpu++)
+ profile_total_count += counts[cpu];
+
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ struct bpf_perf_event_value values[num_cpu];
+
+ if (!metrics[m].selected)
+ continue;
+
+ err = bpf_map_lookup_elem(reading_map_fd, &key, values);
+ if (err) {
+ p_err("failed to read reading_map: %s",
+ strerror(errno));
+ return;
+ }
+ for (cpu = 0; cpu < num_cpu; cpu++) {
+ metrics[m].val.counter += values[cpu].counter;
+ metrics[m].val.enabled += values[cpu].enabled;
+ metrics[m].val.running += values[cpu].running;
+ }
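+ /* accum_readings is keyed by the index of the selected metric */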
+ key++;
+ }
+}
+
+static void profile_print_readings_json(void)
+{
+ __u32 m;
+
+ jsonw_start_array(json_wtr);
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ if (!metrics[m].selected)
+ continue;
+ jsonw_start_object(json_wtr);
+ jsonw_string_field(json_wtr, "metric", metrics[m].name);
+ jsonw_lluint_field(json_wtr, "run_cnt", profile_total_count);
+ jsonw_lluint_field(json_wtr, "value", metrics[m].val.counter);
+ jsonw_lluint_field(json_wtr, "enabled", metrics[m].val.enabled);
+ jsonw_lluint_field(json_wtr, "running", metrics[m].val.running);
+
+ jsonw_end_object(json_wtr);
+ }
+ jsonw_end_array(json_wtr);
+}
+
+static void profile_print_readings_plain(void)
+{
+ __u32 m;
+
+ printf("\n%18llu %-20s\n", profile_total_count, "run_cnt");
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ struct bpf_perf_event_value *val = &metrics[m].val;
+ int r;
+
+ if (!metrics[m].selected)
+ continue;
+ printf("%18llu %-20s", val->counter, metrics[m].name);
+
+ r = metrics[m].ratio_metric - 1;
+ if (r >= 0 && metrics[r].selected &&
+ metrics[r].val.counter > 0) {
+ printf("# %8.2f %-30s",
+ val->counter * metrics[m].ratio_mul /
+ metrics[r].val.counter,
+ metrics[m].ratio_desc);
+ } else {
+ printf("%-41s", "");
+ }
+
+ if (val->enabled > val->running)
+ printf("(%4.2f%%)",
+ val->running * 100.0 / val->enabled);
+ printf("\n");
+ }
+}
+
+static void profile_print_readings(void)
+{
+ if (json_output)
+ profile_print_readings_json();
+ else
+ profile_print_readings_plain();
+}
+
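+/* Resolve the name of the target program's entry function from its BTF
+ * func_info, for use with bpf_program__set_attach_target().
+ */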
+static char *profile_target_name(int tgt_fd)
+{
+ struct bpf_func_info func_info;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ const struct btf_type *t;
+ __u32 func_info_rec_size;
+ struct btf *btf = NULL;
+ char *name = NULL;
+ int err;
+
+ err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
+ if (err) {
+ p_err("failed to get info for prog FD %d", tgt_fd);
+ goto out;
+ }
+
+ if (info.btf_id == 0) {
+ p_err("prog FD %d doesn't have valid btf", tgt_fd);
+ goto out;
+ }
+
+ func_info_rec_size = info.func_info_rec_size;
+ if (info.nr_func_info == 0) {
+ p_err("found 0 func_info for prog FD %d", tgt_fd);
+ goto out;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.nr_func_info = 1;
+ info.func_info_rec_size = func_info_rec_size;
+ info.func_info = ptr_to_u64(&func_info);
+
+ err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
+ if (err) {
+ p_err("failed to get func_info for prog FD %d", tgt_fd);
+ goto out;
+ }
+
+ btf = btf__load_from_kernel_by_id(info.btf_id);
+ if (!btf) {
+ p_err("failed to load btf for prog FD %d", tgt_fd);
+ goto out;
+ }
+
+ t = btf__type_by_id(btf, func_info.type_id);
+ if (!t) {
+ p_err("btf %d doesn't have type %d",
+ info.btf_id, func_info.type_id);
+ goto out;
+ }
+ name = strdup(btf__name_by_offset(btf, t->name_off));
+out:
+ btf__free(btf);
+ return name;
+}
+
+static struct profiler_bpf *profile_obj;
+static int profile_tgt_fd = -1;
+static char *profile_tgt_name;
+static int *profile_perf_events;
+static int profile_perf_event_cnt;
+
+static void profile_close_perf_events(struct profiler_bpf *obj)
+{
+ int i;
+
+ for (i = profile_perf_event_cnt - 1; i >= 0; i--)
+ close(profile_perf_events[i]);
+
+ free(profile_perf_events);
+ profile_perf_event_cnt = 0;
+}
+
+static int profile_open_perf_event(int mid, int cpu, int map_fd)
+{
+ int pmu_fd;
+
+ pmu_fd = syscall(__NR_perf_event_open, &metrics[mid].attr,
+ -1 /*pid*/, cpu, -1 /*group_fd*/, 0);
+ if (pmu_fd < 0) {
+ if (errno == ENODEV) {
+ p_info("cpu %d may be offline, skip %s profiling.",
+ cpu, metrics[mid].name);
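+ /* still advance the counter so map keys stay aligned
+ * with the (metric, cpu) slot layout
+ */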
+ profile_perf_event_cnt++;
+ return 0;
+ }
+ return -1;
+ }
+
+ if (bpf_map_update_elem(map_fd,
+ &profile_perf_event_cnt,
+ &pmu_fd, BPF_ANY) ||
+ ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
+ close(pmu_fd);
+ return -1;
+ }
+
+ profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
+ return 0;
+}
+
+static int profile_open_perf_events(struct profiler_bpf *obj)
+{
+ unsigned int cpu, m;
+ int map_fd;
+
+ profile_perf_events = calloc(
+ obj->rodata->num_cpu * obj->rodata->num_metric, sizeof(int));
+ if (!profile_perf_events) {
+ p_err("failed to allocate memory for perf_event array: %s",
+ strerror(errno));
+ return -1;
+ }
+ map_fd = bpf_map__fd(obj->maps.events);
+ if (map_fd < 0) {
+ p_err("failed to get fd for events map");
+ return -1;
+ }
+
+ for (m = 0; m < ARRAY_SIZE(metrics); m++) {
+ if (!metrics[m].selected)
+ continue;
+ for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
+ if (profile_open_perf_event(m, cpu, map_fd)) {
+ p_err("failed to create event %s on cpu %d",
+ metrics[m].name, cpu);
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+static void profile_print_and_cleanup(void)
+{
+ profile_close_perf_events(profile_obj);
+ profile_read_values(profile_obj);
+ profile_print_readings();
+ profiler_bpf__destroy(profile_obj);
+
+ close(profile_tgt_fd);
+ free(profile_tgt_name);
+}
+
+static void int_exit(int signo)
+{
+ profile_print_and_cleanup();
+ exit(0);
+}
+
+static int do_profile(int argc, char **argv)
+{
+ int num_metric, num_cpu, err = -1;
+ struct bpf_program *prog;
+ unsigned long duration;
+ char *endptr;
+
+ /* we at least need two args for the prog and one metric */
+ if (!REQ_ARGS(3))
+ return -EINVAL;
+
+ /* parse target fd */
+ profile_tgt_fd = prog_parse_fd(&argc, &argv);
+ if (profile_tgt_fd < 0) {
+ p_err("failed to parse fd");
+ return -1;
+ }
+
+ /* parse profiling optional duration */
+ if (argc > 2 && is_prefix(argv[0], "duration")) {
+ NEXT_ARG();
+ duration = strtoul(*argv, &endptr, 0);
+ if (*endptr)
+ usage();
+ NEXT_ARG();
+ } else {
+ duration = UINT_MAX;
+ }
+
+ num_metric = profile_parse_metrics(argc, argv);
+ if (num_metric <= 0)
+ goto out;
+
+ num_cpu = libbpf_num_possible_cpus();
+ if (num_cpu <= 0) {
+ p_err("failed to identify number of CPUs");
+ goto out;
+ }
+
+ profile_obj = profiler_bpf__open();
+ if (!profile_obj) {
+ p_err("failed to open and/or load BPF object");
+ goto out;
+ }
+
+ profile_obj->rodata->num_cpu = num_cpu;
+ profile_obj->rodata->num_metric = num_metric;
+
+ /* adjust map sizes */
+ bpf_map__set_max_entries(profile_obj->maps.events, num_metric * num_cpu);
+ bpf_map__set_max_entries(profile_obj->maps.fentry_readings, num_metric);
+ bpf_map__set_max_entries(profile_obj->maps.accum_readings, num_metric);
+ bpf_map__set_max_entries(profile_obj->maps.counts, 1);
+
+ /* change target name */
+ profile_tgt_name = profile_target_name(profile_tgt_fd);
+ if (!profile_tgt_name)
+ goto out;
+
+ bpf_object__for_each_program(prog, profile_obj->obj) {
+ err = bpf_program__set_attach_target(prog, profile_tgt_fd,
+ profile_tgt_name);
+ if (err) {
+ p_err("failed to set attach target\n");
+ goto out;
+ }
+ }
+
+ set_max_rlimit();
+ err = profiler_bpf__load(profile_obj);
+ if (err) {
+ p_err("failed to load profile_obj");
+ goto out;
+ }
+
+ err = profile_open_perf_events(profile_obj);
+ if (err)
+ goto out;
+
+ err = profiler_bpf__attach(profile_obj);
+ if (err) {
+ p_err("failed to attach profile_obj");
+ goto out;
+ }
+ signal(SIGINT, int_exit);
+
+ sleep(duration);
+ profile_print_and_cleanup();
+ return 0;
+
+out:
+ profile_close_perf_events(profile_obj);
+ if (profile_obj)
+ profiler_bpf__destroy(profile_obj);
+ close(profile_tgt_fd);
+ free(profile_tgt_name);
+ return err;
+}
+
+#endif /* BPFTOOL_WITHOUT_SKELETONS */
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %1$s %2$s { show | list } [PROG]\n"
+ " %1$s %2$s dump xlated PROG [{ file FILE | [opcodes] [linum] [visual] }]\n"
+ " %1$s %2$s dump jited PROG [{ file FILE | [opcodes] [linum] }]\n"
+ " %1$s %2$s pin PROG FILE\n"
+ " %1$s %2$s { load | loadall } OBJ PATH \\\n"
+ " [type TYPE] [{ offload_dev | xdpmeta_dev } NAME] \\\n"
+ " [map { idx IDX | name NAME } MAP]\\\n"
+ " [pinmaps MAP_DIR]\n"
+ " [autoattach]\n"
+ " %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
+ " %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
+ " %1$s %2$s run PROG \\\n"
+ " data_in FILE \\\n"
+ " [data_out FILE [data_size_out L]] \\\n"
+ " [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
+ " [repeat N]\n"
+ " %1$s %2$s profile PROG [duration DURATION] METRICs\n"
+ " %1$s %2$s tracelog\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " " HELP_SPEC_MAP "\n"
+ " " HELP_SPEC_PROGRAM "\n"
+ " TYPE := { socket | kprobe | kretprobe | classifier | action |\n"
+ " tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
+ " cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
+ " lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
+ " sk_reuseport | flow_dissector | cgroup/sysctl |\n"
+ " cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
+ " cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
+ " cgroup/getpeername4 | cgroup/getpeername6 |\n"
+ " cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
+ " cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
+ " cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
+ " struct_ops | fentry | fexit | freplace | sk_lookup }\n"
+ " ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n"
+ " sk_skb_stream_parser | flow_dissector }\n"
+ " METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} |\n"
+ " {-L|--use-loader} }\n"
+ "",
+ bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "help", do_help },
+ { "dump", do_dump },
+ { "pin", do_pin },
+ { "load", do_load },
+ { "loadall", do_loadall },
+ { "attach", do_attach },
+ { "detach", do_detach },
+ { "tracelog", do_tracelog },
+ { "run", do_run },
+ { "profile", do_profile },
+ { 0 }
+};
+
+int do_prog(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
new file mode 100644
index 0000000000..26004f0c5a
--- /dev/null
+++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (c) 2020 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_tracing.h>
+#include "pid_iter.h"
+
+/* keep in sync with the definition in main.h */
+enum bpf_obj_type {
+ BPF_OBJ_UNKNOWN,
+ BPF_OBJ_PROG,
+ BPF_OBJ_MAP,
+ BPF_OBJ_LINK,
+ BPF_OBJ_BTF,
+};
+
+struct bpf_perf_link___local {
+ struct bpf_link link;
+ struct file *perf_file;
+} __attribute__((preserve_access_index));
+
+struct perf_event___local {
+ u64 bpf_cookie;
+} __attribute__((preserve_access_index));
+
+enum bpf_link_type___local {
+ BPF_LINK_TYPE_PERF_EVENT___local = 7,
+};
+
+extern const void bpf_link_fops __ksym;
+extern const void bpf_map_fops __ksym;
+extern const void bpf_prog_fops __ksym;
+extern const void btf_fops __ksym;
+
+const volatile enum bpf_obj_type obj_type = BPF_OBJ_UNKNOWN;
+
+static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type)
+{
+ switch (type) {
+ case BPF_OBJ_PROG:
+ return BPF_CORE_READ((struct bpf_prog *)ent, aux, id);
+ case BPF_OBJ_MAP:
+ return BPF_CORE_READ((struct bpf_map *)ent, id);
+ case BPF_OBJ_BTF:
+ return BPF_CORE_READ((struct btf *)ent, id);
+ case BPF_OBJ_LINK:
+ return BPF_CORE_READ((struct bpf_link *)ent, id);
+ default:
+ return 0;
+ }
+}
+
+/* may only be used with BPF_LINK_TYPE_PERF_EVENT links */
+static __u64 get_bpf_cookie(struct bpf_link *link)
+{
+ struct bpf_perf_link___local *perf_link;
+ struct perf_event___local *event;
+
+ perf_link = container_of(link, struct bpf_perf_link___local, link);
+ event = BPF_CORE_READ(perf_link, perf_file, private_data);
+ return BPF_CORE_READ(event, bpf_cookie);
+}
+
+SEC("iter/task_file")
+int iter(struct bpf_iter__task_file *ctx)
+{
+ struct file *file = ctx->file;
+ struct task_struct *task = ctx->task;
+ struct pid_iter_entry e;
+ const void *fops;
+
+ if (!file || !task)
+ return 0;
+
+ switch (obj_type) {
+ case BPF_OBJ_PROG:
+ fops = &bpf_prog_fops;
+ break;
+ case BPF_OBJ_MAP:
+ fops = &bpf_map_fops;
+ break;
+ case BPF_OBJ_BTF:
+ fops = &btf_fops;
+ break;
+ case BPF_OBJ_LINK:
+ fops = &bpf_link_fops;
+ break;
+ default:
+ return 0;
+ }
+
+ if (file->f_op != fops)
+ return 0;
+
+ __builtin_memset(&e, 0, sizeof(e));
+ e.pid = task->tgid;
+ e.id = get_obj_id(file->private_data, obj_type);
+
+ if (obj_type == BPF_OBJ_LINK &&
+ bpf_core_enum_value_exists(enum bpf_link_type___local,
+ BPF_LINK_TYPE_PERF_EVENT___local)) {
+ struct bpf_link *link = (struct bpf_link *) file->private_data;
+
+ if (link->type == bpf_core_enum_value(enum bpf_link_type___local,
+ BPF_LINK_TYPE_PERF_EVENT___local)) {
+ e.has_bpf_cookie = true;
+ e.bpf_cookie = get_bpf_cookie(link);
+ }
+ }
+
+ bpf_probe_read_kernel_str(&e.comm, sizeof(e.comm),
+ task->group_leader->comm);
+ bpf_seq_write(ctx->meta->seq, &e, sizeof(e));
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
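A sketch of how user space drives this iterator through the generated
skeleton, in the spirit of bpftool's pids.c (error handling elided; assumes
pid_iter.skel.h was generated by "bpftool gen skeleton" and that the
BPF_OBJ_* enum above is visible to the caller):

	#include <stdio.h>
	#include <unistd.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>
	#include "pid_iter.h"
	#include "pid_iter.skel.h"

	static int dump_prog_users(void)
	{
		struct pid_iter_bpf *skel;
		struct pid_iter_entry e;
		struct bpf_link *link;
		int iter_fd;

		skel = pid_iter_bpf__open();
		/* BPF_OBJ_PROG mirrors the enum in the program above */
		skel->rodata->obj_type = BPF_OBJ_PROG;
		pid_iter_bpf__load(skel);

		link = bpf_program__attach_iter(skel->progs.iter, NULL);
		iter_fd = bpf_iter_create(bpf_link__fd(link));

		/* one fixed-size entry per matching (task, file) pair */
		while (read(iter_fd, &e, sizeof(e)) == sizeof(e))
			printf("id %u held by pid %d (%s)\n", e.id, e.pid, e.comm);

		close(iter_fd);
		bpf_link__destroy(link);
		pid_iter_bpf__destroy(skel);
		return 0;
	}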
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.h b/tools/bpf/bpftool/skeleton/pid_iter.h
new file mode 100644
index 0000000000..bbb570d4cc
--- /dev/null
+++ b/tools/bpf/bpftool/skeleton/pid_iter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2020 Facebook */
+#ifndef __PID_ITER_H
+#define __PID_ITER_H
+
+struct pid_iter_entry {
+ __u32 id;
+ int pid;
+ __u64 bpf_cookie;
+ bool has_bpf_cookie;
+ char comm[16];
+};
+
+#endif
diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c
new file mode 100644
index 0000000000..2f80edc682
--- /dev/null
+++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2020 Facebook
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct bpf_perf_event_value___local {
+ __u64 counter;
+ __u64 enabled;
+ __u64 running;
+} __attribute__((preserve_access_index));
+
+/* map of perf event fds, num_cpu * num_metric entries */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(int));
+} events SEC(".maps");
+
+/* readings at fentry */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value___local));
+} fentry_readings SEC(".maps");
+
+/* accumulated readings */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value___local));
+} accum_readings SEC(".maps");
+
+/* sample counts, one per cpu */
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u64));
+} counts SEC(".maps");
+
+const volatile __u32 num_cpu = 1;
+const volatile __u32 num_metric = 1;
+#define MAX_NUM_METRICS 4
+
+SEC("fentry/XXX")
+int BPF_PROG(fentry_XXX)
+{
+ struct bpf_perf_event_value___local *ptrs[MAX_NUM_METRICS];
+ u32 key = bpf_get_smp_processor_id();
+ u32 i;
+
+ /* look up all the output slots before reading the counters, to
+ * keep the reads close together and reduce measurement error */
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
+ u32 flag = i;
+
+ ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
+ if (!ptrs[i])
+ return 0;
+ }
+
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
+ struct bpf_perf_event_value___local reading;
+ int err;
+
+ err = bpf_perf_event_read_value(&events, key, (void *)&reading,
+ sizeof(reading));
+ if (err)
+ return 0;
+ *(ptrs[i]) = reading;
+ key += num_cpu;
+ }
+
+ return 0;
+}
+
+static inline void
+fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
+{
+ struct bpf_perf_event_value___local *before, diff;
+
+ before = bpf_map_lookup_elem(&fentry_readings, &id);
+ /* only account samples with a valid fentry_reading */
+ if (before && before->counter) {
+ struct bpf_perf_event_value___local *accum;
+
+ diff.counter = after->counter - before->counter;
+ diff.enabled = after->enabled - before->enabled;
+ diff.running = after->running - before->running;
+
+ accum = bpf_map_lookup_elem(&accum_readings, &id);
+ if (accum) {
+ accum->counter += diff.counter;
+ accum->enabled += diff.enabled;
+ accum->running += diff.running;
+ }
+ }
+}
+
+SEC("fexit/XXX")
+int BPF_PROG(fexit_XXX)
+{
+ struct bpf_perf_event_value___local readings[MAX_NUM_METRICS];
+ u32 cpu = bpf_get_smp_processor_id();
+ u32 i, zero = 0;
+ int err;
+ u64 *count;
+
+ /* read all events before updating the maps, to reduce error */
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
+ err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
+ (void *)(readings + i),
+ sizeof(*readings));
+ if (err)
+ return 0;
+ }
+ count = bpf_map_lookup_elem(&counts, &zero);
+ if (count) {
+ *count += 1;
+ for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++)
+ fexit_update_maps(i, &readings[i]);
+ }
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
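The programs above only accumulate raw (counter, enabled, running) deltas;
turning them into a usable count happens in user space. A sketch of the
standard perf scaling step (hypothetical helper following the usual perf
convention; the exact presentation in bpftool's profile output may differ):

	/* extrapolate a counter over the time the event was actually
	 * scheduled in -- the usual correction for multiplexed counters */
	static unsigned long long
	scale_reading(unsigned long long counter, unsigned long long enabled,
		      unsigned long long running)
	{
		if (!running)
			return 0;
		return counter * enabled / running;
	}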
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
new file mode 100644
index 0000000000..3ebc9fe91e
--- /dev/null
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2020 Facebook */
+
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <linux/err.h>
+
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+
+#include "json_writer.h"
+#include "main.h"
+
+#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
+
+static const struct btf_type *map_info_type;
+static __u32 map_info_alloc_len;
+static struct btf *btf_vmlinux;
+static __s32 map_info_type_id;
+
+struct res {
+ unsigned int nr_maps;
+ unsigned int nr_errs;
+};
+
+static const struct btf *get_btf_vmlinux(void)
+{
+ if (btf_vmlinux)
+ return btf_vmlinux;
+
+ btf_vmlinux = libbpf_find_kernel_btf();
+ if (!btf_vmlinux)
+ p_err("struct_ops requires kernel CONFIG_DEBUG_INFO_BTF=y");
+
+ return btf_vmlinux;
+}
+
+static const char *get_kern_struct_ops_name(const struct bpf_map_info *info)
+{
+ const struct btf *kern_btf;
+ const struct btf_type *t;
+ const char *st_ops_name;
+
+ kern_btf = get_btf_vmlinux();
+ if (!kern_btf)
+ return "<btf_vmlinux_not_found>";
+
+ t = btf__type_by_id(kern_btf, info->btf_vmlinux_value_type_id);
+ st_ops_name = btf__name_by_offset(kern_btf, t->name_off);
+ st_ops_name += strlen(STRUCT_OPS_VALUE_PREFIX);
+
+ return st_ops_name;
+}
+
+static __s32 get_map_info_type_id(void)
+{
+ const struct btf *kern_btf;
+
+ if (map_info_type_id)
+ return map_info_type_id;
+
+ kern_btf = get_btf_vmlinux();
+ if (!kern_btf)
+ return 0;
+
+ map_info_type_id = btf__find_by_name_kind(kern_btf, "bpf_map_info",
+ BTF_KIND_STRUCT);
+ if (map_info_type_id < 0) {
+ p_err("can't find bpf_map_info from btf_vmlinux");
+ return map_info_type_id;
+ }
+ map_info_type = btf__type_by_id(kern_btf, map_info_type_id);
+
+ /* Ensure map_info_alloc() has at least what bpftool needs */
+ map_info_alloc_len = map_info_type->size;
+ if (map_info_alloc_len < sizeof(struct bpf_map_info))
+ map_info_alloc_len = sizeof(struct bpf_map_info);
+
+ return map_info_type_id;
+}
+
+/* If a subcmd needs to print out the bpf_map_info, it should
+ * always call map_info_alloc() to allocate a bpf_map_info object
+ * instead of allocating it on the stack.
+ *
+ * map_info_alloc() takes the running kernel's btf into account,
+ * i.e. it considers the running kernel's sizeof(struct bpf_map_info).
+ *
+ * This enables the "struct_ops" cmd to print the latest
+ * "struct bpf_map_info". A usage sketch follows the function.
+ *
+ * [ Recall that "struct_ops" requires the kernel's btf to
+ *   be available ]
+ */
+static struct bpf_map_info *map_info_alloc(__u32 *alloc_len)
+{
+ struct bpf_map_info *info;
+
+ if (get_map_info_type_id() < 0)
+ return NULL;
+
+ info = calloc(1, map_info_alloc_len);
+ if (!info)
+ p_err("mem alloc failed");
+ else
+ *alloc_len = map_info_alloc_len;
+
+ return info;
+}
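A minimal caller sketch for the contract described above (a fragment,
mirroring what do_one_id() below does; error handling abbreviated):

	__u32 info_len;
	struct bpf_map_info *info = map_info_alloc(&info_len);

	if (!info)
		return -1;
	/* info_len now reflects the running kernel's bpf_map_info size */
	if (bpf_map_get_info_by_fd(fd, info, &info_len))
		p_err("can't get map info: %s", strerror(errno));
	free(info);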
+
+/* Iterate over all struct_ops maps in the system.
+ * The fd is returned in "*res_fd" and the map_info in "*info".
+ * In the very first iteration, info->id should be 0.
+ * An optional map "*name" filter can be specified.
+ * The filter can be made more flexible in the future,
+ * e.g. filter by kernel struct_ops name, regex, glob, etc.
+ *
+ * Return value:
+ *     1: A struct_ops map was found.  It is returned in "*res_fd" and
+ *        "*info".  The caller can continue to call get_next in the future.
+ *     0: No struct_ops map is returned.
+ *        All struct_ops maps have been found.
+ *    -1: Error; the caller should abort the iteration.
+ */
+static int get_next_struct_ops_map(const char *name, int *res_fd,
+ struct bpf_map_info *info, __u32 info_len)
+{
+ __u32 id = info->id;
+ int err, fd;
+
+ while (true) {
+ err = bpf_map_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT)
+ return 0;
+ p_err("can't get next map: %s", strerror(errno));
+ return -1;
+ }
+
+ fd = bpf_map_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
+ p_err("can't get map by id (%u): %s",
+ id, strerror(errno));
+ return -1;
+ }
+
+ err = bpf_map_get_info_by_fd(fd, info, &info_len);
+ if (err) {
+ p_err("can't get map info: %s", strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ if (info->type == BPF_MAP_TYPE_STRUCT_OPS &&
+ (!name || !strcmp(name, info->name))) {
+ *res_fd = fd;
+ return 1;
+ }
+ close(fd);
+ }
+}
+
+static int cmd_retval(const struct res *res, bool must_have_one_map)
+{
+ if (res->nr_errs || (!res->nr_maps && must_have_one_map))
+ return -1;
+
+ return 0;
+}
+
+/* "data" is the work_func private storage */
+typedef int (*work_func)(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr);
+
+/* Find all struct_ops maps in the system.
+ * Filter out by "name" (if specified).
+ * Then call "func(fd, info, data, wtr)" on each struct_ops map found.
+ */
+static struct res do_search(const char *name, work_func func, void *data,
+ struct json_writer *wtr)
+{
+ struct bpf_map_info *info;
+ struct res res = {};
+ __u32 info_len;
+ int fd, err;
+
+ info = map_info_alloc(&info_len);
+ if (!info) {
+ res.nr_errs++;
+ return res;
+ }
+
+ if (wtr)
+ jsonw_start_array(wtr);
+ while ((err = get_next_struct_ops_map(name, &fd, info, info_len)) == 1) {
+ res.nr_maps++;
+ err = func(fd, info, data, wtr);
+ if (err)
+ res.nr_errs++;
+ close(fd);
+ }
+ if (wtr)
+ jsonw_end_array(wtr);
+
+ if (err)
+ res.nr_errs++;
+
+ if (!wtr && name && !res.nr_errs && !res.nr_maps)
+ /* We are not printing an empty [].
+ * Thus, we need to specifically say that nothing was
+ * found for "name" here.
+ */
+ p_err("no struct_ops found for %s", name);
+ else if (!wtr && json_output && !res.nr_errs)
+ /* The "func()" above is not writing any json (i.e. !wtr
+ * test here).
+ *
+ * However, "-j" is enabled and there is no errs here,
+ * so call json_null() as the current convention of
+ * other cmds.
+ */
+ jsonw_null(json_wtr);
+
+ free(info);
+ return res;
+}
+
+static struct res do_one_id(const char *id_str, work_func func, void *data,
+ struct json_writer *wtr)
+{
+ struct bpf_map_info *info;
+ struct res res = {};
+ unsigned long id;
+ __u32 info_len;
+ char *endptr;
+ int fd;
+
+ id = strtoul(id_str, &endptr, 0);
+ if (*endptr || !id || id > UINT32_MAX) {
+ p_err("invalid id %s", id_str);
+ res.nr_errs++;
+ return res;
+ }
+
+ fd = bpf_map_get_fd_by_id(id);
+ if (fd < 0) {
+ p_err("can't get map by id (%lu): %s", id, strerror(errno));
+ res.nr_errs++;
+ return res;
+ }
+
+ info = map_info_alloc(&info_len);
+ if (!info) {
+ res.nr_errs++;
+ goto done;
+ }
+
+ if (bpf_map_get_info_by_fd(fd, info, &info_len)) {
+ p_err("can't get map info: %s", strerror(errno));
+ res.nr_errs++;
+ goto done;
+ }
+
+ if (info->type != BPF_MAP_TYPE_STRUCT_OPS) {
+ p_err("%s id %u is not a struct_ops map", info->name, info->id);
+ res.nr_errs++;
+ goto done;
+ }
+
+ res.nr_maps++;
+
+ if (func(fd, info, data, wtr))
+ res.nr_errs++;
+ else if (!wtr && json_output)
+ /* The "func()" above is not writing any json (i.e. !wtr
+ * test here).
+ *
+ * However, "-j" is enabled and there is no errs here,
+ * so call json_null() as the current convention of
+ * other cmds.
+ */
+ jsonw_null(json_wtr);
+
+done:
+ free(info);
+ close(fd);
+
+ return res;
+}
+
+static struct res do_work_on_struct_ops(const char *search_type,
+ const char *search_term,
+ work_func func, void *data,
+ struct json_writer *wtr)
+{
+ if (search_type) {
+ if (is_prefix(search_type, "id"))
+ return do_one_id(search_term, func, data, wtr);
+ else if (!is_prefix(search_type, "name"))
+ usage();
+ }
+
+ return do_search(search_term, func, data, wtr);
+}
+
+static int __do_show(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr)
+{
+ if (wtr) {
+ jsonw_start_object(wtr);
+ jsonw_uint_field(wtr, "id", info->id);
+ jsonw_string_field(wtr, "name", info->name);
+ jsonw_string_field(wtr, "kernel_struct_ops",
+ get_kern_struct_ops_name(info));
+ jsonw_end_object(wtr);
+ } else {
+ printf("%u: %-15s %-32s\n", info->id, info->name,
+ get_kern_struct_ops_name(info));
+ }
+
+ return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+ const char *search_type = NULL, *search_term = NULL;
+ struct res res;
+
+ if (argc && argc != 2)
+ usage();
+
+ if (argc == 2) {
+ search_type = GET_ARG();
+ search_term = GET_ARG();
+ }
+
+ res = do_work_on_struct_ops(search_type, search_term, __do_show,
+ NULL, json_wtr);
+
+ return cmd_retval(&res, !!search_term);
+}
+
+static int __do_dump(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr)
+{
+ struct btf_dumper *d = (struct btf_dumper *)data;
+ const struct btf_type *struct_ops_type;
+ const struct btf *kern_btf = d->btf;
+ const char *struct_ops_name;
+ int zero = 0;
+ void *value;
+
+ /* note: d->jw == wtr */
+
+ /* A kernel supporting BPF_MAP_TYPE_STRUCT_OPS must provide
+ * btf_vmlinux_value_type_id.
+ */
+ struct_ops_type = btf__type_by_id(kern_btf,
+ info->btf_vmlinux_value_type_id);
+ struct_ops_name = btf__name_by_offset(kern_btf,
+ struct_ops_type->name_off);
+ value = calloc(1, info->value_size);
+ if (!value) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+
+ if (bpf_map_lookup_elem(fd, &zero, value)) {
+ p_err("can't lookup struct_ops map %s id %u",
+ info->name, info->id);
+ free(value);
+ return -1;
+ }
+
+ jsonw_start_object(wtr);
+ jsonw_name(wtr, "bpf_map_info");
+ btf_dumper_type(d, map_info_type_id, (void *)info);
+ jsonw_end_object(wtr);
+
+ jsonw_start_object(wtr);
+ jsonw_name(wtr, struct_ops_name);
+ btf_dumper_type(d, info->btf_vmlinux_value_type_id, value);
+ jsonw_end_object(wtr);
+
+ free(value);
+
+ return 0;
+}
+
+static int do_dump(int argc, char **argv)
+{
+ const char *search_type = NULL, *search_term = NULL;
+ json_writer_t *wtr = json_wtr;
+ const struct btf *kern_btf;
+ struct btf_dumper d = {};
+ struct res res;
+
+ if (argc && argc != 2)
+ usage();
+
+ if (argc == 2) {
+ search_type = GET_ARG();
+ search_term = GET_ARG();
+ }
+
+ kern_btf = get_btf_vmlinux();
+ if (!kern_btf)
+ return -1;
+
+ if (!json_output) {
+ wtr = jsonw_new(stdout);
+ if (!wtr) {
+ p_err("can't create json writer");
+ return -1;
+ }
+ jsonw_pretty(wtr, true);
+ }
+
+ d.btf = kern_btf;
+ d.jw = wtr;
+ d.is_plain_text = !json_output;
+ d.prog_id_as_func_ptr = true;
+
+ res = do_work_on_struct_ops(search_type, search_term, __do_dump, &d,
+ wtr);
+
+ if (!json_output)
+ jsonw_destroy(&wtr);
+
+ return cmd_retval(&res, !!search_term);
+}
+
+static int __do_unregister(int fd, const struct bpf_map_info *info, void *data,
+ struct json_writer *wtr)
+{
+ int zero = 0;
+
+ if (bpf_map_delete_elem(fd, &zero)) {
+ p_err("can't unload %s %s id %u: %s",
+ get_kern_struct_ops_name(info), info->name,
+ info->id, strerror(errno));
+ return -1;
+ }
+
+ p_info("Unregistered %s %s id %u",
+ get_kern_struct_ops_name(info), info->name,
+ info->id);
+
+ return 0;
+}
+
+static int do_unregister(int argc, char **argv)
+{
+ const char *search_type, *search_term;
+ struct res res;
+
+ if (argc != 2)
+ usage();
+
+ search_type = GET_ARG();
+ search_term = GET_ARG();
+
+ res = do_work_on_struct_ops(search_type, search_term,
+ __do_unregister, NULL, NULL);
+
+ return cmd_retval(&res, true);
+}
+
+static int pin_link(struct bpf_link *link, const char *pindir,
+ const char *name)
+{
+ char pinfile[PATH_MAX];
+ int err;
+
+ err = pathname_concat(pinfile, sizeof(pinfile), pindir, name);
+ if (err)
+ return -1;
+
+ return bpf_link__pin(link, pinfile);
+}
+
+static int do_register(int argc, char **argv)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, open_opts);
+ __u32 link_info_len = sizeof(struct bpf_link_info);
+ struct bpf_link_info link_info = {};
+ struct bpf_map_info info = {};
+ __u32 info_len = sizeof(info);
+ int nr_errs = 0, nr_maps = 0;
+ const char *linkdir = NULL;
+ struct bpf_object *obj;
+ struct bpf_link *link;
+ struct bpf_map *map;
+ const char *file;
+
+ if (argc != 1 && argc != 2)
+ usage();
+
+ file = GET_ARG();
+ if (argc == 1)
+ linkdir = GET_ARG();
+
+ if (linkdir && mount_bpffs_for_pin(linkdir, true)) {
+ p_err("can't mount bpffs for pinning");
+ return -1;
+ }
+
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ open_opts.kernel_log_level = 1 + 2 + 4;
+
+ obj = bpf_object__open_file(file, &open_opts);
+ if (!obj)
+ return -1;
+
+ set_max_rlimit();
+
+ if (bpf_object__load(obj)) {
+ bpf_object__close(obj);
+ return -1;
+ }
+
+ bpf_object__for_each_map(map, obj) {
+ if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
+ continue;
+
+ link = bpf_map__attach_struct_ops(map);
+ if (!link) {
+ p_err("can't register struct_ops %s: %s",
+ bpf_map__name(map), strerror(errno));
+ nr_errs++;
+ continue;
+ }
+ nr_maps++;
+
+ if (bpf_map_get_info_by_fd(bpf_map__fd(map), &info,
+ &info_len)) {
+ /* Not p_err. The struct_ops was attached
+ * successfully.
+ */
+ p_info("Registered %s but can't find id: %s",
+ bpf_map__name(map), strerror(errno));
+ goto clean_link;
+ }
+ if (!(bpf_map__map_flags(map) & BPF_F_LINK)) {
+ p_info("Registered %s %s id %u",
+ get_kern_struct_ops_name(&info),
+ info.name,
+ info.id);
+ goto clean_link;
+ }
+ if (bpf_link_get_info_by_fd(bpf_link__fd(link),
+ &link_info,
+ &link_info_len)) {
+ p_err("Registered %s but can't find link id: %s",
+ bpf_map__name(map), strerror(errno));
+ nr_errs++;
+ goto clean_link;
+ }
+ if (linkdir && pin_link(link, linkdir, info.name)) {
+ p_err("can't pin link %u for %s: %s",
+ link_info.id, info.name,
+ strerror(errno));
+ nr_errs++;
+ goto clean_link;
+ }
+ p_info("Registered %s %s map id %u link id %u",
+ get_kern_struct_ops_name(&info),
+ info.name, info.id, link_info.id);
+
+clean_link:
+ bpf_link__disconnect(link);
+ bpf_link__destroy(link);
+ }
+
+ bpf_object__close(obj);
+
+ if (nr_errs)
+ return -1;
+
+ if (!nr_maps) {
+ p_err("no struct_ops found in %s", file);
+ return -1;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %1$s %2$s { show | list } [STRUCT_OPS_MAP]\n"
+ " %1$s %2$s dump [STRUCT_OPS_MAP]\n"
+ " %1$s %2$s register OBJ [LINK_DIR]\n"
+ " %1$s %2$s unregister STRUCT_OPS_MAP\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " STRUCT_OPS_MAP := [ id STRUCT_OPS_MAP_ID | name STRUCT_OPS_MAP_NAME ]\n"
+ " " HELP_SPEC_OPTIONS " }\n"
+ "",
+ bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "register", do_register },
+ { "unregister", do_unregister },
+ { "dump", do_dump },
+ { "help", do_help },
+ { 0 }
+};
+
+int do_struct_ops(int argc, char **argv)
+{
+ int err;
+
+ err = cmd_select(cmds, argc, argv, do_help);
+
+ btf__free(btf_vmlinux);
+
+ return err;
+}
diff --git a/tools/bpf/bpftool/tracelog.c b/tools/bpf/bpftool/tracelog.c
new file mode 100644
index 0000000000..bf1f022127
--- /dev/null
+++ b/tools/bpf/bpftool/tracelog.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (c) 2015-2017 Daniel Borkmann */
+/* Copyright (c) 2018 Netronome Systems, Inc. */
+
+#include <errno.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/magic.h>
+#include <fcntl.h>
+#include <sys/vfs.h>
+
+#include "main.h"
+
+#ifndef TRACEFS_MAGIC
+# define TRACEFS_MAGIC 0x74726163
+#endif
+
+#define _textify(x) #x
+#define textify(x) _textify(x)
+
+FILE *trace_pipe_fd;
+char *buff;
+
+static int validate_tracefs_mnt(const char *mnt, unsigned long magic)
+{
+ struct statfs st_fs;
+
+ if (statfs(mnt, &st_fs) < 0)
+ return -ENOENT;
+ if ((unsigned long)st_fs.f_type != magic)
+ return -ENOENT;
+
+ return 0;
+}
+
+static bool
+find_tracefs_mnt_single(unsigned long magic, char *mnt, const char *mntpt)
+{
+ size_t src_len;
+
+ if (validate_tracefs_mnt(mntpt, magic))
+ return false;
+
+ src_len = strlen(mntpt);
+ if (src_len + 1 >= PATH_MAX) {
+ p_err("tracefs mount point name too long");
+ return false;
+ }
+
+ strcpy(mnt, mntpt);
+ return true;
+}
+
+static bool get_tracefs_pipe(char *mnt)
+{
+ static const char * const known_mnts[] = {
+ "/sys/kernel/debug/tracing",
+ "/sys/kernel/tracing",
+ "/tracing",
+ "/trace",
+ };
+ const char *pipe_name = "/trace_pipe";
+ const char *fstype = "tracefs";
+ char type[100], format[32];
+ const char * const *ptr;
+ bool found = false;
+ FILE *fp;
+
+ for (ptr = known_mnts; ptr < known_mnts + ARRAY_SIZE(known_mnts); ptr++)
+ if (find_tracefs_mnt_single(TRACEFS_MAGIC, mnt, *ptr))
+ goto exit_found;
+
+ fp = fopen("/proc/mounts", "r");
+ if (!fp)
+ return false;
+
+ /* Allow room for the NUL terminating byte and the pipe file name */
+ snprintf(format, sizeof(format), "%%*s %%%zds %%99s %%*s %%*d %%*d\\n",
+ PATH_MAX - strlen(pipe_name) - 1);
+ while (fscanf(fp, format, mnt, type) == 2)
+ if (strcmp(type, fstype) == 0) {
+ found = true;
+ break;
+ }
+ fclose(fp);
+
+ /* The string from fscanf() might be truncated, check mnt is valid */
+ if (found && validate_tracefs_mnt(mnt, TRACEFS_MAGIC))
+ goto exit_found;
+
+ if (block_mount)
+ return false;
+
+ p_info("could not find tracefs, attempting to mount it now");
+ /* Most of the time, tracefs is automatically mounted by debugfs at
+ * /sys/kernel/debug/tracing when we try to access it. If we could not
+ * find it, it is likely that debugfs is not mounted. Let's give one
+ * attempt at mounting just tracefs at /sys/kernel/tracing.
+ */
+ strcpy(mnt, known_mnts[1]);
+ if (mount_tracefs(mnt))
+ return false;
+
+exit_found:
+ strcat(mnt, pipe_name);
+ return true;
+}
+
+static void exit_tracelog(int signum)
+{
+ fclose(trace_pipe_fd);
+ free(buff);
+
+ if (json_output) {
+ jsonw_end_array(json_wtr);
+ jsonw_destroy(&json_wtr);
+ }
+
+ exit(0);
+}
+
+int do_tracelog(int argc, char **argv)
+{
+ const struct sigaction act = {
+ .sa_handler = exit_tracelog
+ };
+ char trace_pipe[PATH_MAX];
+ size_t buff_len = 0;
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+
+ if (!get_tracefs_pipe(trace_pipe))
+ return -1;
+
+ trace_pipe_fd = fopen(trace_pipe, "r");
+ if (!trace_pipe_fd) {
+ p_err("could not open trace pipe: %s", strerror(errno));
+ return -1;
+ }
+
+ sigaction(SIGHUP, &act, NULL);
+ sigaction(SIGINT, &act, NULL);
+ sigaction(SIGTERM, &act, NULL);
+ while (1) {
+ ssize_t ret;
+
+ ret = getline(&buff, &buff_len, trace_pipe_fd);
+ if (ret <= 0) {
+ p_err("failed to read content from trace pipe: %s",
+ strerror(errno));
+ break;
+ }
+ if (json_output)
+ jsonw_string(json_wtr, buff);
+ else
+ printf("%s", buff);
+ }
+
+ fclose(trace_pipe_fd);
+ free(buff);
+ return -1;
+}
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
new file mode 100644
index 0000000000..567f56dfd9
--- /dev/null
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <bpf/libbpf.h>
+#include <bpf/libbpf_internal.h>
+
+#include "disasm.h"
+#include "json_writer.h"
+#include "main.h"
+#include "xlated_dumper.h"
+
+static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
+{
+ const struct kernel_sym *a = sym_a, *b = sym_b;
+
+ /* plain subtraction can overflow int for distant addresses */
+ return a->address < b->address ? -1 : a->address > b->address;
+}
+
+void kernel_syms_load(struct dump_data *dd)
+{
+ struct kernel_sym *sym;
+ char buff[256];
+ void *tmp, *address;
+ FILE *fp;
+
+ fp = fopen("/proc/kallsyms", "r");
+ if (!fp)
+ return;
+
+ while (fgets(buff, sizeof(buff), fp)) {
+ tmp = libbpf_reallocarray(dd->sym_mapping, dd->sym_count + 1,
+ sizeof(*dd->sym_mapping));
+ if (!tmp) {
+out:
+ free(dd->sym_mapping);
+ dd->sym_mapping = NULL;
+ fclose(fp);
+ return;
+ }
+ dd->sym_mapping = tmp;
+ sym = &dd->sym_mapping[dd->sym_count];
+
+ /* module is optional */
+ sym->module[0] = '\0';
+ /* trim the square brackets around the module name */
+ if (sscanf(buff, "%p %*c %s [%[^]]s", &address, sym->name, sym->module) < 2)
+ continue;
+ sym->address = (unsigned long)address;
+ if (!strcmp(sym->name, "__bpf_call_base")) {
+ dd->address_call_base = sym->address;
+ /* sysctl kernel.kptr_restrict was set */
+ if (!sym->address)
+ goto out;
+ }
+ if (sym->address)
+ dd->sym_count++;
+ }
+
+ fclose(fp);
+
+ qsort(dd->sym_mapping, dd->sym_count,
+ sizeof(*dd->sym_mapping), kernel_syms_cmp);
+}
+
+void kernel_syms_destroy(struct dump_data *dd)
+{
+ free(dd->sym_mapping);
+}
+
+struct kernel_sym *kernel_syms_search(struct dump_data *dd,
+ unsigned long key)
+{
+ struct kernel_sym sym = {
+ .address = key,
+ };
+
+ return dd->sym_mapping ?
+ bsearch(&sym, dd->sym_mapping, dd->sym_count,
+ sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
+}
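Typical driving sequence for the three helpers above (a fragment; "addr"
stands for any address taken from a dumped instruction):

	struct dump_data dd = {};
	struct kernel_sym *sym;

	kernel_syms_load(&dd);
	sym = kernel_syms_search(&dd, addr);
	if (sym)
		printf("0x%lx resolves to %s\n", addr, sym->name);
	kernel_syms_destroy(&dd);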
+
+static void __printf(2, 3) print_insn(void *private_data, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ vprintf(fmt, args);
+ va_end(args);
+}
+
+static void __printf(2, 3)
+print_insn_for_graph(void *private_data, const char *fmt, ...)
+{
+ char buf[64], *p;
+ va_list args;
+
+ va_start(args, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+
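+ /* Example: "r0 = 1\n" becomes "r0 = 1\l\" followed by the kept
+ * newline, i.e. a left-aligned dot(1) label row plus a line
+ * continuation; '<', '>', '|' and '&' get backslash-escaped. */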
+ p = buf;
+ while (*p != '\0') {
+ if (*p == '\n') {
+ memmove(p + 3, p, strlen(buf) + 1 - (p - buf));
+ /* Align each instruction dump row left. */
+ *p++ = '\\';
+ *p++ = 'l';
+ /* Output multiline concatenation. */
+ *p++ = '\\';
+ } else if (*p == '<' || *p == '>' || *p == '|' || *p == '&') {
+ memmove(p + 1, p, strlen(buf) + 1 - (p - buf));
+ /* Escape special character. */
+ *p++ = '\\';
+ }
+
+ p++;
+ }
+
+ printf("%s", buf);
+}
+
+static void __printf(2, 3)
+print_insn_json(void *private_data, const char *fmt, ...)
+{
+ unsigned int l = strlen(fmt);
+ char chomped_fmt[l + 1];
+ va_list args;
+
+ va_start(args, fmt);
+ /* chomp the trailing newline; keep the buffer defined for l == 0 */
+ chomped_fmt[0] = '\0';
+ if (l > 0) {
+ strncpy(chomped_fmt, fmt, l - 1);
+ chomped_fmt[l - 1] = '\0';
+ }
+ jsonw_vprintf_enquote(json_wtr, chomped_fmt, args);
+ va_end(args);
+}
+
+static const char *print_call_pcrel(struct dump_data *dd,
+ struct kernel_sym *sym,
+ unsigned long address,
+ const struct bpf_insn *insn)
+{
+ if (!dd->nr_jited_ksyms)
+ /* Do not show address for interpreted programs */
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%+d", insn->off);
+ else if (sym)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%+d#%s", insn->off, sym->name);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%+d#0x%lx", insn->off, address);
+ return dd->scratch_buff;
+}
+
+static const char *print_call_helper(struct dump_data *dd,
+ struct kernel_sym *sym,
+ unsigned long address)
+{
+ if (sym)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "%s", sym->name);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "0x%lx", address);
+ return dd->scratch_buff;
+}
+
+static const char *print_call(void *private_data,
+ const struct bpf_insn *insn)
+{
+ struct dump_data *dd = private_data;
+ unsigned long address = dd->address_call_base + insn->imm;
+ struct kernel_sym *sym;
+
+ if (insn->src_reg == BPF_PSEUDO_CALL &&
+ (__u32) insn->imm < dd->nr_jited_ksyms && dd->jited_ksyms)
+ address = dd->jited_ksyms[insn->imm];
+
+ sym = kernel_syms_search(dd, address);
+ if (insn->src_reg == BPF_PSEUDO_CALL)
+ return print_call_pcrel(dd, sym, address, insn);
+ else
+ return print_call_helper(dd, sym, address);
+}
+
+static const char *print_imm(void *private_data,
+ const struct bpf_insn *insn,
+ __u64 full_imm)
+{
+ struct dump_data *dd = private_data;
+
+ if (insn->src_reg == BPF_PSEUDO_MAP_FD)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "map[id:%u]", insn->imm);
+ else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "map[id:%u][0]+%u", insn->imm, (insn + 1)->imm);
+ else if (insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "map[idx:%u]+%u", insn->imm, (insn + 1)->imm);
+ else if (insn->src_reg == BPF_PSEUDO_FUNC)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "subprog[%+d]", insn->imm);
+ else
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "0x%llx", (unsigned long long)full_imm);
+ return dd->scratch_buff;
+}
+
+void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes, bool linum)
+{
+ const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn_json,
+ .cb_call = print_call,
+ .cb_imm = print_imm,
+ .private_data = dd,
+ };
+ struct bpf_func_info *record;
+ struct bpf_insn *insn = buf;
+ struct btf *btf = dd->btf;
+ bool double_insn = false;
+ unsigned int nr_skip = 0;
+ char func_sig[1024];
+ unsigned int i;
+
+ jsonw_start_array(json_wtr);
+ record = dd->func_info;
+ for (i = 0; i < len / sizeof(*insn); i++) {
+ if (double_insn) {
+ double_insn = false;
+ continue;
+ }
+ double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
+
+ jsonw_start_object(json_wtr);
+
+ if (btf && record) {
+ if (record->insn_off == i) {
+ btf_dumper_type_only(btf, record->type_id,
+ func_sig,
+ sizeof(func_sig));
+ if (func_sig[0] != '\0') {
+ jsonw_name(json_wtr, "proto");
+ jsonw_string(json_wtr, func_sig);
+ }
+ record = (void *)record + dd->finfo_rec_size;
+ }
+ }
+
+ if (prog_linfo) {
+ const struct bpf_line_info *linfo;
+
+ linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
+ if (linfo) {
+ btf_dump_linfo_json(btf, linfo, linum);
+ nr_skip++;
+ }
+ }
+
+ jsonw_name(json_wtr, "disasm");
+ print_bpf_insn(&cbs, insn + i, true);
+
+ if (opcodes) {
+ jsonw_name(json_wtr, "opcodes");
+ jsonw_start_object(json_wtr);
+
+ jsonw_name(json_wtr, "code");
+ jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code);
+
+ jsonw_name(json_wtr, "src_reg");
+ jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg);
+
+ jsonw_name(json_wtr, "dst_reg");
+ jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg);
+
+ jsonw_name(json_wtr, "off");
+ print_hex_data_json((uint8_t *)(&insn[i].off), 2);
+
+ jsonw_name(json_wtr, "imm");
+ if (double_insn && i < len - 1)
+ print_hex_data_json((uint8_t *)(&insn[i].imm),
+ 12);
+ else
+ print_hex_data_json((uint8_t *)(&insn[i].imm),
+ 4);
+ jsonw_end_object(json_wtr);
+ }
+ jsonw_end_object(json_wtr);
+ }
+ jsonw_end_array(json_wtr);
+}
+
+void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes, bool linum)
+{
+ const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn,
+ .cb_call = print_call,
+ .cb_imm = print_imm,
+ .private_data = dd,
+ };
+ struct bpf_func_info *record;
+ struct bpf_insn *insn = buf;
+ struct btf *btf = dd->btf;
+ unsigned int nr_skip = 0;
+ bool double_insn = false;
+ char func_sig[1024];
+ unsigned int i;
+
+ record = dd->func_info;
+ for (i = 0; i < len / sizeof(*insn); i++) {
+ if (double_insn) {
+ double_insn = false;
+ continue;
+ }
+
+ if (btf && record) {
+ if (record->insn_off == i) {
+ btf_dumper_type_only(btf, record->type_id,
+ func_sig,
+ sizeof(func_sig));
+ if (func_sig[0] != '\0')
+ printf("%s:\n", func_sig);
+ record = (void *)record + dd->finfo_rec_size;
+ }
+ }
+
+ if (prog_linfo) {
+ const struct bpf_line_info *linfo;
+
+ linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
+ if (linfo) {
+ btf_dump_linfo_plain(btf, linfo, "; ",
+ linum);
+ nr_skip++;
+ }
+ }
+
+ double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
+
+ printf("% 4d: ", i);
+ print_bpf_insn(&cbs, insn + i, true);
+
+ if (opcodes) {
+ printf(" ");
+ fprint_hex(stdout, insn + i, 8, " ");
+ if (double_insn && i < len - 1) {
+ printf(" ");
+ fprint_hex(stdout, insn + i + 1, 8, " ");
+ }
+ printf("\n");
+ }
+ }
+}
+
+void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
+ unsigned int start_idx,
+ bool opcodes, bool linum)
+{
+ const struct bpf_insn_cbs cbs = {
+ .cb_print = print_insn_for_graph,
+ .cb_call = print_call,
+ .cb_imm = print_imm,
+ .private_data = dd,
+ };
+ const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
+ const struct bpf_line_info *last_linfo = NULL;
+ struct bpf_func_info *record = dd->func_info;
+ struct bpf_insn *insn_start = buf_start;
+ struct bpf_insn *insn_end = buf_end;
+ struct bpf_insn *cur = insn_start;
+ struct btf *btf = dd->btf;
+ bool double_insn = false;
+ char func_sig[1024];
+
+ for (; cur <= insn_end; cur++) {
+ unsigned int insn_off;
+
+ if (double_insn) {
+ double_insn = false;
+ continue;
+ }
+ double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
+
+ insn_off = (unsigned int)(cur - insn_start + start_idx);
+ if (btf && record) {
+ if (record->insn_off == insn_off) {
+ btf_dumper_type_only(btf, record->type_id,
+ func_sig,
+ sizeof(func_sig));
+ if (func_sig[0] != '\0')
+ printf("; %s:\\l\\\n", func_sig);
+ record = (void *)record + dd->finfo_rec_size;
+ }
+ }
+
+ if (prog_linfo) {
+ const struct bpf_line_info *linfo;
+
+ linfo = bpf_prog_linfo__lfind(prog_linfo, insn_off, 0);
+ if (linfo && linfo != last_linfo) {
+ btf_dump_linfo_dotlabel(btf, linfo, linum);
+ last_linfo = linfo;
+ }
+ }
+
+ printf("%d: ", insn_off);
+ print_bpf_insn(&cbs, cur, true);
+
+ if (opcodes) {
+ printf("\\ \\ \\ \\ ");
+ fprint_hex(stdout, cur, 8, " ");
+ if (double_insn && cur <= insn_end - 1) {
+ printf(" ");
+ fprint_hex(stdout, cur + 1, 8, " ");
+ }
+ printf("\\l\\\n");
+ }
+
+ if (cur != insn_end)
+ printf("| ");
+ }
+}
diff --git a/tools/bpf/bpftool/xlated_dumper.h b/tools/bpf/bpftool/xlated_dumper.h
new file mode 100644
index 0000000000..db3ba06715
--- /dev/null
+++ b/tools/bpf/bpftool/xlated_dumper.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#ifndef __BPF_TOOL_XLATED_DUMPER_H
+#define __BPF_TOOL_XLATED_DUMPER_H
+
+#define SYM_MAX_NAME 256
+#define MODULE_MAX_NAME 64
+
+struct bpf_prog_linfo;
+
+struct kernel_sym {
+ unsigned long address;
+ char name[SYM_MAX_NAME];
+ char module[MODULE_MAX_NAME];
+};
+
+struct dump_data {
+ unsigned long address_call_base;
+ struct kernel_sym *sym_mapping;
+ __u32 sym_count;
+ __u64 *jited_ksyms;
+ __u32 nr_jited_ksyms;
+ struct btf *btf;
+ void *func_info;
+ __u32 finfo_rec_size;
+ const struct bpf_prog_linfo *prog_linfo;
+ char scratch_buff[SYM_MAX_NAME + 8];
+};
+
+void kernel_syms_load(struct dump_data *dd);
+void kernel_syms_destroy(struct dump_data *dd);
+struct kernel_sym *kernel_syms_search(struct dump_data *dd, unsigned long key);
+void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes, bool linum);
+void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes, bool linum);
+void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
+ unsigned int start_idx,
+ bool opcodes, bool linum);
+
+#endif
diff --git a/tools/bpf/resolve_btfids/.gitignore b/tools/bpf/resolve_btfids/.gitignore
new file mode 100644
index 0000000000..52d5e9721d
--- /dev/null
+++ b/tools/bpf/resolve_btfids/.gitignore
@@ -0,0 +1,4 @@
+/fixdep
+/resolve_btfids
+/libbpf/
+/libsubcmd/
diff --git a/tools/bpf/resolve_btfids/Build b/tools/bpf/resolve_btfids/Build
new file mode 100644
index 0000000000..077de3829c
--- /dev/null
+++ b/tools/bpf/resolve_btfids/Build
@@ -0,0 +1,12 @@
+hostprogs := resolve_btfids
+
+resolve_btfids-y += main.o
+resolve_btfids-y += rbtree.o
+resolve_btfids-y += zalloc.o
+resolve_btfids-y += string.o
+resolve_btfids-y += ctype.o
+resolve_btfids-y += str_error_r.o
+
+$(OUTPUT)%.o: ../../lib/%.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,host_cc_o_c)
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
new file mode 100644
index 0000000000..4b8079f294
--- /dev/null
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: GPL-2.0-only
+include ../../scripts/Makefile.include
+include ../../scripts/Makefile.arch
+
+srctree := $(abspath $(CURDIR)/../../../)
+
+ifeq ($(V),1)
+ Q =
+ msg =
+else
+ Q = @
+ ifeq ($(silent),1)
+ msg =
+ else
+ msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
+ endif
+ MAKEFLAGS=--no-print-directory
+endif
+
+# Overrides for the prepare step libraries.
+HOST_OVERRIDES := AR="$(HOSTAR)" CC="$(HOSTCC)" LD="$(HOSTLD)" ARCH="$(HOSTARCH)" \
+ CROSS_COMPILE="" EXTRA_CFLAGS="$(HOSTCFLAGS)"
+
+RM ?= rm
+HOSTCC ?= gcc
+HOSTLD ?= ld
+HOSTAR ?= ar
+CROSS_COMPILE =
+
+OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
+
+LIBBPF_SRC := $(srctree)/tools/lib/bpf/
+SUBCMD_SRC := $(srctree)/tools/lib/subcmd/
+
+BPFOBJ := $(OUTPUT)/libbpf/libbpf.a
+LIBBPF_OUT := $(abspath $(dir $(BPFOBJ)))/
+SUBCMDOBJ := $(OUTPUT)/libsubcmd/libsubcmd.a
+SUBCMD_OUT := $(abspath $(dir $(SUBCMDOBJ)))/
+
+LIBBPF_DESTDIR := $(LIBBPF_OUT)
+LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include
+
+SUBCMD_DESTDIR := $(SUBCMD_OUT)
+SUBCMD_INCLUDE := $(SUBCMD_DESTDIR)include
+
+BINARY := $(OUTPUT)/resolve_btfids
+BINARY_IN := $(BINARY)-in.o
+
+all: $(BINARY)
+
+prepare: $(BPFOBJ) $(SUBCMDOBJ)
+
+$(OUTPUT) $(OUTPUT)/libsubcmd $(LIBBPF_OUT):
+ $(call msg,MKDIR,,$@)
+ $(Q)mkdir -p $(@)
+
+$(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd
+ $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(SUBCMD_OUT) \
+ DESTDIR=$(SUBCMD_DESTDIR) $(HOST_OVERRIDES) prefix= subdir= \
+ $(abspath $@) install_headers
+
+$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUT)
+ $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) \
+ DESTDIR=$(LIBBPF_DESTDIR) $(HOST_OVERRIDES) prefix= subdir= \
+ $(abspath $@) install_headers
+
+LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
+LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
+
+HOSTCFLAGS_resolve_btfids += -g \
+ -I$(srctree)/tools/include \
+ -I$(srctree)/tools/include/uapi \
+ -I$(LIBBPF_INCLUDE) \
+ -I$(SUBCMD_INCLUDE) \
+ $(LIBELF_FLAGS)
+
+LIBS = $(LIBELF_LIBS) -lz
+
+export srctree OUTPUT HOSTCFLAGS_resolve_btfids Q HOSTCC HOSTLD HOSTAR
+include $(srctree)/tools/build/Makefile.include
+
+$(BINARY_IN): fixdep FORCE prepare | $(OUTPUT)
+ $(Q)$(MAKE) $(build)=resolve_btfids
+
+$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
+ $(call msg,LINK,$@)
+ $(Q)$(HOSTCC) $(BINARY_IN) $(KBUILD_HOSTLDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)
+
+clean_objects := $(wildcard $(OUTPUT)/*.o \
+ $(OUTPUT)/.*.o.cmd \
+ $(OUTPUT)/.*.o.d \
+ $(LIBBPF_OUT) \
+ $(LIBBPF_DESTDIR) \
+ $(SUBCMD_OUT) \
+ $(SUBCMD_DESTDIR) \
+ $(OUTPUT)/resolve_btfids)
+
+ifneq ($(clean_objects),)
+clean: fixdep-clean
+ $(call msg,CLEAN,$(BINARY))
+ $(Q)$(RM) -rf $(clean_objects)
+else
+clean:
+endif
+
+tags:
+ $(call msg,GEN,,tags)
+ $(Q)ctags -R . $(LIBBPF_SRC) $(SUBCMD_SRC)
+
+FORCE:
+
+.PHONY: all FORCE clean tags prepare
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
new file mode 100644
index 0000000000..27a23196d5
--- /dev/null
+++ b/tools/bpf/resolve_btfids/main.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * resolve_btfids scans an ELF object for the .BTF_ids section and
+ * resolves its symbols with BTF ID values.
+ *
+ * Each symbol points to 4 bytes of data and is expected to have the
+ * following name syntax:
+ *
+ * __BTF_ID__<type>__<symbol>[__<id>]
+ *
+ * type is:
+ *
+ * func - lookup BTF_KIND_FUNC symbol with <symbol> name
+ * and store its ID into the data:
+ *
+ * __BTF_ID__func__vfs_close__1:
+ * .zero 4
+ *
+ * struct - lookup BTF_KIND_STRUCT symbol with <symbol> name
+ * and store its ID into the data:
+ *
+ * __BTF_ID__struct__sk_buff__1:
+ * .zero 4
+ *
+ * union - lookup BTF_KIND_UNION symbol with <symbol> name
+ * and store its ID into the data:
+ *
+ * __BTF_ID__union__thread_union__1:
+ * .zero 4
+ *
+ * typedef - lookup BTF_KIND_TYPEDEF symbol with <symbol> name
+ * and store its ID into the data:
+ *
+ * __BTF_ID__typedef__pid_t__1:
+ * .zero 4
+ *
+ * set - store the symbol size into the first 4 bytes and sort the
+ * following ID list
+ *
+ * __BTF_ID__set__list:
+ * .zero 4
+ * list:
+ * __BTF_ID__func__vfs_getattr__3:
+ * .zero 4
+ * __BTF_ID__func__vfs_fallocate__4:
+ * .zero 4
+ *
+ * set8 - store the symbol size into the first 4 bytes of an 8-byte
+ * header and sort the following (ID, flags) pair list
+ *
+ * __BTF_ID__set8__list:
+ * .zero 8
+ * list:
+ * __BTF_ID__func__vfs_getattr__3:
+ * .zero 4
+ * .word (1 << 0) | (1 << 2)
+ * __BTF_ID__func__vfs_fallocate__5:
+ * .zero 4
+ * .word (1 << 3) | (1 << 1) | (1 << 2)
+ */
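For context, kernel sources do not write these symbols by hand; they come
from the helper macros in include/linux/btf_ids.h. A representative use
(illustrative sketch, not part of this tool):

	#include <linux/btf_ids.h>

	/* emits a __BTF_ID__func__vfs_truncate__<N> symbol in .BTF_ids */
	BTF_ID_LIST(btf_allowlist_d_path)
	BTF_ID(func, vfs_truncate)

	/* emits a __BTF_ID__set__btf_allowlist symbol plus its members */
	BTF_SET_START(btf_allowlist)
	BTF_ID(func, vfs_getattr)
	BTF_ID(func, vfs_fallocate)
	BTF_SET_END(btf_allowlist)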
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <libelf.h>
+#include <gelf.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <linux/rbtree.h>
+#include <linux/zalloc.h>
+#include <linux/err.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <subcmd/parse-options.h>
+
+#define BTF_IDS_SECTION ".BTF_ids"
+#define BTF_ID "__BTF_ID__"
+
+#define BTF_STRUCT "struct"
+#define BTF_UNION "union"
+#define BTF_TYPEDEF "typedef"
+#define BTF_FUNC "func"
+#define BTF_SET "set"
+#define BTF_SET8 "set8"
+
+#define ADDR_CNT 100
+
+struct btf_id {
+ struct rb_node rb_node;
+ char *name;
+ union {
+ int id;
+ int cnt;
+ };
+ int addr_cnt;
+ bool is_set;
+ bool is_set8;
+ Elf64_Addr addr[ADDR_CNT];
+};
+
+struct object {
+ const char *path;
+ const char *btf;
+ const char *base_btf_path;
+
+ struct {
+ int fd;
+ Elf *elf;
+ Elf_Data *symbols;
+ Elf_Data *idlist;
+ int symbols_shndx;
+ int idlist_shndx;
+ size_t strtabidx;
+ unsigned long idlist_addr;
+ } efile;
+
+ struct rb_root sets;
+ struct rb_root structs;
+ struct rb_root unions;
+ struct rb_root typedefs;
+ struct rb_root funcs;
+
+ int nr_funcs;
+ int nr_structs;
+ int nr_unions;
+ int nr_typedefs;
+};
+
+static int verbose;
+
+static int eprintf(int level, int var, const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (var >= level) {
+ va_start(args, fmt);
+ ret = vfprintf(stderr, fmt, args);
+ va_end(args);
+ }
+ return ret;
+}
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_debug(fmt, ...) \
+ eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debugN(n, fmt, ...) \
+ eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+ eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+ eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+
+static bool is_btf_id(const char *name)
+{
+ return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
+}
+
+static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
+{
+ struct rb_node *p = root->rb_node;
+ struct btf_id *id;
+ int cmp;
+
+ while (p) {
+ id = rb_entry(p, struct btf_id, rb_node);
+ cmp = strcmp(id->name, name);
+ if (cmp < 0)
+ p = p->rb_left;
+ else if (cmp > 0)
+ p = p->rb_right;
+ else
+ return id;
+ }
+ return NULL;
+}
+
+static struct btf_id *
+btf_id__add(struct rb_root *root, char *name, bool unique)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct btf_id *id;
+ int cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ id = rb_entry(parent, struct btf_id, rb_node);
+ cmp = strcmp(id->name, name);
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else if (cmp > 0)
+ p = &(*p)->rb_right;
+ else
+ return unique ? NULL : id;
+ }
+
+ id = zalloc(sizeof(*id));
+ if (id) {
+ pr_debug("adding symbol %s\n", name);
+ id->name = name;
+ rb_link_node(&id->rb_node, parent, p);
+ rb_insert_color(&id->rb_node, root);
+ }
+ return id;
+}
+
+static char *get_id(const char *prefix_end)
+{
+ /*
+ * __BTF_ID__func__vfs_truncate__0
+ * prefix_end = ^
+ * pos = ^
+ */
+ int len = strlen(prefix_end);
+ int pos = sizeof("__") - 1;
+ char *p, *id;
+
+ if (pos >= len)
+ return NULL;
+
+ id = strdup(prefix_end + pos);
+ if (id) {
+ /*
+ * __BTF_ID__func__vfs_truncate__0
+ * id = ^
+ *
+ * cut the unique id part
+ */
+ /* guard against names without the "__<id>" suffix */
+ p = strrchr(id, '_');
+ if (!p || p == id || *(--p) != '_') {
+ free(id);
+ return NULL;
+ }
+ *p = '\0';
+ }
+ return id;
+}
+
+static struct btf_id *add_set(struct object *obj, char *name, bool is_set8)
+{
+ /*
+ * __BTF_ID__set__name
+ * name = ^
+ * id = ^
+ */
+ char *id = name + (is_set8 ? sizeof(BTF_SET8 "__") : sizeof(BTF_SET "__")) - 1;
+ int len = strlen(name);
+
+ if (id >= name + len) {
+ pr_err("FAILED to parse set name: %s\n", name);
+ return NULL;
+ }
+
+ return btf_id__add(&obj->sets, id, true);
+}
+
+static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
+{
+ char *id;
+
+ id = get_id(name + size);
+ if (!id) {
+ pr_err("FAILED to parse symbol name: %s\n", name);
+ return NULL;
+ }
+
+ return btf_id__add(root, id, false);
+}
+
+/* Older libelf.h and glibc elf.h might not yet define the ELF compression types. */
+#ifndef SHF_COMPRESSED
+#define SHF_COMPRESSED (1 << 11) /* Section with compressed data. */
+#endif
+
+/*
+ * The data of a compressed section should be aligned to 4
+ * (for 32-bit) or 8 (for 64-bit) bytes. The binutils ld
+ * sets sh_addralign to 1, which makes libelf fail with a
+ * misaligned section error during the update:
+ * FAILED elf_update(WRITE): invalid section alignment
+ *
+ * While waiting for an ld fix, we fix the compressed sections'
+ * sh_addralign value manually.
+ */
+static int compressed_section_fix(Elf *elf, Elf_Scn *scn, GElf_Shdr *sh)
+{
+ int expected = gelf_getclass(elf) == ELFCLASS32 ? 4 : 8;
+
+ if (!(sh->sh_flags & SHF_COMPRESSED))
+ return 0;
+
+ if (sh->sh_addralign == expected)
+ return 0;
+
+ pr_debug2(" - fixing wrong alignment sh_addralign %u, expected %u\n",
+ sh->sh_addralign, expected);
+
+ sh->sh_addralign = expected;
+
+ if (gelf_update_shdr(scn, sh) == 0) {
+ pr_err("FAILED cannot update section header: %s\n",
+ elf_errmsg(-1));
+ return -1;
+ }
+ return 0;
+}
+
+static int elf_collect(struct object *obj)
+{
+ Elf_Scn *scn = NULL;
+ size_t shdrstrndx;
+ int idx = 0;
+ Elf *elf;
+ int fd;
+
+ fd = open(obj->path, O_RDWR, 0666);
+ if (fd == -1) {
+ pr_err("FAILED cannot open %s: %s\n",
+ obj->path, strerror(errno));
+ return -1;
+ }
+
+ elf_version(EV_CURRENT);
+
+ elf = elf_begin(fd, ELF_C_RDWR_MMAP, NULL);
+ if (!elf) {
+ close(fd);
+ pr_err("FAILED cannot create ELF descriptor: %s\n",
+ elf_errmsg(-1));
+ return -1;
+ }
+
+ obj->efile.fd = fd;
+ obj->efile.elf = elf;
+
+ elf_flagelf(elf, ELF_C_SET, ELF_F_LAYOUT);
+
+ if (elf_getshdrstrndx(elf, &shdrstrndx) != 0) {
+ pr_err("FAILED cannot get shdr str ndx\n");
+ return -1;
+ }
+
+ /*
+ * Scan all the ELF sections and save the data from the
+ * .BTF_ids section and the symbol table.
+ */
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ Elf_Data *data;
+ GElf_Shdr sh;
+ char *name;
+
+ idx++;
+ if (gelf_getshdr(scn, &sh) != &sh) {
+ pr_err("FAILED get section(%d) header\n", idx);
+ return -1;
+ }
+
+ name = elf_strptr(elf, shdrstrndx, sh.sh_name);
+ if (!name) {
+ pr_err("FAILED get section(%d) name\n", idx);
+ return -1;
+ }
+
+ data = elf_getdata(scn, 0);
+ if (!data) {
+ pr_err("FAILED to get section(%d) data from %s\n",
+ idx, name);
+ return -1;
+ }
+
+ pr_debug2("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
+ idx, name, (unsigned long) data->d_size,
+ (int) sh.sh_link, (unsigned long) sh.sh_flags,
+ (int) sh.sh_type);
+
+ if (sh.sh_type == SHT_SYMTAB) {
+ obj->efile.symbols = data;
+ obj->efile.symbols_shndx = idx;
+ obj->efile.strtabidx = sh.sh_link;
+ } else if (!strcmp(name, BTF_IDS_SECTION)) {
+ obj->efile.idlist = data;
+ obj->efile.idlist_shndx = idx;
+ obj->efile.idlist_addr = sh.sh_addr;
+ }
+
+ if (compressed_section_fix(elf, scn, &sh))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int symbols_collect(struct object *obj)
+{
+ Elf_Scn *scn = NULL;
+ int n, i;
+ GElf_Shdr sh;
+ char *name;
+
+ scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
+ if (!scn)
+ return -1;
+
+ if (gelf_getshdr(scn, &sh) != &sh)
+ return -1;
+
+ n = sh.sh_size / sh.sh_entsize;
+
+ /*
+ * Scan symbols and look for the ones starting with
+ * __BTF_ID__* in the .BTF_ids section.
+ */
+ for (i = 0; i < n; i++) {
+ char *prefix;
+ struct btf_id *id;
+ GElf_Sym sym;
+
+ if (!gelf_getsym(obj->efile.symbols, i, &sym))
+ return -1;
+
+ if (sym.st_shndx != obj->efile.idlist_shndx)
+ continue;
+
+ name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
+ sym.st_name);
+
+ if (!is_btf_id(name))
+ continue;
+
+ /*
+ * __BTF_ID__TYPE__vfs_truncate__0
+ * prefix = ^
+ */
+ prefix = name + sizeof(BTF_ID) - 1;
+
+ /* struct */
+ if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
+ obj->nr_structs++;
+ id = add_symbol(&obj->structs, prefix, sizeof(BTF_STRUCT) - 1);
+ /* union */
+ } else if (!strncmp(prefix, BTF_UNION, sizeof(BTF_UNION) - 1)) {
+ obj->nr_unions++;
+ id = add_symbol(&obj->unions, prefix, sizeof(BTF_UNION) - 1);
+ /* typedef */
+ } else if (!strncmp(prefix, BTF_TYPEDEF, sizeof(BTF_TYPEDEF) - 1)) {
+ obj->nr_typedefs++;
+ id = add_symbol(&obj->typedefs, prefix, sizeof(BTF_TYPEDEF) - 1);
+ /* func */
+ } else if (!strncmp(prefix, BTF_FUNC, sizeof(BTF_FUNC) - 1)) {
+ obj->nr_funcs++;
+ id = add_symbol(&obj->funcs, prefix, sizeof(BTF_FUNC) - 1);
+ /* set8 */
+ } else if (!strncmp(prefix, BTF_SET8, sizeof(BTF_SET8) - 1)) {
+ id = add_set(obj, prefix, true);
+ /*
+ * SET8 objects store the list's count, encoded in the
+ * symbol's size; the size also covers the 'cnt' header
+ * itself, hence the '- 1'.
+ */
+ if (id) {
+ id->cnt = sym.st_size / sizeof(uint64_t) - 1;
+ id->is_set8 = true;
+ }
+ /* set */
+ } else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) {
+ id = add_set(obj, prefix, false);
+ /*
+ * SET objects store the list's count, encoded in the
+ * symbol's size; the size also covers the 'cnt' header
+ * itself, hence the '- 1'.
+ */
+ if (id) {
+ id->cnt = sym.st_size / sizeof(int) - 1;
+ id->is_set = true;
+ }
+ } else {
+ pr_err("FAILED unsupported prefix %s\n", prefix);
+ return -1;
+ }
+
+ if (!id)
+ return -ENOMEM;
+
+ if (id->addr_cnt >= ADDR_CNT) {
+ pr_err("FAILED symbol %s crossed the number of allowed lists\n",
+ id->name);
+ return -1;
+ }
+ id->addr[id->addr_cnt++] = sym.st_value;
+ }
+
+ return 0;
+}
+
+static int symbols_resolve(struct object *obj)
+{
+ int nr_typedefs = obj->nr_typedefs;
+ int nr_structs = obj->nr_structs;
+ int nr_unions = obj->nr_unions;
+ int nr_funcs = obj->nr_funcs;
+ struct btf *base_btf = NULL;
+ int err, type_id;
+ struct btf *btf;
+ __u32 nr_types;
+
+ if (obj->base_btf_path) {
+ base_btf = btf__parse(obj->base_btf_path, NULL);
+ err = libbpf_get_error(base_btf);
+ if (err) {
+ pr_err("FAILED: load base BTF from %s: %s\n",
+ obj->base_btf_path, strerror(-err));
+ return -1;
+ }
+ }
+
+ btf = btf__parse_split(obj->btf ?: obj->path, base_btf);
+ err = libbpf_get_error(btf);
+ if (err) {
+ pr_err("FAILED: load BTF from %s: %s\n",
+ obj->btf ?: obj->path, strerror(-err));
+ goto out;
+ }
+
+ err = -1;
+ nr_types = btf__type_cnt(btf);
+
+ /*
+ * Iterate all the BTF types and search for collected symbol IDs.
+ */
+ for (type_id = 1; type_id < nr_types; type_id++) {
+ const struct btf_type *type;
+ struct rb_root *root;
+ struct btf_id *id;
+ const char *str;
+ int *nr;
+
+ type = btf__type_by_id(btf, type_id);
+ if (!type) {
+ pr_err("FAILED: malformed BTF, can't resolve type for ID %d\n",
+ type_id);
+ goto out;
+ }
+
+ if (btf_is_func(type) && nr_funcs) {
+ nr = &nr_funcs;
+ root = &obj->funcs;
+ } else if (btf_is_struct(type) && nr_structs) {
+ nr = &nr_structs;
+ root = &obj->structs;
+ } else if (btf_is_union(type) && nr_unions) {
+ nr = &nr_unions;
+ root = &obj->unions;
+ } else if (btf_is_typedef(type) && nr_typedefs) {
+ nr = &nr_typedefs;
+ root = &obj->typedefs;
+ } else
+ continue;
+
+ str = btf__name_by_offset(btf, type->name_off);
+ if (!str) {
+ pr_err("FAILED: malformed BTF, can't resolve name for ID %d\n",
+ type_id);
+ goto out;
+ }
+
+ id = btf_id__find(root, str);
+ if (id) {
+ if (id->id) {
+ pr_info("WARN: multiple IDs found for '%s': %d, %d - using %d\n",
+ str, id->id, type_id, id->id);
+ } else {
+ id->id = type_id;
+ (*nr)--;
+ }
+ }
+ }
+
+ err = 0;
+out:
+ btf__free(base_btf);
+ btf__free(btf);
+ return err;
+}
+
+static int id_patch(struct object *obj, struct btf_id *id)
+{
+ Elf_Data *data = obj->efile.idlist;
+ int *ptr = data->d_buf;
+ int i;
+
+	/* sets and set8s are never resolved to a type, so id->id may be 0 for them */
+ if (!id->id && !id->is_set && !id->is_set8)
+ pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
+
+ for (i = 0; i < id->addr_cnt; i++) {
+ unsigned long addr = id->addr[i];
+ unsigned long idx = addr - obj->efile.idlist_addr;
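+		/* idx is the symbol's byte offset within the .BTF_ids section data */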
+
+ pr_debug("patching addr %5lu: ID %7d [%s]\n",
+ idx, id->id, id->name);
+
+ if (idx >= data->d_size) {
+ pr_err("FAILED patching index %lu out of bounds %lu\n",
+ idx, data->d_size);
+ return -1;
+ }
+
+ idx = idx / sizeof(int);
+ ptr[idx] = id->id;
+ }
+
+ return 0;
+}
+
+static int __symbols_patch(struct object *obj, struct rb_root *root)
+{
+ struct rb_node *next;
+ struct btf_id *id;
+
+ next = rb_first(root);
+ while (next) {
+ id = rb_entry(next, struct btf_id, rb_node);
+
+ if (id_patch(obj, id))
+ return -1;
+
+ next = rb_next(next);
+ }
+ return 0;
+}
+
+static int cmp_id(const void *pa, const void *pb)
+{
+ const int *a = pa, *b = pb;
+
+ return *a - *b;
+}
+
+static int sets_patch(struct object *obj)
+{
+ Elf_Data *data = obj->efile.idlist;
+ int *ptr = data->d_buf;
+ struct rb_node *next;
+
+ next = rb_first(&obj->sets);
+ while (next) {
+ unsigned long addr, idx;
+ struct btf_id *id;
+ int *base;
+ int cnt;
+
+ id = rb_entry(next, struct btf_id, rb_node);
+ addr = id->addr[0];
+ idx = addr - obj->efile.idlist_addr;
+
+		/* a set must be defined at exactly one address */
+ if (id->addr_cnt != 1) {
+ pr_err("FAILED malformed data for set '%s'\n",
+ id->name);
+ return -1;
+ }
+
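+		/*
+		 * Word layout in .BTF_ids:
+		 *   set:  [cnt][id]...              32-bit entries
+		 *   set8: [cnt][flags][id,flags]...  64-bit {id, flags} pairs
+		 * so skip one (set) or two (set8) words to reach the entries.
+		 */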
+ idx = idx / sizeof(int);
+ base = &ptr[idx] + (id->is_set8 ? 2 : 1);
+ cnt = ptr[idx];
+
+ pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
+ (idx + 1) * sizeof(int), cnt, id->name);
+
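+		/*
+		 * For set8, each 64-bit element is an {id, flags} pair and
+		 * cmp_id() keys on the leading 32-bit id.
+		 */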
+ qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
+
+ next = rb_next(next);
+ }
+ return 0;
+}
+
+static int symbols_patch(struct object *obj)
+{
+ int err;
+
+ if (__symbols_patch(obj, &obj->structs) ||
+ __symbols_patch(obj, &obj->unions) ||
+ __symbols_patch(obj, &obj->typedefs) ||
+ __symbols_patch(obj, &obj->funcs) ||
+ __symbols_patch(obj, &obj->sets))
+ return -1;
+
+ if (sets_patch(obj))
+ return -1;
+
+ /* Set type to ensure endian translation occurs. */
+ obj->efile.idlist->d_type = ELF_T_WORD;
+
+ elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY);
+
+ err = elf_update(obj->efile.elf, ELF_C_WRITE);
+ if (err < 0) {
+ pr_err("FAILED elf_update(WRITE): %s\n",
+ elf_errmsg(-1));
+ }
+
+ pr_debug("update %s for %s\n",
+ err >= 0 ? "ok" : "failed", obj->path);
+ return err < 0 ? -1 : 0;
+}
+
+static const char * const resolve_btfids_usage[] = {
+ "resolve_btfids [<options>] <ELF object>",
+ NULL
+};
+
+int main(int argc, const char **argv)
+{
+ struct object obj = {
+ .efile = {
+ .idlist_shndx = -1,
+ .symbols_shndx = -1,
+ },
+ .structs = RB_ROOT,
+ .unions = RB_ROOT,
+ .typedefs = RB_ROOT,
+ .funcs = RB_ROOT,
+ .sets = RB_ROOT,
+ };
+ struct option btfid_options[] = {
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show errors, etc)"),
+ OPT_STRING(0, "btf", &obj.btf, "BTF data",
+ "BTF data"),
+ OPT_STRING('b', "btf_base", &obj.base_btf_path, "file",
+ "path of file providing base BTF"),
+ OPT_END()
+ };
+ int err = -1;
+
+ argc = parse_options(argc, argv, btfid_options, resolve_btfids_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (argc != 1)
+ usage_with_options(resolve_btfids_usage, btfid_options);
+
+ obj.path = argv[0];
+
+ if (elf_collect(&obj))
+ goto out;
+
+	/*
+	 * If we did not find the .BTF_ids section or a symbol table,
+	 * there is nothing to do.
+	 */
+ if (obj.efile.idlist_shndx == -1 ||
+ obj.efile.symbols_shndx == -1) {
+ pr_debug("Cannot find .BTF_ids or symbols sections, nothing to do\n");
+ err = 0;
+ goto out;
+ }
+
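+	/* collect __BTF_ID__ symbols, resolve them against BTF, then patch the ELF */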
+ if (symbols_collect(&obj))
+ goto out;
+
+ if (symbols_resolve(&obj))
+ goto out;
+
+ if (symbols_patch(&obj))
+ goto out;
+
+ err = 0;
+out:
+ if (obj.efile.elf) {
+ elf_end(obj.efile.elf);
+ close(obj.efile.fd);
+ }
+ return err;
+}
diff --git a/tools/bpf/runqslower/.gitignore b/tools/bpf/runqslower/.gitignore
new file mode 100644
index 0000000000..ffdb70230c
--- /dev/null
+++ b/tools/bpf/runqslower/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/.output
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
new file mode 100644
index 0000000000..d8288936c9
--- /dev/null
+++ b/tools/bpf/runqslower/Makefile
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+include ../../scripts/Makefile.include
+
+OUTPUT ?= $(abspath .output)/
+
+BPFTOOL_OUTPUT := $(OUTPUT)bpftool/
+DEFAULT_BPFTOOL := $(BPFTOOL_OUTPUT)bootstrap/bpftool
+BPFTOOL ?= $(DEFAULT_BPFTOOL)
+LIBBPF_SRC := $(abspath ../../lib/bpf)
+BPFOBJ_OUTPUT := $(OUTPUT)libbpf/
+BPFOBJ := $(BPFOBJ_OUTPUT)libbpf.a
+BPF_DESTDIR := $(BPFOBJ_OUTPUT)
+BPF_INCLUDE := $(BPF_DESTDIR)/include
+INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi)
+CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS)
+CFLAGS += $(EXTRA_CFLAGS)
+LDFLAGS += $(EXTRA_LDFLAGS)
+
+# Try to detect best kernel BTF source
+KERNEL_REL := $(shell uname -r)
+VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../../vmlinux /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(KERNEL_REL)
+VMLINUX_BTF_PATH := $(or $(VMLINUX_BTF),$(firstword \
+ $(wildcard $(VMLINUX_BTF_PATHS))))
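+# VMLINUX_BTF can also be set explicitly, e.g.:
+#   make VMLINUX_BTF=/path/to/vmlinux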
+
+ifeq ($(V),1)
+Q =
+else
+Q = @
+MAKEFLAGS += --no-print-directory
+submake_extras := feature_display=0
+endif
+
+.DELETE_ON_ERROR:
+
+.PHONY: all clean runqslower libbpf_hdrs
+all: runqslower
+
+runqslower: $(OUTPUT)/runqslower
+
+clean:
+ $(call QUIET_CLEAN, runqslower)
+ $(Q)$(RM) -r $(BPFOBJ_OUTPUT) $(BPFTOOL_OUTPUT)
+ $(Q)$(RM) $(OUTPUT)*.o $(OUTPUT)*.d
+ $(Q)$(RM) $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h
+ $(Q)$(RM) $(OUTPUT)runqslower
+ $(Q)$(RM) -r .output
+
+libbpf_hdrs: $(BPFOBJ)
+
+$(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@
+
+$(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \
+ $(OUTPUT)/runqslower.bpf.o | libbpf_hdrs
+
+$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h | libbpf_hdrs
+
+$(OUTPUT)/%.skel.h: $(OUTPUT)/%.bpf.o | $(BPFTOOL)
+ $(QUIET_GEN)$(BPFTOOL) gen skeleton $< > $@
+
+$(OUTPUT)/%.bpf.o: %.bpf.c $(BPFOBJ) | $(OUTPUT)
+ $(QUIET_GEN)$(CLANG) -g -O2 --target=bpf $(INCLUDES) \
+ -c $(filter %.c,$^) -o $@ && \
+ $(LLVM_STRIP) -g $@
+
+$(OUTPUT)/%.o: %.c | $(OUTPUT)
+ $(QUIET_CC)$(CC) $(CFLAGS) $(INCLUDES) -c $(filter %.c,$^) -o $@
+
+$(OUTPUT) $(BPFOBJ_OUTPUT) $(BPFTOOL_OUTPUT):
+ $(QUIET_MKDIR)mkdir -p $@
+
+$(OUTPUT)/vmlinux.h: $(VMLINUX_BTF_PATH) | $(OUTPUT) $(BPFTOOL)
+ifeq ($(VMLINUX_H),)
+ $(Q)if [ ! -e "$(VMLINUX_BTF_PATH)" ] ; then \
+ echo "Couldn't find kernel BTF; set VMLINUX_BTF to" \
+ "specify its location." >&2; \
+ exit 1;\
+ fi
+ $(QUIET_GEN)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@
+else
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
+
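+# Build libbpf.a and install its headers under $(BPFOBJ_OUTPUT)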
+$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) \
+ DESTDIR=$(BPFOBJ_OUTPUT) prefix= $(abspath $@) install_headers
+
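+# Bootstrap a minimal bpftool, used above to generate vmlinux.h and skeletons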
+$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) bootstrap
diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c
new file mode 100644
index 0000000000..9a5c1f008f
--- /dev/null
+++ b/tools/bpf/runqslower/runqslower.bpf.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "runqslower.h"
+
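+/* redefine locally: macros like these are not carried in BTF/vmlinux.h */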
+#define TASK_RUNNING 0
+#define BPF_F_CURRENT_CPU 0xffffffffULL
+
+const volatile __u64 min_us = 0;
+const volatile pid_t targ_pid = 0;
+
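+/* per-task enqueue timestamp, kept in BPF task-local storage */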
+struct {
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, u64);
+} start SEC(".maps");
+
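+/* perf event array for streaming runq_event records to userspace */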
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u32));
+} events SEC(".maps");
+
+/* record enqueue timestamp */
+__always_inline
+static int trace_enqueue(struct task_struct *t)
+{
+ u32 pid = t->pid;
+ u64 *ptr;
+
+ if (!pid || (targ_pid && targ_pid != pid))
+ return 0;
+
+ ptr = bpf_task_storage_get(&start, t, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+
+ *ptr = bpf_ktime_get_ns();
+ return 0;
+}
+
+SEC("tp_btf/sched_wakeup")
+int handle__sched_wakeup(u64 *ctx)
+{
+ /* TP_PROTO(struct task_struct *p) */
+ struct task_struct *p = (void *)ctx[0];
+
+ return trace_enqueue(p);
+}
+
+SEC("tp_btf/sched_wakeup_new")
+int handle__sched_wakeup_new(u64 *ctx)
+{
+ /* TP_PROTO(struct task_struct *p) */
+ struct task_struct *p = (void *)ctx[0];
+
+ return trace_enqueue(p);
+}
+
+SEC("tp_btf/sched_switch")
+int handle__sched_switch(u64 *ctx)
+{
+ /* TP_PROTO(bool preempt, struct task_struct *prev,
+ * struct task_struct *next)
+ */
+ struct task_struct *prev = (struct task_struct *)ctx[1];
+ struct task_struct *next = (struct task_struct *)ctx[2];
+ struct runq_event event = {};
+ u64 *tsp, delta_us;
+ long state;
+ u32 pid;
+
+ /* ivcsw: treat like an enqueue event and store timestamp */
+ if (prev->__state == TASK_RUNNING)
+ trace_enqueue(prev);
+
+ pid = next->pid;
+
+	/* on a pid mismatch, bail out early and save the bpf_task_storage_get() */
+ if (!pid || (targ_pid && targ_pid != pid))
+ return 0;
+
+ /* fetch timestamp and calculate delta */
+ tsp = bpf_task_storage_get(&start, next, 0, 0);
+ if (!tsp)
+ return 0; /* missed enqueue */
+
+ delta_us = (bpf_ktime_get_ns() - *tsp) / 1000;
+ if (min_us && delta_us <= min_us)
+ return 0;
+
+ event.pid = pid;
+ event.delta_us = delta_us;
+ bpf_get_current_comm(&event.task, sizeof(event.task));
+
+ /* output */
+ bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
+ &event, sizeof(event));
+
+ bpf_task_storage_delete(&start, next);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/bpf/runqslower/runqslower.c b/tools/bpf/runqslower/runqslower.c
new file mode 100644
index 0000000000..83c5993a13
--- /dev/null
+++ b/tools/bpf/runqslower/runqslower.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+// Copyright (c) 2019 Facebook
+#include <argp.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+#include "runqslower.h"
+#include "runqslower.skel.h"
+
+struct env {
+ pid_t pid;
+ __u64 min_us;
+ bool verbose;
+} env = {
+ .min_us = 10000,
+};
+
+const char *argp_program_version = "runqslower 0.1";
+const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
+const char argp_program_doc[] =
+"runqslower    Trace long process scheduling delays.\n"
+"              For Linux, uses eBPF, BPF CO-RE, libbpf, BTF.\n"
+"\n"
+"This program traces the delay between a task becoming ready to run\n"
+"and it actually running on a CPU, reporting delays above a threshold.\n"
+"\n"
+"USAGE: runqslower [-p PID] [min_us]\n"
+"\n"
+"EXAMPLES:\n"
+" runqslower # trace run queue latency higher than 10000 us (default)\n"
+" runqslower 1000 # trace run queue latency higher than 1000 us\n"
+" runqslower -p 123 # trace pid 123 only\n";
+
+static const struct argp_option opts[] = {
+ { "pid", 'p', "PID", 0, "Process PID to trace"},
+ { "verbose", 'v', NULL, 0, "Verbose debug output" },
+ {},
+};
+
+static error_t parse_arg(int key, char *arg, struct argp_state *state)
+{
+ static int pos_args;
+ int pid;
+ long long min_us;
+
+ switch (key) {
+ case 'v':
+ env.verbose = true;
+ break;
+ case 'p':
+ errno = 0;
+ pid = strtol(arg, NULL, 10);
+ if (errno || pid <= 0) {
+ fprintf(stderr, "Invalid PID: %s\n", arg);
+ argp_usage(state);
+ }
+ env.pid = pid;
+ break;
+ case ARGP_KEY_ARG:
+ if (pos_args++) {
+ fprintf(stderr,
+ "Unrecognized positional argument: %s\n", arg);
+ argp_usage(state);
+ }
+ errno = 0;
+ min_us = strtoll(arg, NULL, 10);
+ if (errno || min_us <= 0) {
+ fprintf(stderr, "Invalid delay (in us): %s\n", arg);
+ argp_usage(state);
+ }
+ env.min_us = min_us;
+ break;
+ default:
+ return ARGP_ERR_UNKNOWN;
+ }
+ return 0;
+}
+
+int libbpf_print_fn(enum libbpf_print_level level,
+ const char *format, va_list args)
+{
+ if (level == LIBBPF_DEBUG && !env.verbose)
+ return 0;
+ return vfprintf(stderr, format, args);
+}
+
+void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
+{
+ const struct runq_event *e = data;
+ struct tm *tm;
+ char ts[32];
+ time_t t;
+
+ time(&t);
+ tm = localtime(&t);
+ strftime(ts, sizeof(ts), "%H:%M:%S", tm);
+ printf("%-8s %-16s %-6d %14llu\n", ts, e->task, e->pid, e->delta_us);
+}
+
+void handle_lost_events(void *ctx, int cpu, __u64 lost_cnt)
+{
+ printf("Lost %llu events on CPU #%d!\n", lost_cnt, cpu);
+}
+
+int main(int argc, char **argv)
+{
+ static const struct argp argp = {
+ .options = opts,
+ .parser = parse_arg,
+ .doc = argp_program_doc,
+ };
+ struct perf_buffer *pb = NULL;
+ struct runqslower_bpf *obj;
+ int err;
+
+ err = argp_parse(&argp, argc, argv, 0, NULL, NULL);
+ if (err)
+ return err;
+
+ libbpf_set_print(libbpf_print_fn);
+
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
+ obj = runqslower_bpf__open();
+ if (!obj) {
+ fprintf(stderr, "failed to open and/or load BPF object\n");
+ return 1;
+ }
+
+ /* initialize global data (filtering options) */
+ obj->rodata->targ_pid = env.pid;
+ obj->rodata->min_us = env.min_us;
+
+ err = runqslower_bpf__load(obj);
+ if (err) {
+ fprintf(stderr, "failed to load BPF object: %d\n", err);
+ goto cleanup;
+ }
+
+ err = runqslower_bpf__attach(obj);
+ if (err) {
+ fprintf(stderr, "failed to attach BPF programs\n");
+ goto cleanup;
+ }
+
+ printf("Tracing run queue latency higher than %llu us\n", env.min_us);
+ printf("%-8s %-16s %-6s %14s\n", "TIME", "COMM", "PID", "LAT(us)");
+
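+	/* 64 pages of perf ring buffer per CPU */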
+ pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64,
+ handle_event, handle_lost_events, NULL, NULL);
+ err = libbpf_get_error(pb);
+ if (err) {
+ pb = NULL;
+ fprintf(stderr, "failed to open perf buffer: %d\n", err);
+ goto cleanup;
+ }
+
+ while ((err = perf_buffer__poll(pb, 100)) >= 0)
+ ;
+ printf("Error polling perf buffer: %d\n", err);
+
+cleanup:
+ perf_buffer__free(pb);
+ runqslower_bpf__destroy(obj);
+
+ return err != 0;
+}
diff --git a/tools/bpf/runqslower/runqslower.h b/tools/bpf/runqslower/runqslower.h
new file mode 100644
index 0000000000..4f70f07200
--- /dev/null
+++ b/tools/bpf/runqslower/runqslower.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __RUNQSLOWER_H
+#define __RUNQSLOWER_H
+
+#define TASK_COMM_LEN 16
+
+struct runq_event {
+ char task[TASK_COMM_LEN];
+ __u64 delta_us;
+ pid_t pid;
+};
+
+#endif /* __RUNQSLOWER_H */