author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 21:00:37 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 21:00:37 +0000
commit:    94ac2ab3fff96814d7460a27a0e9d004abbd4128
tree:      9a4eb8cc234b540b0f4b93363109cdd37a20540b /tools/testing/selftests/bpf
parent:    Adding debian version 6.8.12-1.
Merging upstream version 6.9.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/testing/selftests/bpf')
147 files changed, 6204 insertions, 496 deletions
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64 index 5c2cc7e8c5..d8ade15e27 100644 --- a/tools/testing/selftests/bpf/DENYLIST.aarch64 +++ b/tools/testing/selftests/bpf/DENYLIST.aarch64 @@ -1,6 +1,5 @@ bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3 bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3 -exceptions # JIT does not support calling kfunc bpf_throw: -524 fexit_sleep # The test never returns. The remaining tests cannot start. kprobe_multi_bench_attach # needs CONFIG_FPROBE kprobe_multi_test # needs CONFIG_FPROBE @@ -11,3 +10,5 @@ fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_mu fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95 fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95 missed/kprobe_recursion # missed_kprobe_recursion__attach unexpected error: -95 (errno 95) +verifier_arena # JIT does not support arena +arena_htab # JIT does not support arena diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index 1a63996c03..f4a2f66a68 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -3,3 +3,6 @@ exceptions # JIT does not support calling kfunc bpf_throw (exceptions) get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace) stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?) +verifier_iterating_callbacks +verifier_arena # JIT does not support arena +arena_htab # JIT does not support arena diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index fd15017ed3..3b9eb40d63 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -34,13 +34,26 @@ LIBELF_CFLAGS := $(shell $(PKG_CONFIG) libelf --cflags 2>/dev/null) LIBELF_LIBS := $(shell $(PKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf) CFLAGS += -g $(OPT_FLAGS) -rdynamic \ - -Wall -Werror \ + -Wall -Werror -fno-omit-frame-pointer \ $(GENFLAGS) $(SAN_CFLAGS) $(LIBELF_CFLAGS) \ -I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \ -I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) LDFLAGS += $(SAN_LDFLAGS) LDLIBS += $(LIBELF_LIBS) -lz -lrt -lpthread +# The following tests perform type punning and they may break strict +# aliasing rules, which are exploited by both GCC and clang by default +# while optimizing. This can lead to broken programs. 
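The strict-aliasing hazard that the Makefile comment above describes comes from type punning of roughly the following shape; this is an illustrative sketch, not code taken from the listed tests:

.. code-block:: c

    #include <stdio.h>
    #include <string.h>

    /* Reading an unsigned int through a float pointer violates C's
     * strict aliasing rules; at -O2 both GCC and clang may assume the
     * two pointers never alias and reorder or cache the accesses.
     * Building with -fno-strict-aliasing, as the Makefile does for the
     * progs listed below, turns that assumption off.
     */
    static float pun_bad(unsigned int *u)
    {
        return *(float *)u;            /* undefined behavior */
    }

    static float pun_ok(unsigned int u)
    {
        float f;

        memcpy(&f, &u, sizeof(f));     /* well-defined type punning */
        return f;
    }

    int main(void)
    {
        unsigned int bits = 0x3f800000; /* IEEE-754 bit pattern of 1.0f */

        printf("%f %f\n", pun_bad(&bits), pun_ok(bits));
        return 0;
    }

The memcpy() form is the portable alternative, but for the selftests it is simpler to disable the optimization per file, which is what the per-prog CFLAGS below do.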
+progs/bind4_prog.c-CFLAGS := -fno-strict-aliasing +progs/bind6_prog.c-CFLAGS := -fno-strict-aliasing +progs/dynptr_fail.c-CFLAGS := -fno-strict-aliasing +progs/linked_list_fail.c-CFLAGS := -fno-strict-aliasing +progs/map_kptr_fail.c-CFLAGS := -fno-strict-aliasing +progs/syscall.c-CFLAGS := -fno-strict-aliasing +progs/test_pkt_md_access.c-CFLAGS := -fno-strict-aliasing +progs/test_sk_lookup.c-CFLAGS := -fno-strict-aliasing +progs/timer_crash.c-CFLAGS := -fno-strict-aliasing + ifneq ($(LLVM),) # Silence some warnings when compiled with clang CFLAGS += -Wno-unused-command-line-argument @@ -64,6 +77,15 @@ TEST_INST_SUBDIRS := no_alu32 ifneq ($(BPF_GCC),) TEST_GEN_PROGS += test_progs-bpf_gcc TEST_INST_SUBDIRS += bpf_gcc + +# The following tests contain C code that, although technically legal, +# triggers GCC warnings that cannot be disabled: declaration of +# anonymous struct types in function parameter lists. +progs/btf_dump_test_case_bitfields.c-CFLAGS := -Wno-error +progs/btf_dump_test_case_namespacing.c-CFLAGS := -Wno-error +progs/btf_dump_test_case_packing.c-CFLAGS := -Wno-error +progs/btf_dump_test_case_padding.c-CFLAGS := -Wno-error +progs/btf_dump_test_case_syntax.c-CFLAGS := -Wno-error endif ifneq ($(CLANG_CPUV4),) @@ -110,7 +132,7 @@ TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \ flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \ test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \ xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \ - xdp_features + xdp_features bpf_test_no_cfi.ko TEST_GEN_FILES += liburandom_read.so urandom_read sign-file uprobe_multi @@ -175,8 +197,7 @@ endif # NOTE: Semicolon at the end is critical to override lib.mk's default static # rule for binaries. 
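For reference, the GCC diagnostic that forces -Wno-error onto the btf_dump test cases above is triggered by prototypes of this shape (a hypothetical one-liner, not copied from those tests):

.. code-block:: c

    /* GCC warns: "anonymous struct declared inside parameter list will
     * not be visible outside of this definition or declaration". The
     * type is scoped to the prototype alone, so no caller can ever form
     * a compatible argument; the warning cannot be disabled selectively,
     * hence -Wno-error for these files.
     */
    int probe(struct { int a; int b; } arg);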
$(notdir $(TEST_GEN_PROGS) \ - $(TEST_GEN_PROGS_EXTENDED) \ - $(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ; + $(TEST_GEN_PROGS_EXTENDED)): %: $(OUTPUT)/% ; # sort removes libbpf duplicates when not cross-building MAKE_DIRS := $(sort $(BUILD_DIR)/libbpf $(HOST_BUILD_DIR)/libbpf \ @@ -233,6 +254,12 @@ $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_testmo $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_testmod $(Q)cp bpf_testmod/bpf_testmod.ko $@ +$(OUTPUT)/bpf_test_no_cfi.ko: $(VMLINUX_BTF) $(RESOLVE_BTFIDS) $(wildcard bpf_test_no_cfi/Makefile bpf_test_no_cfi/*.[ch]) + $(call msg,MOD,,$@) + $(Q)$(RM) bpf_test_no_cfi/bpf_test_no_cfi.ko # force re-compilation + $(Q)$(MAKE) $(submake_extras) RESOLVE_BTFIDS=$(RESOLVE_BTFIDS) -C bpf_test_no_cfi + $(Q)cp bpf_test_no_cfi/bpf_test_no_cfi.ko $@ + DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool ifneq ($(CROSS_COMPILE),) CROSS_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool @@ -382,11 +409,11 @@ endif CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH)) BPF_CFLAGS = -g -Wall -Werror -D__TARGET_ARCH_$(SRCARCH) $(MENDIAN) \ -I$(INCLUDE_DIR) -I$(CURDIR) -I$(APIDIR) \ - -I$(abspath $(OUTPUT)/../usr/include) + -I$(abspath $(OUTPUT)/../usr/include) \ + -Wno-compare-distinct-pointer-types # TODO: enable me -Wsign-compare -CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) \ - -Wno-compare-distinct-pointer-types +CLANG_CFLAGS = $(CLANG_SYS_INCLUDES) $(OUTPUT)/test_l4lb_noinline.o: BPF_CFLAGS += -fno-inline $(OUTPUT)/test_xdp_noinline.o: BPF_CFLAGS += -fno-inline @@ -504,7 +531,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.bpf.o: \ $(wildcard $(BPFDIR)/*.bpf.h) \ | $(TRUNNER_OUTPUT) $$(BPFOBJ) $$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \ - $(TRUNNER_BPF_CFLAGS)) + $(TRUNNER_BPF_CFLAGS) \ + $$($$<-CFLAGS)) $(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) @@ -514,6 +542,7 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o) $(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.bpf.o=)) > $$@ $(Q)$$(BPFTOOL) gen subskeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.bpf.o=)) > $$(@:.skel.h=.subskel.h) + $(Q)rm -f $$(<:.o=.linked1.o) $$(<:.o=.linked2.o) $$(<:.o=.linked3.o) $(TRUNNER_BPF_LSKELS): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) @@ -522,6 +551,7 @@ $(TRUNNER_BPF_LSKELS): %.lskel.h: %.bpf.o $(BPFTOOL) | $(TRUNNER_OUTPUT) $(Q)$$(BPFTOOL) gen object $$(<:.o=.llinked3.o) $$(<:.o=.llinked2.o) $(Q)diff $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o) $(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.llinked3.o) name $$(notdir $$(<:.bpf.o=_lskel)) > $$@ + $(Q)rm -f $$(<:.o=.llinked1.o) $$(<:.o=.llinked2.o) $$(<:.o=.llinked3.o) $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.bpf.o)) @@ -532,6 +562,7 @@ $(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT) $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@) $(Q)$$(BPFTOOL) gen skeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$@ $(Q)$$(BPFTOOL) gen subskeleton $$(@:.skel.h=.linked3.o) name $$(notdir $$(@:.skel.h=)) > $$(@:.skel.h=.subskel.h) + $(Q)rm -f $$(@:.skel.h=.linked1.o) $$(@:.skel.h=.linked2.o) $$(@:.skel.h=.linked3.o) endif # ensure we set up tests.h header generation rule just once @@ -606,6 +637,7 @@ TRUNNER_EXTRA_SOURCES := test_progs.c \ 
flow_dissector_load.h \ ip_check_defrag_frags.h TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ + $(OUTPUT)/bpf_test_no_cfi.ko \ $(OUTPUT)/liburandom_read.so \ $(OUTPUT)/xdp_synproxy \ $(OUTPUT)/sign-file \ @@ -729,11 +761,12 @@ $(OUTPUT)/uprobe_multi: uprobe_multi.c $(call msg,BINARY,,$@) $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ $(LDLIBS) -o $@ -EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ +EXTRA_CLEAN := $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \ prog_tests/tests.h map_tests/tests.h verifier/tests.h \ feature bpftool \ $(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h *.subskel.h \ no_alu32 cpuv4 bpf_gcc bpf_testmod.ko \ + bpf_test_no_cfi.ko \ liburandom_read.so) .PHONY: docs docs-clean diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst index 9af79c7a9b..9b974e425a 100644 --- a/tools/testing/selftests/bpf/README.rst +++ b/tools/testing/selftests/bpf/README.rst @@ -115,7 +115,7 @@ the insn 20 undoes map_value addition. It is currently impossible for the verifier to understand such speculative pointer arithmetic. Hence `this patch`__ addresses it on the compiler side. It was committed on llvm 12. -__ https://reviews.llvm.org/D85570 +__ https://github.com/llvm/llvm-project/commit/ddf1864ace484035e3cde5e83b3a31ac81e059c6 The corresponding C code @@ -165,7 +165,7 @@ This is due to a llvm BPF backend bug. `The fix`__ has been pushed to llvm 10.x release branch and will be available in 10.0.1. The patch is available in llvm 11.0.0 trunk. -__ https://reviews.llvm.org/D78466 +__ https://github.com/llvm/llvm-project/commit/3cb7e7bf959dcd3b8080986c62e10a75c7af43f0 bpf_verif_scale/loop6.bpf.o test failure with Clang 12 ====================================================== @@ -204,7 +204,7 @@ r5(w5) is eventually saved on stack at insn #24 for later use. This cause later verifier failure. The bug has been `fixed`__ in Clang 13. -__ https://reviews.llvm.org/D97479 +__ https://github.com/llvm/llvm-project/commit/1959ead525b8830cc8a345f45e1c3ef9902d3229 BPF CO-RE-based tests and Clang version ======================================= @@ -221,11 +221,11 @@ failures: - __builtin_btf_type_id() [0_, 1_, 2_]; - __builtin_preserve_type_info(), __builtin_preserve_enum_value() [3_, 4_]. -.. _0: https://reviews.llvm.org/D74572 -.. _1: https://reviews.llvm.org/D74668 -.. _2: https://reviews.llvm.org/D85174 -.. _3: https://reviews.llvm.org/D83878 -.. _4: https://reviews.llvm.org/D83242 +.. _0: https://github.com/llvm/llvm-project/commit/6b01b465388b204d543da3cf49efd6080db094a9 +.. _1: https://github.com/llvm/llvm-project/commit/072cde03aaa13a2c57acf62d79876bf79aa1919f +.. _2: https://github.com/llvm/llvm-project/commit/00602ee7ef0bf6c68d690a2bd729c12b95c95c99 +.. _3: https://github.com/llvm/llvm-project/commit/6d218b4adb093ff2e9764febbbc89f429412006c +.. _4: https://github.com/llvm/llvm-project/commit/6d6750696400e7ce988d66a1a00e1d0cb32815f8 Floating-point tests and Clang version ====================================== @@ -234,7 +234,7 @@ Certain selftests, e.g. core_reloc, require support for the floating-point types, which was introduced in `Clang 13`__. The older Clang versions will either crash when compiling these tests, or generate an incorrect BTF. 
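As background for the CO-RE section above: BPF programs rarely spell out those builtins by hand and instead reach them through libbpf's wrappers. A hedged sketch follows; it assumes a bpftool-generated vmlinux.h and a Clang new enough to provide the builtins:

.. code-block:: c

    #include "vmlinux.h"              /* kernel types with BTF, generated by bpftool */
    #include <bpf/bpf_core_read.h>

    /* bpf_core_type_id_kernel() wraps __builtin_btf_type_id() and
     * bpf_core_enum_value() wraps __builtin_preserve_enum_value();
     * both need the Clang commits referenced above.
     */
    static __u32 task_struct_btf_id(void)
    {
        return bpf_core_type_id_kernel(struct task_struct);
    }

    static __u64 lookup_helper_id(void)
    {
        return bpf_core_enum_value(enum bpf_func_id, BPF_FUNC_map_lookup_elem);
    }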
-__ https://reviews.llvm.org/D83289 +__ https://github.com/llvm/llvm-project/commit/a7137b238a07d9399d3ae96c0b461571bd5aa8b2 Kernel function call test and Clang version =========================================== @@ -248,7 +248,7 @@ Without it, the error from compiling bpf selftests looks like: libbpf: failed to find BTF for extern 'tcp_slow_start' [25] section: -2 -__ https://reviews.llvm.org/D93563 +__ https://github.com/llvm/llvm-project/commit/886f9ff53155075bd5f1e994f17b85d1e1b7470c btf_tag test and Clang version ============================== @@ -264,8 +264,8 @@ Without them, the btf_tag selftest will be skipped and you will observe: #<test_num> btf_tag:SKIP -.. _0: https://reviews.llvm.org/D111588 -.. _1: https://reviews.llvm.org/D111199 +.. _0: https://github.com/llvm/llvm-project/commit/a162b67c98066218d0d00aa13b99afb95d9bb5e6 +.. _1: https://github.com/llvm/llvm-project/commit/3466e00716e12e32fdb100e3fcfca5c2b3e8d784 Clang dependencies for static linking tests =========================================== @@ -274,7 +274,7 @@ linked_vars, linked_maps, and linked_funcs tests depend on `Clang fix`__ to generate valid BTF information for weak variables. Please make sure you use Clang that contains the fix. -__ https://reviews.llvm.org/D100362 +__ https://github.com/llvm/llvm-project/commit/968292cb93198442138128d850fd54dc7edc0035 Clang relocation changes ======================== @@ -292,7 +292,7 @@ Here, ``type 2`` refers to new relocation type ``R_BPF_64_ABS64``. To fix this issue, user newer libbpf. .. Links -.. _clang reloc patch: https://reviews.llvm.org/D102712 +.. _clang reloc patch: https://github.com/llvm/llvm-project/commit/6a2ea84600ba4bd3b2733bd8f08f5115eb32164b .. _kernel llvm reloc: /Documentation/bpf/llvm_reloc.rst Clang dependencies for the u32 spill test (xdpwall) @@ -304,6 +304,6 @@ from running test_progs will look like: .. code-block:: console - test_xdpwall:FAIL:Does LLVM have https://reviews.llvm.org/D109073? unexpected error: -4007 + test_xdpwall:FAIL:Does LLVM have https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5? 
unexpected error: -4007 -__ https://reviews.llvm.org/D109073 +__ https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5 diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index 73ce11b054..b2b4c391eb 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -323,14 +323,14 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) break; case 'p': env.producer_cnt = strtol(arg, NULL, 10); - if (env.producer_cnt <= 0) { + if (env.producer_cnt < 0) { fprintf(stderr, "Invalid producer count: %s\n", arg); argp_usage(state); } break; case 'c': env.consumer_cnt = strtol(arg, NULL, 10); - if (env.consumer_cnt <= 0) { + if (env.consumer_cnt < 0) { fprintf(stderr, "Invalid consumer count: %s\n", arg); argp_usage(state); } @@ -495,14 +495,20 @@ extern const struct bench bench_trig_base; extern const struct bench bench_trig_tp; extern const struct bench bench_trig_rawtp; extern const struct bench bench_trig_kprobe; +extern const struct bench bench_trig_kretprobe; +extern const struct bench bench_trig_kprobe_multi; +extern const struct bench bench_trig_kretprobe_multi; extern const struct bench bench_trig_fentry; +extern const struct bench bench_trig_fexit; extern const struct bench bench_trig_fentry_sleep; extern const struct bench bench_trig_fmodret; extern const struct bench bench_trig_uprobe_base; -extern const struct bench bench_trig_uprobe_with_nop; -extern const struct bench bench_trig_uretprobe_with_nop; -extern const struct bench bench_trig_uprobe_without_nop; -extern const struct bench bench_trig_uretprobe_without_nop; +extern const struct bench bench_trig_uprobe_nop; +extern const struct bench bench_trig_uretprobe_nop; +extern const struct bench bench_trig_uprobe_push; +extern const struct bench bench_trig_uretprobe_push; +extern const struct bench bench_trig_uprobe_ret; +extern const struct bench bench_trig_uretprobe_ret; extern const struct bench bench_rb_libbpf; extern const struct bench bench_rb_custom; extern const struct bench bench_pb_libbpf; @@ -537,14 +543,20 @@ static const struct bench *benchs[] = { &bench_trig_tp, &bench_trig_rawtp, &bench_trig_kprobe, + &bench_trig_kretprobe, + &bench_trig_kprobe_multi, + &bench_trig_kretprobe_multi, &bench_trig_fentry, + &bench_trig_fexit, &bench_trig_fentry_sleep, &bench_trig_fmodret, &bench_trig_uprobe_base, - &bench_trig_uprobe_with_nop, - &bench_trig_uretprobe_with_nop, - &bench_trig_uprobe_without_nop, - &bench_trig_uretprobe_without_nop, + &bench_trig_uprobe_nop, + &bench_trig_uretprobe_nop, + &bench_trig_uprobe_push, + &bench_trig_uretprobe_push, + &bench_trig_uprobe_ret, + &bench_trig_uretprobe_ret, &bench_rb_libbpf, &bench_rb_custom, &bench_pb_libbpf, @@ -607,6 +619,10 @@ static void setup_benchmark(void) bench->setup(); for (i = 0; i < env.consumer_cnt; i++) { + if (!bench->consumer_thread) { + fprintf(stderr, "benchmark doesn't support consumers!\n"); + exit(1); + } err = pthread_create(&state.consumers[i], NULL, bench->consumer_thread, (void *)(long)i); if (err) { @@ -626,6 +642,10 @@ static void setup_benchmark(void) env.prod_cpus.next_cpu = env.cons_cpus.next_cpu; for (i = 0; i < env.producer_cnt; i++) { + if (!bench->producer_thread) { + fprintf(stderr, "benchmark doesn't support producers!\n"); + exit(1); + } err = pthread_create(&state.producers[i], NULL, bench->producer_thread, (void *)(long)i); if (err) { diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c 
b/tools/testing/selftests/bpf/benchs/bench_trigger.c index dbd362771d..ace0d1011a 100644 --- a/tools/testing/selftests/bpf/benchs/bench_trigger.c +++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c @@ -85,12 +85,36 @@ static void trigger_kprobe_setup(void) attach_bpf(ctx.skel->progs.bench_trigger_kprobe); } +static void trigger_kretprobe_setup(void) +{ + setup_ctx(); + attach_bpf(ctx.skel->progs.bench_trigger_kretprobe); +} + +static void trigger_kprobe_multi_setup(void) +{ + setup_ctx(); + attach_bpf(ctx.skel->progs.bench_trigger_kprobe_multi); +} + +static void trigger_kretprobe_multi_setup(void) +{ + setup_ctx(); + attach_bpf(ctx.skel->progs.bench_trigger_kretprobe_multi); +} + static void trigger_fentry_setup(void) { setup_ctx(); attach_bpf(ctx.skel->progs.bench_trigger_fentry); } +static void trigger_fexit_setup(void) +{ + setup_ctx(); + attach_bpf(ctx.skel->progs.bench_trigger_fexit); +} + static void trigger_fentry_sleep_setup(void) { setup_ctx(); @@ -113,12 +137,25 @@ static void trigger_fmodret_setup(void) * GCC doesn't generate stack setup preample for these functions due to them * having no input arguments and doing nothing in the body. */ -__weak void uprobe_target_with_nop(void) +__weak void uprobe_target_nop(void) { asm volatile ("nop"); } -__weak void uprobe_target_without_nop(void) +__weak void opaque_noop_func(void) +{ +} + +__weak int uprobe_target_push(void) +{ + /* overhead of function call is negligible compared to uprobe + * triggering, so this shouldn't affect benchmark results much + */ + opaque_noop_func(); + return 1; +} + +__weak void uprobe_target_ret(void) { asm volatile (""); } @@ -126,27 +163,34 @@ __weak void uprobe_target_without_nop(void) static void *uprobe_base_producer(void *input) { while (true) { - uprobe_target_with_nop(); + uprobe_target_nop(); atomic_inc(&base_hits.value); } return NULL; } -static void *uprobe_producer_with_nop(void *input) +static void *uprobe_producer_nop(void *input) +{ + while (true) + uprobe_target_nop(); + return NULL; +} + +static void *uprobe_producer_push(void *input) { while (true) - uprobe_target_with_nop(); + uprobe_target_push(); return NULL; } -static void *uprobe_producer_without_nop(void *input) +static void *uprobe_producer_ret(void *input) { while (true) - uprobe_target_without_nop(); + uprobe_target_ret(); return NULL; } -static void usetup(bool use_retprobe, bool use_nop) +static void usetup(bool use_retprobe, void *target_addr) { size_t uprobe_offset; struct bpf_link *link; @@ -159,11 +203,7 @@ static void usetup(bool use_retprobe, bool use_nop) exit(1); } - if (use_nop) - uprobe_offset = get_uprobe_offset(&uprobe_target_with_nop); - else - uprobe_offset = get_uprobe_offset(&uprobe_target_without_nop); - + uprobe_offset = get_uprobe_offset(target_addr); link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe, use_retprobe, -1 /* all PIDs */, @@ -176,24 +216,34 @@ static void usetup(bool use_retprobe, bool use_nop) ctx.skel->links.bench_trigger_uprobe = link; } -static void uprobe_setup_with_nop(void) +static void uprobe_setup_nop(void) { - usetup(false, true); + usetup(false, &uprobe_target_nop); } -static void uretprobe_setup_with_nop(void) +static void uretprobe_setup_nop(void) { - usetup(true, true); + usetup(true, &uprobe_target_nop); } -static void uprobe_setup_without_nop(void) +static void uprobe_setup_push(void) { - usetup(false, false); + usetup(false, &uprobe_target_push); } -static void uretprobe_setup_without_nop(void) +static void uretprobe_setup_push(void) { - usetup(true, 
false); + usetup(true, &uprobe_target_push); +} + +static void uprobe_setup_ret(void) +{ + usetup(false, &uprobe_target_ret); +} + +static void uretprobe_setup_ret(void) +{ + usetup(true, &uprobe_target_ret); } const struct bench bench_trig_base = { @@ -235,6 +285,36 @@ const struct bench bench_trig_kprobe = { .report_final = hits_drops_report_final, }; +const struct bench bench_trig_kretprobe = { + .name = "trig-kretprobe", + .validate = trigger_validate, + .setup = trigger_kretprobe_setup, + .producer_thread = trigger_producer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_kprobe_multi = { + .name = "trig-kprobe-multi", + .validate = trigger_validate, + .setup = trigger_kprobe_multi_setup, + .producer_thread = trigger_producer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_kretprobe_multi = { + .name = "trig-kretprobe-multi", + .validate = trigger_validate, + .setup = trigger_kretprobe_multi_setup, + .producer_thread = trigger_producer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + const struct bench bench_trig_fentry = { .name = "trig-fentry", .validate = trigger_validate, @@ -245,6 +325,16 @@ const struct bench bench_trig_fentry = { .report_final = hits_drops_report_final, }; +const struct bench bench_trig_fexit = { + .name = "trig-fexit", + .validate = trigger_validate, + .setup = trigger_fexit_setup, + .producer_thread = trigger_producer, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + const struct bench bench_trig_fentry_sleep = { .name = "trig-fentry-sleep", .validate = trigger_validate, @@ -274,37 +364,55 @@ const struct bench bench_trig_uprobe_base = { .report_final = hits_drops_report_final, }; -const struct bench bench_trig_uprobe_with_nop = { - .name = "trig-uprobe-with-nop", - .setup = uprobe_setup_with_nop, - .producer_thread = uprobe_producer_with_nop, +const struct bench bench_trig_uprobe_nop = { + .name = "trig-uprobe-nop", + .setup = uprobe_setup_nop, + .producer_thread = uprobe_producer_nop, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_uretprobe_nop = { + .name = "trig-uretprobe-nop", + .setup = uretprobe_setup_nop, + .producer_thread = uprobe_producer_nop, + .measure = trigger_measure, + .report_progress = hits_drops_report_progress, + .report_final = hits_drops_report_final, +}; + +const struct bench bench_trig_uprobe_push = { + .name = "trig-uprobe-push", + .setup = uprobe_setup_push, + .producer_thread = uprobe_producer_push, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, }; -const struct bench bench_trig_uretprobe_with_nop = { - .name = "trig-uretprobe-with-nop", - .setup = uretprobe_setup_with_nop, - .producer_thread = uprobe_producer_with_nop, +const struct bench bench_trig_uretprobe_push = { + .name = "trig-uretprobe-push", + .setup = uretprobe_setup_push, + .producer_thread = uprobe_producer_push, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, }; -const struct bench bench_trig_uprobe_without_nop = { - .name = 
"trig-uprobe-without-nop", - .setup = uprobe_setup_without_nop, - .producer_thread = uprobe_producer_without_nop, +const struct bench bench_trig_uprobe_ret = { + .name = "trig-uprobe-ret", + .setup = uprobe_setup_ret, + .producer_thread = uprobe_producer_ret, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, }; -const struct bench bench_trig_uretprobe_without_nop = { - .name = "trig-uretprobe-without-nop", - .setup = uretprobe_setup_without_nop, - .producer_thread = uprobe_producer_without_nop, +const struct bench bench_trig_uretprobe_ret = { + .name = "trig-uretprobe-ret", + .setup = uretprobe_setup_ret, + .producer_thread = uprobe_producer_ret, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, diff --git a/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh new file mode 100755 index 0000000000..9bdcc74e03 --- /dev/null +++ b/tools/testing/selftests/bpf/benchs/run_bench_uprobes.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +set -eufo pipefail + +for i in base {uprobe,uretprobe}-{nop,push,ret} +do + summary=$(sudo ./bench -w2 -d5 -a trig-$i | tail -n1 | cut -d'(' -f1 | cut -d' ' -f3-) + printf "%-15s: %s\n" $i "$summary" +done diff --git a/tools/testing/selftests/bpf/bpf_arena_alloc.h b/tools/testing/selftests/bpf/bpf_arena_alloc.h new file mode 100644 index 0000000000..c27678299e --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_arena_alloc.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#pragma once +#include "bpf_arena_common.h" + +#ifndef __round_mask +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +#endif +#ifndef round_up +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) +#endif + +#ifdef __BPF__ +#define NR_CPUS (sizeof(struct cpumask) * 8) + +static void __arena * __arena page_frag_cur_page[NR_CPUS]; +static int __arena page_frag_cur_offset[NR_CPUS]; + +/* Simple page_frag allocator */ +static inline void __arena* bpf_alloc(unsigned int size) +{ + __u64 __arena *obj_cnt; + __u32 cpu = bpf_get_smp_processor_id(); + void __arena *page = page_frag_cur_page[cpu]; + int __arena *cur_offset = &page_frag_cur_offset[cpu]; + int offset; + + size = round_up(size, 8); + if (size >= PAGE_SIZE - 8) + return NULL; + if (!page) { +refill: + page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page) + return NULL; + cast_kern(page); + page_frag_cur_page[cpu] = page; + *cur_offset = PAGE_SIZE - 8; + obj_cnt = page + PAGE_SIZE - 8; + *obj_cnt = 0; + } else { + cast_kern(page); + obj_cnt = page + PAGE_SIZE - 8; + } + + offset = *cur_offset - size; + if (offset < 0) + goto refill; + + (*obj_cnt)++; + *cur_offset = offset; + return page + offset; +} + +static inline void bpf_free(void __arena *addr) +{ + __u64 __arena *obj_cnt; + + addr = (void __arena *)(((long)addr) & ~(PAGE_SIZE - 1)); + obj_cnt = addr + PAGE_SIZE - 8; + if (--(*obj_cnt) == 0) + bpf_arena_free_pages(&arena, addr, 1); +} +#else +static inline void __arena* bpf_alloc(unsigned int size) { return NULL; } +static inline void bpf_free(void __arena *addr) {} +#endif diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h new file mode 100644 index 0000000000..567491f3e1 --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_arena_common.h @@ -0,0 +1,70 @@ +/* 
SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#pragma once + +#ifndef WRITE_ONCE +#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val)) +#endif + +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE (-1) +#endif + +#ifndef arena_container_of +#define arena_container_of(ptr, type, member) \ + ({ \ + void __arena *__mptr = (void __arena *)(ptr); \ + ((type *)(__mptr - offsetof(type, member))); \ + }) +#endif + +#ifdef __BPF__ /* when compiled as bpf program */ + +#ifndef PAGE_SIZE +#define PAGE_SIZE __PAGE_SIZE +/* + * for older kernels try sizeof(struct genradix_node) + * or flexible: + * static inline long __bpf_page_size(void) { + * return bpf_core_enum_value(enum page_size_enum___l, __PAGE_SIZE___l) ?: sizeof(struct genradix_node); + * } + * but generated code is not great. + */ +#endif + +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM) +#define __arena __attribute__((address_space(1))) +#define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */ +#define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */ +#else +#define __arena +#define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1) +#define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0) +#endif + +void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt, + int node_id, __u64 flags) __ksym __weak; +void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak; + +#else /* when compiled as user space code */ + +#define __arena +#define __arg_arena +#define cast_kern(ptr) /* nop for user space */ +#define cast_user(ptr) /* nop for user space */ +__weak char arena[1]; + +#ifndef offsetof +#define offsetof(type, member) ((unsigned long)&((type *)0)->member) +#endif + +static inline void __arena* bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt, + int node_id, __u64 flags) +{ + return NULL; +} +static inline void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) +{ +} + +#endif diff --git a/tools/testing/selftests/bpf/bpf_arena_htab.h b/tools/testing/selftests/bpf/bpf_arena_htab.h new file mode 100644 index 0000000000..acc01a8766 --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_arena_htab.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
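Taken together, the two headers above give a BPF program a malloc-like interface backed by arena pages. A hedged usage sketch, assuming an arena map named ``arena`` is declared in the program as the selftests do (``struct node`` is illustrative):

.. code-block:: c

    #include "bpf_arena_alloc.h"      /* the page_frag allocator above */

    struct node {
        struct node __arena *next;
        int payload;
    };

    static struct node __arena *new_node(int v)
    {
        struct node __arena *n = bpf_alloc(sizeof(*n));

        if (!n)
            return NULL;
        n->payload = v;     /* __arena loads/stores target arena pages */
        n->next = NULL;
        return n;
        /* bpf_free(n) later decrements the page's trailing object count
         * and returns the page to the arena once it reaches zero */
    }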
*/ +#pragma once +#include <errno.h> +#include "bpf_arena_alloc.h" +#include "bpf_arena_list.h" + +struct htab_bucket { + struct arena_list_head head; +}; +typedef struct htab_bucket __arena htab_bucket_t; + +struct htab { + htab_bucket_t *buckets; + int n_buckets; +}; +typedef struct htab __arena htab_t; + +static inline htab_bucket_t *__select_bucket(htab_t *htab, __u32 hash) +{ + htab_bucket_t *b = htab->buckets; + + cast_kern(b); + return &b[hash & (htab->n_buckets - 1)]; +} + +static inline arena_list_head_t *select_bucket(htab_t *htab, __u32 hash) +{ + return &__select_bucket(htab, hash)->head; +} + +struct hashtab_elem { + int hash; + int key; + int value; + struct arena_list_node hash_node; +}; +typedef struct hashtab_elem __arena hashtab_elem_t; + +static hashtab_elem_t *lookup_elem_raw(arena_list_head_t *head, __u32 hash, int key) +{ + hashtab_elem_t *l; + + list_for_each_entry(l, head, hash_node) + if (l->hash == hash && l->key == key) + return l; + + return NULL; +} + +static int htab_hash(int key) +{ + return key; +} + +__weak int htab_lookup_elem(htab_t *htab __arg_arena, int key) +{ + hashtab_elem_t *l_old; + arena_list_head_t *head; + + cast_kern(htab); + head = select_bucket(htab, key); + l_old = lookup_elem_raw(head, htab_hash(key), key); + if (l_old) + return l_old->value; + return 0; +} + +__weak int htab_update_elem(htab_t *htab __arg_arena, int key, int value) +{ + hashtab_elem_t *l_new = NULL, *l_old; + arena_list_head_t *head; + + cast_kern(htab); + head = select_bucket(htab, key); + l_old = lookup_elem_raw(head, htab_hash(key), key); + + l_new = bpf_alloc(sizeof(*l_new)); + if (!l_new) + return -ENOMEM; + l_new->key = key; + l_new->hash = htab_hash(key); + l_new->value = value; + + list_add_head(&l_new->hash_node, head); + if (l_old) { + list_del(&l_old->hash_node); + bpf_free(l_old); + } + return 0; +} + +void htab_init(htab_t *htab) +{ + void __arena *buckets = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0); + + cast_user(buckets); + htab->buckets = buckets; + htab->n_buckets = 2 * PAGE_SIZE / sizeof(struct htab_bucket); +} diff --git a/tools/testing/selftests/bpf/bpf_arena_list.h b/tools/testing/selftests/bpf/bpf_arena_list.h new file mode 100644 index 0000000000..b99b9f408e --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_arena_list.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#pragma once +#include "bpf_arena_common.h" + +struct arena_list_node; + +typedef struct arena_list_node __arena arena_list_node_t; + +struct arena_list_node { + arena_list_node_t *next; + arena_list_node_t * __arena *pprev; +}; + +struct arena_list_head { + struct arena_list_node __arena *first; +}; +typedef struct arena_list_head __arena arena_list_head_t; + +#define list_entry(ptr, type, member) arena_container_of(ptr, type, member) + +#define list_entry_safe(ptr, type, member) \ + ({ typeof(*ptr) * ___ptr = (ptr); \ + ___ptr ? ({ cast_kern(___ptr); list_entry(___ptr, type, member); }) : NULL; \ + }) + +#ifndef __BPF__ +static inline void *bpf_iter_num_new(struct bpf_iter_num *it, int i, int j) { return NULL; } +static inline void bpf_iter_num_destroy(struct bpf_iter_num *it) {} +static inline bool bpf_iter_num_next(struct bpf_iter_num *it) { return true; } +#define cond_break ({}) +#endif + +/* Safely walk link list elements. Deletion of elements is allowed. 
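The walker documented here (and defined just below) is exercised by prog_tests/arena_list.c later in this patch; its use looks like this, reproduced from that test:

.. code-block:: c

    struct elem {
        struct arena_list_node node;
        __u64 value;
    };

    static int list_sum(struct arena_list_head *head)
    {
        struct elem __arena *n;
        int sum = 0;

        list_for_each_entry(n, head, node)
            sum += n->value;
        return sum;
    }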
*/ +#define list_for_each_entry(pos, head, member) \ + for (void * ___tmp = (pos = list_entry_safe((head)->first, \ + typeof(*(pos)), member), \ + (void *)0); \ + pos && ({ ___tmp = (void *)pos->member.next; 1; }); \ + cond_break, \ + pos = list_entry_safe((void __arena *)___tmp, typeof(*(pos)), member)) + +static inline void list_add_head(arena_list_node_t *n, arena_list_head_t *h) +{ + arena_list_node_t *first = h->first, * __arena *tmp; + + cast_user(first); + cast_kern(n); + WRITE_ONCE(n->next, first); + cast_kern(first); + if (first) { + tmp = &n->next; + cast_user(tmp); + WRITE_ONCE(first->pprev, tmp); + } + cast_user(n); + WRITE_ONCE(h->first, n); + + tmp = &h->first; + cast_user(tmp); + cast_kern(n); + WRITE_ONCE(n->pprev, tmp); +} + +static inline void __list_del(arena_list_node_t *n) +{ + arena_list_node_t *next = n->next, *tmp; + arena_list_node_t * __arena *pprev = n->pprev; + + cast_user(next); + cast_kern(pprev); + tmp = *pprev; + cast_kern(tmp); + WRITE_ONCE(tmp, next); + if (next) { + cast_user(pprev); + cast_kern(next); + WRITE_ONCE(next->pprev, pprev); + } +} + +#define POISON_POINTER_DELTA 0 + +#define LIST_POISON1 ((void __arena *) 0x100 + POISON_POINTER_DELTA) +#define LIST_POISON2 ((void __arena *) 0x122 + POISON_POINTER_DELTA) + +static inline void list_del(arena_list_node_t *n) +{ + __list_del(n); + n->next = LIST_POISON1; + n->pprev = LIST_POISON2; +} diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index f44875f8b3..a5b9df38c1 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -260,11 +260,11 @@ extern void bpf_throw(u64 cookie) __ksym; #define __is_signed_type(type) (((type)(-1)) < (type)1) -#define __bpf_cmp(LHS, OP, SIGN, PRED, RHS, DEFAULT) \ +#define __bpf_cmp(LHS, OP, PRED, RHS, DEFAULT) \ ({ \ __label__ l_true; \ bool ret = DEFAULT; \ - asm volatile goto("if %[lhs] " SIGN #OP " %[rhs] goto %l[l_true]" \ + asm volatile goto("if %[lhs] " OP " %[rhs] goto %l[l_true]" \ :: [lhs] "r"((short)LHS), [rhs] PRED (RHS) :: l_true); \ ret = !DEFAULT; \ l_true: \ @@ -276,7 +276,7 @@ l_true: \ * __lhs OP __rhs below will catch the mistake. * Be aware that we check only __lhs to figure out the sign of compare. */ -#define _bpf_cmp(LHS, OP, RHS, NOFLIP) \ +#define _bpf_cmp(LHS, OP, RHS, UNLIKELY) \ ({ \ typeof(LHS) __lhs = (LHS); \ typeof(RHS) __rhs = (RHS); \ @@ -285,14 +285,17 @@ l_true: \ (void)(__lhs OP __rhs); \ if (__cmp_cannot_be_signed(OP) || !__is_signed_type(typeof(__lhs))) { \ if (sizeof(__rhs) == 8) \ - ret = __bpf_cmp(__lhs, OP, "", "r", __rhs, NOFLIP); \ + /* "i" will truncate 64-bit constant into s32, \ + * so we have to use extra register via "r". 
\ + */ \ + ret = __bpf_cmp(__lhs, #OP, "r", __rhs, UNLIKELY); \ else \ - ret = __bpf_cmp(__lhs, OP, "", "i", __rhs, NOFLIP); \ + ret = __bpf_cmp(__lhs, #OP, "ri", __rhs, UNLIKELY); \ } else { \ if (sizeof(__rhs) == 8) \ - ret = __bpf_cmp(__lhs, OP, "s", "r", __rhs, NOFLIP); \ + ret = __bpf_cmp(__lhs, "s"#OP, "r", __rhs, UNLIKELY); \ else \ - ret = __bpf_cmp(__lhs, OP, "s", "i", __rhs, NOFLIP); \ + ret = __bpf_cmp(__lhs, "s"#OP, "ri", __rhs, UNLIKELY); \ } \ ret; \ }) @@ -304,7 +307,7 @@ l_true: \ #ifndef bpf_cmp_likely #define bpf_cmp_likely(LHS, OP, RHS) \ ({ \ - bool ret; \ + bool ret = 0; \ if (__builtin_strcmp(#OP, "==") == 0) \ ret = _bpf_cmp(LHS, !=, RHS, false); \ else if (__builtin_strcmp(#OP, "!=") == 0) \ @@ -318,16 +321,71 @@ l_true: \ else if (__builtin_strcmp(#OP, ">=") == 0) \ ret = _bpf_cmp(LHS, <, RHS, false); \ else \ - (void) "bug"; \ + asm volatile("r0 " #OP " invalid compare"); \ ret; \ }) #endif +#define cond_break \ + ({ __label__ l_break, l_continue; \ + asm volatile goto("1:.byte 0xe5; \ + .byte 0; \ + .long ((%l[l_break] - 1b - 8) / 8) & 0xffff; \ + .short 0" \ + :::: l_break); \ + goto l_continue; \ + l_break: break; \ + l_continue:; \ + }) + #ifndef bpf_nop_mov #define bpf_nop_mov(var) \ asm volatile("%[reg]=%[reg]"::[reg]"r"((short)var)) #endif +/* emit instruction: + * rX = rX .off = BPF_ADDR_SPACE_CAST .imm32 = (dst_as << 16) | src_as + */ +#ifndef bpf_addr_space_cast +#define bpf_addr_space_cast(var, dst_as, src_as)\ + asm volatile(".byte 0xBF; \ + .ifc %[reg], r0; \ + .byte 0x00; \ + .endif; \ + .ifc %[reg], r1; \ + .byte 0x11; \ + .endif; \ + .ifc %[reg], r2; \ + .byte 0x22; \ + .endif; \ + .ifc %[reg], r3; \ + .byte 0x33; \ + .endif; \ + .ifc %[reg], r4; \ + .byte 0x44; \ + .endif; \ + .ifc %[reg], r5; \ + .byte 0x55; \ + .endif; \ + .ifc %[reg], r6; \ + .byte 0x66; \ + .endif; \ + .ifc %[reg], r7; \ + .byte 0x77; \ + .endif; \ + .ifc %[reg], r8; \ + .byte 0x88; \ + .endif; \ + .ifc %[reg], r9; \ + .byte 0x99; \ + .endif; \ + .short %[off]; \ + .long %[as]" \ + : [reg]"+r"(var) \ + : [off]"i"(BPF_ADDR_SPACE_CAST) \ + , [as]"i"((dst_as << 16) | src_as)); +#endif + /* Description * Assert that a conditional expression is true. 
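The cond_break macro above hand-assembles a may_goto-style instruction (opcode 0xe5) that exits the loop when the verifier's runtime budget is exhausted, letting otherwise unbounded loops pass verification. A hedged sketch, reusing the arena node type from the earlier sketch:

.. code-block:: c

    static int walk(struct node __arena *n)
    {
        int sum = 0;

        while (n) {
            sum += n->payload;
            n = n->next;
            cond_break;   /* breaks out when the verifier's budget is spent */
        }
        return sum;
    }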
* Returns diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h index b4e78c1eb3..14ebe7d9e1 100644 --- a/tools/testing/selftests/bpf/bpf_kfuncs.h +++ b/tools/testing/selftests/bpf/bpf_kfuncs.h @@ -9,7 +9,7 @@ struct bpf_sock_addr_kern; * Error code */ extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags, - struct bpf_dynptr *ptr__uninit) __ksym; + struct bpf_dynptr *ptr__uninit) __ksym __weak; /* Description * Initializes an xdp-type dynptr @@ -17,7 +17,7 @@ extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags, * Error code */ extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags, - struct bpf_dynptr *ptr__uninit) __ksym; + struct bpf_dynptr *ptr__uninit) __ksym __weak; /* Description * Obtain a read-only pointer to the dynptr's data @@ -26,7 +26,7 @@ extern int bpf_dynptr_from_xdp(struct xdp_md *xdp, __u64 flags, * buffer if unable to obtain a direct pointer */ extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset, - void *buffer, __u32 buffer__szk) __ksym; + void *buffer, __u32 buffer__szk) __ksym __weak; /* Description * Obtain a read-write pointer to the dynptr's data @@ -35,13 +35,13 @@ extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset, * buffer if unable to obtain a direct pointer */ extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset, - void *buffer, __u32 buffer__szk) __ksym; + void *buffer, __u32 buffer__szk) __ksym __weak; -extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym; -extern bool bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym; -extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym; -extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym; -extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym; +extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym __weak; +extern bool bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym __weak; +extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym __weak; +extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym __weak; +extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym __weak; /* Description * Modify the address of a AF_UNIX sockaddr. @@ -51,9 +51,19 @@ extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clo extern int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern, const __u8 *sun_path, __u32 sun_path__sz) __ksym; +/* Description + * Allocate and configure a reqsk and link it with a listener and skb. 
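Marking the kfunc externs above ``__ksym __weak`` matters for portability: libbpf will still load the object on kernels that lack a kfunc, and the program can test for it at runtime with bpf_ksym_exists() from bpf_helpers.h. A minimal sketch:

.. code-block:: c

    extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym __weak;

    static __u32 dynptr_size_or_zero(const struct bpf_dynptr *ptr)
    {
        /* weak ksyms resolve to zero when the kernel lacks the kfunc */
        if (!bpf_ksym_exists(bpf_dynptr_size))
            return 0;
        return bpf_dynptr_size(ptr);
    }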
+ * Returns + * Error code + */ +struct sock; +struct bpf_tcp_req_attrs; +extern int bpf_sk_assign_tcp_reqsk(struct __sk_buff *skb, struct sock *sk, + struct bpf_tcp_req_attrs *attrs, int attrs__sz) __ksym; + void *bpf_cast_to_kern_ctx(void *) __ksym; -void *bpf_rdonly_cast(void *obj, __u32 btf_id) __ksym; +extern void *bpf_rdonly_cast(const void *obj, __u32 btf_id) __ksym __weak; extern int bpf_get_file_xattr(struct file *file, const char *name, struct bpf_dynptr *value_ptr) __ksym; diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile b/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile new file mode 100644 index 0000000000..ed5143b79e --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_test_no_cfi/Makefile @@ -0,0 +1,19 @@ +BPF_TEST_NO_CFI_DIR := $(realpath $(dir $(abspath $(lastword $(MAKEFILE_LIST))))) +KDIR ?= $(abspath $(BPF_TEST_NO_CFI_DIR)/../../../../..) + +ifeq ($(V),1) +Q = +else +Q = @ +endif + +MODULES = bpf_test_no_cfi.ko + +obj-m += bpf_test_no_cfi.o + +all: + +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) modules + +clean: + +$(Q)make -C $(KDIR) M=$(BPF_TEST_NO_CFI_DIR) clean + diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c new file mode 100644 index 0000000000..b1dd889d5d --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <linux/bpf.h> +#include <linux/btf.h> +#include <linux/init.h> +#include <linux/module.h> + +struct bpf_test_no_cfi_ops { + void (*fn_1)(void); + void (*fn_2)(void); +}; + +static int dummy_init(struct btf *btf) +{ + return 0; +} + +static int dummy_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + return 0; +} + +static int dummy_reg(void *kdata) +{ + return 0; +} + +static void dummy_unreg(void *kdata) +{ +} + +static const struct bpf_verifier_ops dummy_verifier_ops; + +static void bpf_test_no_cfi_ops__fn_1(void) +{ +} + +static void bpf_test_no_cfi_ops__fn_2(void) +{ +} + +static struct bpf_test_no_cfi_ops __test_no_cif_ops = { + .fn_1 = bpf_test_no_cfi_ops__fn_1, + .fn_2 = bpf_test_no_cfi_ops__fn_2, +}; + +static struct bpf_struct_ops test_no_cif_ops = { + .verifier_ops = &dummy_verifier_ops, + .init = dummy_init, + .init_member = dummy_init_member, + .reg = dummy_reg, + .unreg = dummy_unreg, + .name = "bpf_test_no_cfi_ops", + .owner = THIS_MODULE, +}; + +static int bpf_test_no_cfi_init(void) +{ + int ret; + + ret = register_bpf_struct_ops(&test_no_cif_ops, + bpf_test_no_cfi_ops); + if (!ret) + return -EINVAL; + + test_no_cif_ops.cfi_stubs = &__test_no_cif_ops; + ret = register_bpf_struct_ops(&test_no_cif_ops, + bpf_test_no_cfi_ops); + return ret; +} + +static void bpf_test_no_cfi_exit(void) +{ +} + +module_init(bpf_test_no_cfi_init); +module_exit(bpf_test_no_cfi_exit); + +MODULE_AUTHOR("Kuifeng Lee"); +MODULE_DESCRIPTION("BPF no cfi_stubs test module"); +MODULE_LICENSE("Dual BSD/GPL"); + diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index e7c9e1c7fd..edcd261065 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2020 Facebook */ +#include <linux/bpf.h> #include <linux/btf.h> #include <linux/btf_ids.h> #include 
<linux/delay.h> @@ -204,6 +205,9 @@ __weak noinline struct file *bpf_testmod_return_ptr(int arg) case 5: return (void *)~(1ull << 30); /* trigger extable */ case 6: return &f; /* valid addr */ case 7: return (void *)((long)&f | 1); /* kernel tricks */ +#ifdef CONFIG_X86_64 + case 8: return (void *)VSYSCALL_ADDR; /* vsyscall page address */ +#endif default: return NULL; } } @@ -342,12 +346,12 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { .write = bpf_testmod_test_write, }; -BTF_SET8_START(bpf_testmod_common_kfunc_ids) +BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_kfunc_common_test) -BTF_SET8_END(bpf_testmod_common_kfunc_ids) +BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids) static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = { .owner = THIS_MODULE, @@ -493,7 +497,7 @@ __bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused return arg; } -BTF_SET8_START(bpf_testmod_check_kfunc_ids) +BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) BTF_ID_FLAGS(func, bpf_kfunc_call_test1) BTF_ID_FLAGS(func, bpf_kfunc_call_test2) @@ -519,13 +523,120 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU) BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE) BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg) BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset) -BTF_SET8_END(bpf_testmod_check_kfunc_ids) +BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids) + +static int bpf_testmod_ops_init(struct btf *btf) +{ + return 0; +} + +static bool bpf_testmod_ops_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + return bpf_tracing_btf_ctx_access(off, size, type, prog, info); +} + +static int bpf_testmod_ops_init_member(const struct btf_type *t, + const struct btf_member *member, + void *kdata, const void *udata) +{ + if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) { + /* For data fields, this function has to copy it and return + * 1 to indicate that the data has been handled by the + * struct_ops type, or the verifier will reject the map if + * the value of the data field is not zero. + */ + ((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data; + return 1; + } + return 0; +} static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { .owner = THIS_MODULE, .set = &bpf_testmod_check_kfunc_ids, }; +static const struct bpf_verifier_ops bpf_testmod_verifier_ops = { + .is_valid_access = bpf_testmod_ops_is_valid_access, +}; + +static int bpf_dummy_reg(void *kdata) +{ + struct bpf_testmod_ops *ops = kdata; + + if (ops->test_1) + ops->test_1(); + /* Some test cases (ex. struct_ops_maybe_null) may not have test_2 + * initialized, so we need to check for NULL. 
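On the user-space side, recent libbpf exposes such non-function struct_ops members through the skeleton's shadow copy, so a test can set the field before loading. A hedged sketch with hypothetical skeleton and map names:

.. code-block:: c

    static void set_shadow_data(void)
    {
        struct struct_ops_module *skel;  /* hypothetical generated skeleton */

        skel = struct_ops_module__open();
        if (!skel)
            return;
        /* written through libbpf's shadow copy; the module's
         * init_member() callback above copies "data" into the kernel
         * value and returns 1 to mark the field as handled */
        skel->struct_ops.testmod_1->data = 13;
        struct_ops_module__load(skel);
        struct_ops_module__destroy(skel);
    }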
+ */ + if (ops->test_2) + ops->test_2(4, ops->data); + + return 0; +} + +static void bpf_dummy_unreg(void *kdata) +{ +} + +static int bpf_testmod_test_1(void) +{ + return 0; +} + +static void bpf_testmod_test_2(int a, int b) +{ +} + +static int bpf_testmod_ops__test_maybe_null(int dummy, + struct task_struct *task__nullable) +{ + return 0; +} + +static struct bpf_testmod_ops __bpf_testmod_ops = { + .test_1 = bpf_testmod_test_1, + .test_2 = bpf_testmod_test_2, + .test_maybe_null = bpf_testmod_ops__test_maybe_null, +}; + +struct bpf_struct_ops bpf_bpf_testmod_ops = { + .verifier_ops = &bpf_testmod_verifier_ops, + .init = bpf_testmod_ops_init, + .init_member = bpf_testmod_ops_init_member, + .reg = bpf_dummy_reg, + .unreg = bpf_dummy_unreg, + .cfi_stubs = &__bpf_testmod_ops, + .name = "bpf_testmod_ops", + .owner = THIS_MODULE, +}; + +static int bpf_dummy_reg2(void *kdata) +{ + struct bpf_testmod_ops2 *ops = kdata; + + ops->test_1(); + return 0; +} + +static struct bpf_testmod_ops2 __bpf_testmod_ops2 = { + .test_1 = bpf_testmod_test_1, +}; + +struct bpf_struct_ops bpf_testmod_ops2 = { + .verifier_ops = &bpf_testmod_verifier_ops, + .init = bpf_testmod_ops_init, + .init_member = bpf_testmod_ops_init_member, + .reg = bpf_dummy_reg2, + .unreg = bpf_dummy_unreg, + .cfi_stubs = &__bpf_testmod_ops2, + .name = "bpf_testmod_ops2", + .owner = THIS_MODULE, +}; + extern int bpf_fentry_test1(int a); static int bpf_testmod_init(void) @@ -536,6 +647,8 @@ static int bpf_testmod_init(void) ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); + ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops); + ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2); if (ret < 0) return ret; if (bpf_fentry_test1(0) < 0) @@ -553,7 +666,7 @@ static void bpf_testmod_exit(void) while (refcount_read(&prog_test_struct.cnt) > 1) msleep(20); - return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); + sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file); } module_init(bpf_testmod_init); diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h index f32793efe0..23fa1872ee 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h @@ -5,6 +5,8 @@ #include <linux/types.h> +struct task_struct; + struct bpf_testmod_test_read_ctx { char *buf; loff_t off; @@ -28,4 +30,67 @@ struct bpf_iter_testmod_seq { int cnt; }; +struct bpf_testmod_ops { + int (*test_1)(void); + void (*test_2)(int a, int b); + /* Used to test nullable arguments. */ + int (*test_maybe_null)(int dummy, struct task_struct *task); + + /* The following fields are used to test shadow copies. */ + char onebyte; + struct { + int a; + int b; + } unsupported; + int data; + + /* The following pointers are used to test the maps having multiple + * pages of trampolines. 
+ */ + int (*tramp_1)(int value); + int (*tramp_2)(int value); + int (*tramp_3)(int value); + int (*tramp_4)(int value); + int (*tramp_5)(int value); + int (*tramp_6)(int value); + int (*tramp_7)(int value); + int (*tramp_8)(int value); + int (*tramp_9)(int value); + int (*tramp_10)(int value); + int (*tramp_11)(int value); + int (*tramp_12)(int value); + int (*tramp_13)(int value); + int (*tramp_14)(int value); + int (*tramp_15)(int value); + int (*tramp_16)(int value); + int (*tramp_17)(int value); + int (*tramp_18)(int value); + int (*tramp_19)(int value); + int (*tramp_20)(int value); + int (*tramp_21)(int value); + int (*tramp_22)(int value); + int (*tramp_23)(int value); + int (*tramp_24)(int value); + int (*tramp_25)(int value); + int (*tramp_26)(int value); + int (*tramp_27)(int value); + int (*tramp_28)(int value); + int (*tramp_29)(int value); + int (*tramp_30)(int value); + int (*tramp_31)(int value); + int (*tramp_32)(int value); + int (*tramp_33)(int value); + int (*tramp_34)(int value); + int (*tramp_35)(int value); + int (*tramp_36)(int value); + int (*tramp_37)(int value); + int (*tramp_38)(int value); + int (*tramp_39)(int value); + int (*tramp_40)(int value); +}; + +struct bpf_testmod_ops2 { + int (*test_1)(void); +}; + #endif /* _BPF_TESTMOD_H */ diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c index e812876d79..19be9c63d5 100644 --- a/tools/testing/selftests/bpf/cgroup_helpers.c +++ b/tools/testing/selftests/bpf/cgroup_helpers.c @@ -508,9 +508,6 @@ int cgroup_setup_and_join(const char *path) { /** * setup_classid_environment() - Setup the cgroupv1 net_cls environment * - * This function should only be called in a custom mount namespace, e.g. - * created by running setup_cgroup_environment. - * * After calling this function, cleanup_classid_environment should be called * once testing is complete. * diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index c125c441ab..01f241ea2c 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -81,6 +81,7 @@ CONFIG_NF_NAT=y CONFIG_RC_CORE=y CONFIG_SECURITY=y CONFIG_SECURITYFS=y +CONFIG_SYN_COOKIES=y CONFIG_TEST_BPF=m CONFIG_USERFAULTFD=y CONFIG_VSOCKETS=y diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index be96bf0223..6db27a9088 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -461,8 +461,6 @@ struct nstoken *open_netns(const char *name) return token; fail: - if (token->orig_netns_fd != -1) - close(token->orig_netns_fd); free(token); return NULL; } diff --git a/tools/testing/selftests/bpf/prog_tests/arena_htab.c b/tools/testing/selftests/bpf/prog_tests/arena_htab.c new file mode 100644 index 0000000000..d69fd2465f --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/arena_htab.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> +#include <sys/mman.h> +#include <network_helpers.h> +#include <sys/user.h> +#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */ +#include <unistd.h> +#define PAGE_SIZE getpagesize() +#endif +#include "arena_htab_asm.skel.h" +#include "arena_htab.skel.h" + +#include "bpf_arena_htab.h" + +static void test_arena_htab_common(struct htab *htab) +{ + int i; + + printf("htab %p buckets %p n_buckets %d\n", htab, htab->buckets, htab->n_buckets); + ASSERT_OK_PTR(htab->buckets, "htab->buckets shouldn't be NULL"); + for (i = 0; htab->buckets && i < 16; i += 4) { + /* + * Walk htab buckets and link lists since all pointers are correct, + * though they were written by bpf program. + */ + int val = htab_lookup_elem(htab, i); + + ASSERT_EQ(i, val, "key == value"); + } +} + +static void test_arena_htab_llvm(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts); + struct arena_htab *skel; + struct htab *htab; + size_t arena_sz; + void *area; + int ret; + + skel = arena_htab__open_and_load(); + if (!ASSERT_OK_PTR(skel, "arena_htab__open_and_load")) + return; + + area = bpf_map__initial_value(skel->maps.arena, &arena_sz); + /* fault-in a page with pgoff == 0 as sanity check */ + *(volatile int *)area = 0x55aa; + + /* bpf prog will allocate more pages */ + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_htab_llvm), &opts); + ASSERT_OK(ret, "ret"); + ASSERT_OK(opts.retval, "retval"); + if (skel->bss->skip) { + printf("%s:SKIP:compiler doesn't support arena_cast\n", __func__); + test__skip(); + goto out; + } + htab = skel->bss->htab_for_user; + test_arena_htab_common(htab); +out: + arena_htab__destroy(skel); +} + +static void test_arena_htab_asm(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts); + struct arena_htab_asm *skel; + struct htab *htab; + int ret; + + skel = arena_htab_asm__open_and_load(); + if (!ASSERT_OK_PTR(skel, "arena_htab_asm__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_htab_asm), &opts); + ASSERT_OK(ret, "ret"); + ASSERT_OK(opts.retval, "retval"); + htab = skel->bss->htab_for_user; + test_arena_htab_common(htab); + arena_htab_asm__destroy(skel); +} + +void test_arena_htab(void) +{ + if (test__start_subtest("arena_htab_llvm")) + test_arena_htab_llvm(); + if (test__start_subtest("arena_htab_asm")) + test_arena_htab_asm(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/arena_list.c b/tools/testing/selftests/bpf/prog_tests/arena_list.c new file mode 100644 index 0000000000..d15867cddd --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/arena_list.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#include <test_progs.h> +#include <sys/mman.h> +#include <network_helpers.h> +#include <sys/user.h> +#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */ +#include <unistd.h> +#define PAGE_SIZE getpagesize() +#endif + +#include "bpf_arena_list.h" +#include "arena_list.skel.h" + +struct elem { + struct arena_list_node node; + __u64 value; +}; + +static int list_sum(struct arena_list_head *head) +{ + struct elem __arena *n; + int sum = 0; + + list_for_each_entry(n, head, node) + sum += n->value; + return sum; +} + +static void test_arena_list_add_del(int cnt) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts); + struct arena_list *skel; + int expected_sum = (u64)cnt * (cnt - 1) / 2; + int ret, sum; + + skel = arena_list__open_and_load(); + if (!ASSERT_OK_PTR(skel, "arena_list__open_and_load")) + return; + + skel->bss->cnt = cnt; + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_list_add), &opts); + ASSERT_OK(ret, "ret_add"); + ASSERT_OK(opts.retval, "retval"); + if (skel->bss->skip) { + printf("%s:SKIP:compiler doesn't support arena_cast\n", __func__); + test__skip(); + goto out; + } + sum = list_sum(skel->bss->list_head); + ASSERT_EQ(sum, expected_sum, "sum of elems"); + ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems"); + ASSERT_EQ(skel->arena->test_val, cnt + 1, "num of elems"); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_list_del), &opts); + ASSERT_OK(ret, "ret_del"); + sum = list_sum(skel->bss->list_head); + ASSERT_EQ(sum, 0, "sum of list elems after del"); + ASSERT_EQ(skel->bss->list_sum, expected_sum, "sum of list elems computed by prog"); + ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems"); +out: + arena_list__destroy(skel); +} + +void test_arena_list(void) +{ + if (test__start_subtest("arena_list_1")) + test_arena_list_add_del(1); + if (test__start_subtest("arena_list_1000")) + test_arena_list_add_del(1000); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c b/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c new file mode 100644 index 0000000000..6a707213e4 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bad_struct_ops.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "bad_struct_ops.skel.h" +#include "bad_struct_ops2.skel.h" + +static void invalid_prog_reuse(void) +{ + struct bad_struct_ops *skel; + char *log = NULL; + int err; + + skel = bad_struct_ops__open(); + if (!ASSERT_OK_PTR(skel, "bad_struct_ops__open")) + return; + + if (start_libbpf_log_capture()) + goto cleanup; + + err = bad_struct_ops__load(skel); + log = stop_libbpf_log_capture(); + ASSERT_ERR(err, "bad_struct_ops__load should fail"); + ASSERT_HAS_SUBSTR(log, + "struct_ops init_kern testmod_2 func ptr test_1: invalid reuse of prog test_1", + "expected init_kern message"); + +cleanup: + free(log); + bad_struct_ops__destroy(skel); +} + +static void unused_program(void) +{ + struct bad_struct_ops2 *skel; + char *log = NULL; + int err; + + skel = bad_struct_ops2__open(); + if (!ASSERT_OK_PTR(skel, "bad_struct_ops2__open")) + return; + + /* struct_ops programs not referenced from any maps are open + * with autoload set to true. 
+ */ + ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo autoload == true"); + + if (start_libbpf_log_capture()) + goto cleanup; + + err = bad_struct_ops2__load(skel); + ASSERT_ERR(err, "bad_struct_ops2__load should fail"); + log = stop_libbpf_log_capture(); + ASSERT_HAS_SUBSTR(log, "prog 'foo': failed to load", + "message about 'foo' failing to load"); + +cleanup: + free(log); + bad_struct_ops2__destroy(skel); +} + +void test_bad_struct_ops(void) +{ + if (test__start_subtest("invalid_prog_reuse")) + invalid_prog_reuse(); + if (test__start_subtest("unused_program")) + unused_program(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index e770912fc1..4c6ada5b27 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -35,7 +35,7 @@ static int check_load(const char *file, enum bpf_prog_type type) } bpf_program__set_type(prog, type); - bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS); + bpf_program__set_flags(prog, testing_prog_flags()); bpf_program__set_log_level(prog, 4 | extra_prog_load_log_flags); err = bpf_object__load(obj); diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 816145bcb6..00965a6e83 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -3535,6 +3535,32 @@ static struct btf_raw_test raw_tests[] = { .value_type_id = 1, .max_entries = 1, }, +{ + .descr = "datasec: name '?.foo bar:buz' is ok", + .raw_types = { + /* int */ + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + /* VAR x */ /* [2] */ + BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), + BTF_VAR_STATIC, + /* DATASEC ?.data */ /* [3] */ + BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4), + BTF_VAR_SECINFO_ENC(2, 0, 4), + BTF_END_RAW, + }, + BTF_STR_SEC("\0x\0?.foo bar:buz"), +}, +{ + .descr = "type name '?foo' is not ok", + .raw_types = { + /* union ?foo; */ + BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [1] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0?foo"), + .err_str = "Invalid name", + .btf_load_err = true, +}, { .descr = "float test #1, well-formed", @@ -4363,6 +4389,9 @@ static void do_test_raw(unsigned int test_num) if (err || btf_fd < 0) goto done; + if (!test->map_type) + goto done; + opts.btf_fd = btf_fd; opts.btf_key_type_id = test->key_type_id; opts.btf_value_type_id = test->value_type_id; diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c index 25332e5967..74d6d7546f 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup1_hierarchy.c @@ -87,12 +87,9 @@ void test_cgroup1_hierarchy(void) goto destroy; /* Setup cgroup1 hierarchy */ - err = setup_cgroup_environment(); - if (!ASSERT_OK(err, "setup_cgroup_environment")) - goto destroy; err = setup_classid_environment(); if (!ASSERT_OK(err, "setup_classid_environment")) - goto cleanup_cgroup; + goto destroy; err = join_classid(); if (!ASSERT_OK(err, "join_cgroup1")) @@ -156,8 +153,6 @@ void test_cgroup1_hierarchy(void) cleanup: cleanup_classid_environment(); -cleanup_cgroup: - cleanup_cgroup_environment(); destroy: test_cgroup1_hierarchy__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c index 
c2e886399e..ecf89df781 100644 --- a/tools/testing/selftests/bpf/prog_tests/cpumask.c +++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c @@ -27,7 +27,7 @@ static void verify_success(const char *prog_name) struct bpf_program *prog; struct bpf_link *link = NULL; pid_t child_pid; - int status; + int status, err; skel = cpumask_success__open(); if (!ASSERT_OK_PTR(skel, "cpumask_success__open")) @@ -36,8 +36,8 @@ static void verify_success(const char *prog_name) skel->bss->pid = getpid(); skel->bss->nr_cpus = libbpf_num_possible_cpus(); - cpumask_success__load(skel); - if (!ASSERT_OK_PTR(skel, "cpumask_success__load")) + err = cpumask_success__load(skel); + if (!ASSERT_OK(err, "cpumask_success__load")) goto cleanup; prog = bpf_object__find_program_by_name(skel->obj, prog_name); diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c index 4951aa978f..3b7c57fe55 100644 --- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c +++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c @@ -626,50 +626,6 @@ err: return false; } -/* Request BPF program instructions after all rewrites are applied, - * e.g. verifier.c:convert_ctx_access() is done. - */ -static int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt) -{ - struct bpf_prog_info info = {}; - __u32 info_len = sizeof(info); - __u32 xlated_prog_len; - __u32 buf_element_size = sizeof(struct bpf_insn); - - if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { - perror("bpf_prog_get_info_by_fd failed"); - return -1; - } - - xlated_prog_len = info.xlated_prog_len; - if (xlated_prog_len % buf_element_size) { - printf("Program length %d is not multiple of %d\n", - xlated_prog_len, buf_element_size); - return -1; - } - - *cnt = xlated_prog_len / buf_element_size; - *buf = calloc(*cnt, buf_element_size); - if (!buf) { - perror("can't allocate xlated program buffer"); - return -ENOMEM; - } - - bzero(&info, sizeof(info)); - info.xlated_prog_len = xlated_prog_len; - info.xlated_prog_insns = (__u64)(unsigned long)*buf; - if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { - perror("second bpf_prog_get_info_by_fd failed"); - goto out_free_buf; - } - - return 0; - -out_free_buf: - free(*buf); - return -1; -} - static void print_insn(void *private_data, const char *fmt, ...) 
{ va_list args; diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c index 5c0ebe6ba8..dcb9e5070c 100644 --- a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c +++ b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c @@ -72,6 +72,6 @@ fail: bpf_tc_hook_destroy(&qdisc_hook); close_netns(nstoken); } - SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null"); + SYS_NOFAIL("ip netns del " NS_TEST); decap_sanity__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c index 4ad4cd6915..3379df2d4c 100644 --- a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c @@ -298,6 +298,6 @@ void test_fib_lookup(void) fail: if (nstoken) close_netns(nstoken); - SYS_NOFAIL("ip netns del " NS_TEST " &> /dev/null"); + SYS_NOFAIL("ip netns del " NS_TEST); fib_lookup__destroy(skel); } diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c index d4b1901f78..f3932941bb 100644 --- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c +++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c @@ -19,6 +19,7 @@ static const char *kmulti_syms[] = { }; #define KMULTI_CNT ARRAY_SIZE(kmulti_syms) static __u64 kmulti_addrs[KMULTI_CNT]; +static __u64 kmulti_cookies[] = { 3, 1, 2 }; #define KPROBE_FUNC "bpf_fentry_test1" static __u64 kprobe_addr; @@ -31,6 +32,8 @@ static noinline void uprobe_func(void) asm volatile (""); } +#define PERF_EVENT_COOKIE 0xdeadbeef + static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr, ssize_t offset, ssize_t entry_offset) { @@ -62,6 +65,8 @@ again: ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset, "kprobe_addr"); + ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie"); + if (!info.perf_event.kprobe.func_name) { ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len"); info.perf_event.kprobe.func_name = ptr_to_u64(&buf); @@ -81,6 +86,8 @@ again: goto again; } + ASSERT_EQ(info.perf_event.tracepoint.cookie, PERF_EVENT_COOKIE, "tracepoint_cookie"); + err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME, strlen(TP_NAME)); ASSERT_EQ(err, 0, "cmp_tp_name"); @@ -96,10 +103,17 @@ again: goto again; } + ASSERT_EQ(info.perf_event.uprobe.cookie, PERF_EVENT_COOKIE, "uprobe_cookie"); + err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE, strlen(UPROBE_FILE)); ASSERT_EQ(err, 0, "cmp_file_name"); break; + case BPF_PERF_EVENT_EVENT: + ASSERT_EQ(info.perf_event.event.type, PERF_TYPE_SOFTWARE, "event_type"); + ASSERT_EQ(info.perf_event.event.config, PERF_COUNT_SW_PAGE_FAULTS, "event_config"); + ASSERT_EQ(info.perf_event.event.cookie, PERF_EVENT_COOKIE, "event_cookie"); + break; default: err = -1; break; @@ -139,6 +153,7 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel, DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts, .attach_mode = PROBE_ATTACH_MODE_LINK, .retprobe = type == BPF_PERF_EVENT_KRETPROBE, + .bpf_cookie = PERF_EVENT_COOKIE, ); ssize_t entry_offset = 0; struct bpf_link *link; @@ -163,10 +178,13 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel, static void test_tp_fill_link_info(struct test_fill_link_info *skel) { + DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts, + .bpf_cookie = PERF_EVENT_COOKIE, + ); struct bpf_link *link; int link_fd, err; - link = 
bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME); + link = bpf_program__attach_tracepoint_opts(skel->progs.tp_run, TP_CAT, TP_NAME, &opts); if (!ASSERT_OK_PTR(link, "attach_tp")) return; @@ -176,16 +194,53 @@ static void test_tp_fill_link_info(struct test_fill_link_info *skel) bpf_link__destroy(link); } +static void test_event_fill_link_info(struct test_fill_link_info *skel) +{ + DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts, + .bpf_cookie = PERF_EVENT_COOKIE, + ); + struct bpf_link *link; + int link_fd, err, pfd; + struct perf_event_attr attr = { + .type = PERF_TYPE_SOFTWARE, + .config = PERF_COUNT_SW_PAGE_FAULTS, + .freq = 1, + .sample_freq = 1, + .size = sizeof(struct perf_event_attr), + }; + + pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu 0 */, + -1 /* group id */, 0 /* flags */); + if (!ASSERT_GE(pfd, 0, "perf_event_open")) + return; + + link = bpf_program__attach_perf_event_opts(skel->progs.event_run, pfd, &opts); + if (!ASSERT_OK_PTR(link, "attach_event")) + goto error; + + link_fd = bpf_link__fd(link); + err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_EVENT, 0, 0, 0); + ASSERT_OK(err, "verify_perf_link_info"); + bpf_link__destroy(link); + +error: + close(pfd); +} + static void test_uprobe_fill_link_info(struct test_fill_link_info *skel, enum bpf_perf_event_type type) { + DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, + .retprobe = type == BPF_PERF_EVENT_URETPROBE, + .bpf_cookie = PERF_EVENT_COOKIE, + ); struct bpf_link *link; int link_fd, err; - link = bpf_program__attach_uprobe(skel->progs.uprobe_run, - type == BPF_PERF_EVENT_URETPROBE, - 0, /* self pid */ - UPROBE_FILE, uprobe_offset); + link = bpf_program__attach_uprobe_opts(skel->progs.uprobe_run, + 0, /* self pid */ + UPROBE_FILE, uprobe_offset, + &opts); if (!ASSERT_OK_PTR(link, "attach_uprobe")) return; @@ -195,11 +250,11 @@ static void test_uprobe_fill_link_info(struct test_fill_link_info *skel, bpf_link__destroy(link); } -static int verify_kmulti_link_info(int fd, bool retprobe) +static int verify_kmulti_link_info(int fd, bool retprobe, bool has_cookies) { + __u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT]; struct bpf_link_info info; __u32 len = sizeof(info); - __u64 addrs[KMULTI_CNT]; int flags, i, err; memset(&info, 0, sizeof(info)); @@ -221,18 +276,22 @@ again: if (!info.kprobe_multi.addrs) { info.kprobe_multi.addrs = ptr_to_u64(addrs); + info.kprobe_multi.cookies = ptr_to_u64(cookies); goto again; } - for (i = 0; i < KMULTI_CNT; i++) + for (i = 0; i < KMULTI_CNT; i++) { ASSERT_EQ(addrs[i], kmulti_addrs[i], "kmulti_addrs"); + ASSERT_EQ(cookies[i], has_cookies ? 
kmulti_cookies[i] : 0, + "kmulti_cookies_value"); + } return 0; } static void verify_kmulti_invalid_user_buffer(int fd) { + __u64 addrs[KMULTI_CNT], cookies[KMULTI_CNT]; struct bpf_link_info info; __u32 len = sizeof(info); - __u64 addrs[KMULTI_CNT]; int err, i; memset(&info, 0, sizeof(info)); @@ -266,7 +325,20 @@ static void verify_kmulti_invalid_user_buffer(int fd) info.kprobe_multi.count = KMULTI_CNT; info.kprobe_multi.addrs = 0x1; /* invalid addr */ err = bpf_link_get_info_by_fd(fd, &info, &len); - ASSERT_EQ(err, -EFAULT, "invalid_buff"); + ASSERT_EQ(err, -EFAULT, "invalid_buff_addrs"); + + info.kprobe_multi.count = KMULTI_CNT; + info.kprobe_multi.addrs = ptr_to_u64(addrs); + info.kprobe_multi.cookies = 0x1; /* invalid addr */ + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, -EFAULT, "invalid_buff_cookies"); + + /* cookies && !count */ + info.kprobe_multi.count = 0; + info.kprobe_multi.addrs = ptr_to_u64(NULL); + info.kprobe_multi.cookies = ptr_to_u64(cookies); + err = bpf_link_get_info_by_fd(fd, &info, &len); + ASSERT_EQ(err, -EINVAL, "invalid_cookies_count"); } static int symbols_cmp_r(const void *a, const void *b) @@ -278,13 +350,15 @@ static int symbols_cmp_r(const void *a, const void *b) } static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel, - bool retprobe, bool invalid) + bool retprobe, bool cookies, + bool invalid) { LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); struct bpf_link *link; int link_fd, err; opts.syms = kmulti_syms; + opts.cookies = cookies ? kmulti_cookies : NULL; opts.cnt = KMULTI_CNT; opts.retprobe = retprobe; link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts); @@ -293,7 +367,7 @@ static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel, link_fd = bpf_link__fd(link); if (!invalid) { - err = verify_kmulti_link_info(link_fd, retprobe); + err = verify_kmulti_link_info(link_fd, retprobe, cookies); ASSERT_OK(err, "verify_kmulti_link_info"); } else { verify_kmulti_invalid_user_buffer(link_fd); @@ -513,6 +587,8 @@ void test_fill_link_info(void) test_kprobe_fill_link_info(skel, BPF_PERF_EVENT_KPROBE, true); if (test__start_subtest("tracepoint_link_info")) test_tp_fill_link_info(skel); + if (test__start_subtest("event_link_info")) + test_event_fill_link_info(skel); uprobe_offset = get_uprobe_offset(&uprobe_func); if (test__start_subtest("uprobe_link_info")) @@ -523,12 +599,16 @@ void test_fill_link_info(void) qsort(kmulti_syms, KMULTI_CNT, sizeof(kmulti_syms[0]), symbols_cmp_r); for (i = 0; i < KMULTI_CNT; i++) kmulti_addrs[i] = ksym_get_addr(kmulti_syms[i]); - if (test__start_subtest("kprobe_multi_link_info")) - test_kprobe_multi_fill_link_info(skel, false, false); - if (test__start_subtest("kretprobe_multi_link_info")) - test_kprobe_multi_fill_link_info(skel, true, false); + if (test__start_subtest("kprobe_multi_link_info")) { + test_kprobe_multi_fill_link_info(skel, false, false, false); + test_kprobe_multi_fill_link_info(skel, false, true, false); + } + if (test__start_subtest("kretprobe_multi_link_info")) { + test_kprobe_multi_fill_link_info(skel, true, false, false); + test_kprobe_multi_fill_link_info(skel, true, true, false); + } if (test__start_subtest("kprobe_multi_invalid_ubuff")) - test_kprobe_multi_fill_link_info(skel, true, true); + test_kprobe_multi_fill_link_info(skel, true, true, true); if (test__start_subtest("uprobe_multi_link_info")) test_uprobe_multi_fill_link_info(skel, false, false); diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c 
b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c index 57c814f5f6..8dd2af9081 100644 --- a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c +++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c @@ -59,9 +59,9 @@ static int setup_topology(bool ipv6) /* Wait for up to 5s for links to come up */ for (i = 0; i < 5; ++i) { if (ipv6) - up = !system("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6 " &>/dev/null"); + up = !SYS_NOFAIL("ip netns exec " NS0 " ping -6 -c 1 -W 1 " VETH1_ADDR6); else - up = !system("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR " &>/dev/null"); + up = !SYS_NOFAIL("ip netns exec " NS0 " ping -c 1 -W 1 " VETH1_ADDR); if (up) break; diff --git a/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c new file mode 100644 index 0000000000..7def158da9 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023. Huawei Technologies Co., Ltd */ +#include <test_progs.h> + +#include "linux/filter.h" +#include "kptr_xchg_inline.skel.h" + +void test_kptr_xchg_inline(void) +{ + struct kptr_xchg_inline *skel; + struct bpf_insn *insn = NULL; + struct bpf_insn exp; + unsigned int cnt; + int err; + +#if !(defined(__x86_64__) || defined(__aarch64__) || \ + (defined(__riscv) && __riscv_xlen == 64)) + test__skip(); + return; +#endif + + skel = kptr_xchg_inline__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_load")) + return; + + err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt); + if (!ASSERT_OK(err, "prog insn")) + goto out; + + /* The original instructions are: + * r1 = map[id:xxx][0]+0 + * r2 = 0 + * call bpf_kptr_xchg#yyy + * + * call bpf_kptr_xchg#yyy will be inlined as: + * r0 = r2 + * r0 = atomic64_xchg((u64 *)(r1 +0), r0) + */ + if (!ASSERT_GT(cnt, 5, "insn cnt")) + goto out; + + exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2); + if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov")) + goto out; + + exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0); + if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg")) + goto out; +out: + free(insn); + kptr_xchg_inline__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c index 9f766ddd94..4ed46ed58a 100644 --- a/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_probes.c @@ -30,6 +30,8 @@ void test_libbpf_probe_prog_types(void) if (prog_type == BPF_PROG_TYPE_UNSPEC) continue; + if (strcmp(prog_type_name, "__MAX_BPF_PROG_TYPE") == 0) + continue; if (!test__start_subtest(prog_type_name)) continue; @@ -68,6 +70,8 @@ void test_libbpf_probe_map_types(void) if (map_type == BPF_MAP_TYPE_UNSPEC) continue; + if (strcmp(map_type_name, "__MAX_BPF_MAP_TYPE") == 0) + continue; if (!test__start_subtest(map_type_name)) continue; diff --git a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c index eb34d612d6..62ea855ec4 100644 --- a/tools/testing/selftests/bpf/prog_tests/libbpf_str.c +++ b/tools/testing/selftests/bpf/prog_tests/libbpf_str.c @@ -132,6 +132,9 @@ static void test_libbpf_bpf_map_type_str(void) const char *map_type_str; char buf[256]; + if (map_type == __MAX_BPF_MAP_TYPE) + continue; + map_type_name = btf__str_by_offset(btf, e->name_off); map_type_str = libbpf_bpf_map_type_str(map_type); 
ASSERT_OK_PTR(map_type_str, map_type_name); @@ -186,6 +189,9 @@ static void test_libbpf_bpf_prog_type_str(void) const char *prog_type_str; char buf[256]; + if (prog_type == __MAX_BPF_PROG_TYPE) + continue; + prog_type_name = btf__str_by_offset(btf, e->name_off); prog_type_str = libbpf_bpf_prog_type_str(prog_type); ASSERT_OK_PTR(prog_type_str, prog_type_name); diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c index 7a3fa2ff56..90a98e23be 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_fixup.c +++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c @@ -169,9 +169,9 @@ void test_log_fixup(void) if (test__start_subtest("bad_core_relo_trunc_none")) bad_core_relo(0, TRUNC_NONE /* full buf */); if (test__start_subtest("bad_core_relo_trunc_partial")) - bad_core_relo(280, TRUNC_PARTIAL /* truncate original log a bit */); + bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */); if (test__start_subtest("bad_core_relo_trunc_full")) - bad_core_relo(220, TRUNC_FULL /* truncate also libbpf's message patch */); + bad_core_relo(240, TRUNC_FULL /* truncate also libbpf's message patch */); if (test__start_subtest("bad_core_relo_subprog")) bad_core_relo_subprog(); if (test__start_subtest("missing_map")) diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h index e9190574e7..fb1eb8c673 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/lwt_helpers.h @@ -27,8 +27,6 @@ } \ }) -#define NETNS "ns_lwt" - static inline int netns_create(void) { return system("ip netns add " NETNS); diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c index 2bc932a18c..835a1d756c 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/lwt_redirect.c @@ -54,6 +54,7 @@ #include <stdbool.h> #include <stdlib.h> +#define NETNS "ns_lwt_redirect" #include "lwt_helpers.h" #include "test_progs.h" #include "network_helpers.h" @@ -85,7 +86,7 @@ static void ping_dev(const char *dev, bool is_ingress) snprintf(ip, sizeof(ip), "20.0.0.%d", link_index); /* We won't get a reply. Don't fail here */ - SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1", + SYS_NOFAIL("ping %s -c1 -W1 -s %d", ip, ICMP_PAYLOAD_SIZE); } diff --git a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c index f4bb2d5fca..03825d2b45 100644 --- a/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c +++ b/tools/testing/selftests/bpf/prog_tests/lwt_reroute.c @@ -48,6 +48,7 @@ * For case 2, force UDP packets to overflow fq limit. As long as kernel * is not crashed, it is considered successful. */ +#define NETNS "ns_lwt_reroute" #include "lwt_helpers.h" #include "network_helpers.h" #include <linux/net_tstamp.h> @@ -63,7 +64,7 @@ static void ping_once(const char *ip) { /* We won't get a reply. 
Don't fail here */ - SYS_NOFAIL("ping %s -c1 -W1 -s %d >/dev/null 2>&1", + SYS_NOFAIL("ping %s -c1 -W1 -s %d", ip, ICMP_PAYLOAD_SIZE); } diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c index 7c0be7cf55..8f8d792307 100644 --- a/tools/testing/selftests/bpf/prog_tests/mptcp.c +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -79,7 +79,7 @@ static void cleanup_netns(struct nstoken *nstoken) if (nstoken) close_netns(nstoken); - SYS_NOFAIL("ip netns del %s &> /dev/null", NS_TEST); + SYS_NOFAIL("ip netns del %s", NS_TEST); } static int verify_tsk(int map_fd, int client_fd) diff --git a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c index 3f1f58d3a7..a1f7e7378a 100644 --- a/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c @@ -29,6 +29,10 @@ static void test_success(void) bpf_program__set_autoload(skel->progs.non_sleepable_1, true); bpf_program__set_autoload(skel->progs.non_sleepable_2, true); bpf_program__set_autoload(skel->progs.task_trusted_non_rcuptr, true); + bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog, true); + bpf_program__set_autoload(skel->progs.rcu_read_lock_global_subprog, true); + bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog_lock, true); + bpf_program__set_autoload(skel->progs.rcu_read_lock_subprog_unlock, true); err = rcu_read_lock__load(skel); if (!ASSERT_OK(err, "skel_load")) goto out; @@ -75,6 +79,8 @@ static const char * const inproper_region_tests[] = { "inproper_sleepable_helper", "inproper_sleepable_kfunc", "nested_rcu_region", + "rcu_read_lock_global_subprog_lock", + "rcu_read_lock_global_subprog_unlock", }; static void test_inproper_region(void) diff --git a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c index 820d0bcfc4..eb74363f9f 100644 --- a/tools/testing/selftests/bpf/prog_tests/reg_bounds.c +++ b/tools/testing/selftests/bpf/prog_tests/reg_bounds.c @@ -840,7 +840,7 @@ static int load_range_cmp_prog(struct range x, struct range y, enum op op, .log_level = 2, .log_buf = log_buf, .log_size = log_sz, - .prog_flags = BPF_F_TEST_REG_INVARIANTS, + .prog_flags = testing_prog_flags(), ); /* ; skip exit block below diff --git a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c index b0583309a9..9c11938fe5 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c @@ -214,7 +214,7 @@ void test_sock_destroy(void) cleanup: if (nstoken) close_netns(nstoken); - SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null"); + SYS_NOFAIL("ip netns del " TEST_NS); if (cgroup_fd >= 0) close(cgroup_fd); sock_destroy_prog__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c index 0c365f36c7..d56e18b255 100644 --- a/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/prog_tests/sock_iter_batch.c @@ -112,7 +112,7 @@ void test_sock_iter_batch(void) { struct nstoken *nstoken = NULL; - SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null"); + SYS_NOFAIL("ip netns del " TEST_NS); SYS(done, "ip netns add %s", TEST_NS); SYS(done, "ip -net %s link set dev lo up", TEST_NS); @@ -131,5 +131,5 @@ void test_sock_iter_batch(void) close_netns(nstoken); done: - SYS_NOFAIL("ip netns 
del " TEST_NS " &> /dev/null"); + SYS_NOFAIL("ip netns del " TEST_NS); } diff --git a/tools/testing/selftests/bpf/prog_tests/spin_lock.c b/tools/testing/selftests/bpf/prog_tests/spin_lock.c index 18d451be57..2b0068742e 100644 --- a/tools/testing/selftests/bpf/prog_tests/spin_lock.c +++ b/tools/testing/selftests/bpf/prog_tests/spin_lock.c @@ -48,6 +48,8 @@ static struct { { "lock_id_mismatch_innermapval_kptr", "bpf_spin_unlock of different lock" }, { "lock_id_mismatch_innermapval_global", "bpf_spin_unlock of different lock" }, { "lock_id_mismatch_innermapval_mapval", "bpf_spin_unlock of different lock" }, + { "lock_global_subprog_call1", "global function calls are not allowed while holding a lock" }, + { "lock_global_subprog_call2", "global function calls are not allowed while holding a lock" }, }; static int match_regex(const char *pattern, const char *string) diff --git a/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c b/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c new file mode 100644 index 0000000000..a5cc593c1e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/struct_ops_autocreate.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <test_progs.h> +#include "struct_ops_autocreate.skel.h" +#include "struct_ops_autocreate2.skel.h" + +static void cant_load_full_object(void) +{ + struct struct_ops_autocreate *skel; + char *log = NULL; + int err; + + skel = struct_ops_autocreate__open(); + if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open")) + return; + + if (start_libbpf_log_capture()) + goto cleanup; + /* The testmod_2 map BTF type (struct bpf_testmod_ops___v2) doesn't + * match the BTF of the actual struct bpf_testmod_ops defined in the + * kernel, so we should fail to load it if we don't disable autocreate + * for that map. 
+ */
+	err = struct_ops_autocreate__load(skel);
+	log = stop_libbpf_log_capture();
+	if (!ASSERT_ERR(err, "struct_ops_autocreate__load"))
+		goto cleanup;
+
+	ASSERT_HAS_SUBSTR(log, "libbpf: struct_ops init_kern", "init_kern message");
+	ASSERT_EQ(err, -ENOTSUP, "errno should be ENOTSUP");
+
+cleanup:
+	free(log);
+	struct_ops_autocreate__destroy(skel);
+}
+
+static int check_test_1_link(struct struct_ops_autocreate *skel, struct bpf_map *map)
+{
+	struct bpf_link *link;
+	int err;
+
+	link = bpf_map__attach_struct_ops(map);
+	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
+		return -1;
+
+	/* test_1() would be called from bpf_dummy_reg2() in bpf_testmod.c */
+	err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result");
+	bpf_link__destroy(link);
+	return err;
+}
+
+static void can_load_partial_object(void)
+{
+	struct struct_ops_autocreate *skel;
+	int err;
+
+	skel = struct_ops_autocreate__open();
+	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open"))
+		return;
+
+	err = bpf_map__set_autocreate(skel->maps.testmod_2, false);
+	if (!ASSERT_OK(err, "bpf_map__set_autocreate"))
+		goto cleanup;
+
+	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 default autoload");
+	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_2), "test_2 default autoload");
+
+	err = struct_ops_autocreate__load(skel);
+	if (!ASSERT_OK(err, "struct_ops_autocreate__load"))
+		goto cleanup;
+
+	ASSERT_TRUE(bpf_program__autoload(skel->progs.test_1), "test_1 actual autoload");
+	ASSERT_FALSE(bpf_program__autoload(skel->progs.test_2), "test_2 actual autoload");
+
+	check_test_1_link(skel, skel->maps.testmod_1);
+
+cleanup:
+	struct_ops_autocreate__destroy(skel);
+}
+
+static void optional_maps(void)
+{
+	struct struct_ops_autocreate *skel;
+	int err;
+
+	skel = struct_ops_autocreate__open();
+	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate__open"))
+		return;
+
+	ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_1), "testmod_1 autocreate");
+	ASSERT_TRUE(bpf_map__autocreate(skel->maps.testmod_2), "testmod_2 autocreate");
+	ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map), "optional_map autocreate");
+	ASSERT_FALSE(bpf_map__autocreate(skel->maps.optional_map2), "optional_map2 autocreate");
+
+	err = bpf_map__set_autocreate(skel->maps.testmod_1, false);
+	err |= bpf_map__set_autocreate(skel->maps.testmod_2, false);
+	err |= bpf_map__set_autocreate(skel->maps.optional_map2, true);
+	if (!ASSERT_OK(err, "bpf_map__set_autocreate"))
+		goto cleanup;
+
+	err = struct_ops_autocreate__load(skel);
+	if (!ASSERT_OK(err, "struct_ops_autocreate__load"))
+		goto cleanup;
+
+	check_test_1_link(skel, skel->maps.optional_map2);
+
+cleanup:
+	struct_ops_autocreate__destroy(skel);
+}
+
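A condensed, hedged sketch of the shadow-variable technique these tests exercise (my_skel, my_ops, my_cb and my_cb_impl are placeholder names, not identifiers from this patch): a generated skeleton exposes each struct_ops map as a writable shadow struct, so assigning a program into a slot before load both selects the implementation and enables that program's autoload, while bpf_map__set_autocreate() drops maps the running kernel may not support.

	struct my_skel *skel = my_skel__open();

	if (!skel)
		return;

	/* pick the implementation before load; libbpf enables autoload for
	 * my_cb_impl because the map now references it
	 */
	skel->struct_ops.my_ops->my_cb = skel->progs.my_cb_impl;

	/* skip creating a map whose kernel-side type may be absent */
	bpf_map__set_autocreate(skel->maps.my_optional_ops, false);

	if (!my_skel__load(skel)) {
		struct bpf_link *link = bpf_map__attach_struct_ops(skel->maps.my_ops);

		bpf_link__destroy(link);
	}
	my_skel__destroy(skel);
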
+/* Swap test_mod1->test_1 program from 'bar' to 'foo' using shadow vars.
+ * test_mod1 load should enable autoload for 'foo'.
+ */
+static void autoload_and_shadow_vars(void)
+{
+	struct struct_ops_autocreate2 *skel = NULL;
+	struct bpf_link *link = NULL;
+	int err;
+
+	skel = struct_ops_autocreate2__open();
+	if (!ASSERT_OK_PTR(skel, "struct_ops_autocreate2__open"))
+		return;
+
+	ASSERT_FALSE(bpf_program__autoload(skel->progs.foo), "foo default autoload");
+	ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar default autoload");
+
+	/* loading map testmod_1 would switch foo's autoload to true */
+	skel->struct_ops.testmod_1->test_1 = skel->progs.foo;
+
+	err = struct_ops_autocreate2__load(skel);
+	if (!ASSERT_OK(err, "struct_ops_autocreate2__load"))
+		goto cleanup;
+
+	ASSERT_TRUE(bpf_program__autoload(skel->progs.foo), "foo actual autoload");
+	ASSERT_FALSE(bpf_program__autoload(skel->progs.bar), "bar actual autoload");
+
+	link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
+	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops"))
+		goto cleanup;
+
+	/* test_1() would be called from bpf_dummy_reg2() in bpf_testmod.c */
+	err = ASSERT_EQ(skel->bss->test_1_result, 42, "test_1_result");
+
+cleanup:
+	bpf_link__destroy(link);
+	struct_ops_autocreate2__destroy(skel);
+}
+
+void test_struct_ops_autocreate(void)
+{
+	if (test__start_subtest("cant_load_full_object"))
+		cant_load_full_object();
+	if (test__start_subtest("can_load_partial_object"))
+		can_load_partial_object();
+	if (test__start_subtest("autoload_and_shadow_vars"))
+		autoload_and_shadow_vars();
+	if (test__start_subtest("optional_maps"))
+		optional_maps();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
index ea8537c544..c33c05161a 100644
--- a/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
+++ b/tools/testing/selftests/bpf/prog_tests/task_local_storage.c
@@ -117,12 +117,6 @@ static void test_recursion(void)
 	ASSERT_OK(err, "lookup map_b");
 	ASSERT_EQ(value, 100, "map_b value");
 
-	prog_fd = bpf_program__fd(skel->progs.on_lookup);
-	memset(&info, 0, sizeof(info));
-	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
-	ASSERT_OK(err, "get prog info");
-	ASSERT_GT(info.recursion_misses, 0, "on_lookup prog recursion");
-
 	prog_fd = bpf_program__fd(skel->progs.on_update);
 	memset(&info, 0, sizeof(info));
 	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c
new file mode 100644
index 0000000000..eaf441dc7e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_custom_syncookie.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates.
*/ + +#define _GNU_SOURCE +#include <sched.h> +#include <stdlib.h> +#include <net/if.h> + +#include "test_progs.h" +#include "cgroup_helpers.h" +#include "network_helpers.h" +#include "test_tcp_custom_syncookie.skel.h" + +static struct test_tcp_custom_syncookie_case { + int family, type; + char addr[16]; + char name[10]; +} test_cases[] = { + { + .name = "IPv4 TCP", + .family = AF_INET, + .type = SOCK_STREAM, + .addr = "127.0.0.1", + }, + { + .name = "IPv6 TCP", + .family = AF_INET6, + .type = SOCK_STREAM, + .addr = "::1", + }, +}; + +static int setup_netns(void) +{ + if (!ASSERT_OK(unshare(CLONE_NEWNET), "create netns")) + return -1; + + if (!ASSERT_OK(system("ip link set dev lo up"), "ip")) + goto err; + + if (!ASSERT_OK(write_sysctl("/proc/sys/net/ipv4/tcp_ecn", "1"), + "write_sysctl")) + goto err; + + return 0; +err: + return -1; +} + +static int setup_tc(struct test_tcp_custom_syncookie *skel) +{ + LIBBPF_OPTS(bpf_tc_hook, qdisc_lo, .attach_point = BPF_TC_INGRESS); + LIBBPF_OPTS(bpf_tc_opts, tc_attach, + .prog_fd = bpf_program__fd(skel->progs.tcp_custom_syncookie)); + + qdisc_lo.ifindex = if_nametoindex("lo"); + if (!ASSERT_OK(bpf_tc_hook_create(&qdisc_lo), "qdisc add dev lo clsact")) + goto err; + + if (!ASSERT_OK(bpf_tc_attach(&qdisc_lo, &tc_attach), + "filter add dev lo ingress")) + goto err; + + return 0; +err: + return -1; +} + +#define msg "Hello World" +#define msglen 11 + +static void transfer_message(int sender, int receiver) +{ + char buf[msglen]; + int ret; + + ret = send(sender, msg, msglen, 0); + if (!ASSERT_EQ(ret, msglen, "send")) + return; + + memset(buf, 0, sizeof(buf)); + + ret = recv(receiver, buf, msglen, 0); + if (!ASSERT_EQ(ret, msglen, "recv")) + return; + + ret = strncmp(buf, msg, msglen); + if (!ASSERT_EQ(ret, 0, "strncmp")) + return; +} + +static void create_connection(struct test_tcp_custom_syncookie_case *test_case) +{ + int server, client, child; + + server = start_server(test_case->family, test_case->type, test_case->addr, 0, 0); + if (!ASSERT_NEQ(server, -1, "start_server")) + return; + + client = connect_to_fd(server, 0); + if (!ASSERT_NEQ(client, -1, "connect_to_fd")) + goto close_server; + + child = accept(server, NULL, 0); + if (!ASSERT_NEQ(child, -1, "accept")) + goto close_client; + + transfer_message(client, child); + transfer_message(child, client); + + close(child); +close_client: + close(client); +close_server: + close(server); +} + +void test_tcp_custom_syncookie(void) +{ + struct test_tcp_custom_syncookie *skel; + int i; + + if (setup_netns()) + return; + + skel = test_tcp_custom_syncookie__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + return; + + if (setup_tc(skel)) + goto destroy_skel; + + for (i = 0; i < ARRAY_SIZE(test_cases); i++) { + if (!test__start_subtest(test_cases[i].name)) + continue; + + skel->bss->handled_syn = false; + skel->bss->handled_ack = false; + + create_connection(&test_cases[i]); + + ASSERT_EQ(skel->bss->handled_syn, true, "SYN is not handled at tc."); + ASSERT_EQ(skel->bss->handled_ack, true, "ACK is not handled at tc"); + } + +destroy_skel: + system("tc qdisc del dev lo clsact"); + + test_tcp_custom_syncookie__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c new file mode 100644 index 0000000000..01dc2613c8 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_maybe_null.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta 
Platforms, Inc. and affiliates. */ +#include <test_progs.h> + +#include "struct_ops_maybe_null.skel.h" +#include "struct_ops_maybe_null_fail.skel.h" + +/* Test that the verifier accepts a program that access a nullable pointer + * with a proper check. + */ +static void maybe_null(void) +{ + struct struct_ops_maybe_null *skel; + + skel = struct_ops_maybe_null__open_and_load(); + if (!ASSERT_OK_PTR(skel, "struct_ops_module_open_and_load")) + return; + + struct_ops_maybe_null__destroy(skel); +} + +/* Test that the verifier rejects a program that access a nullable pointer + * without a check beforehand. + */ +static void maybe_null_fail(void) +{ + struct struct_ops_maybe_null_fail *skel; + + skel = struct_ops_maybe_null_fail__open_and_load(); + if (ASSERT_ERR_PTR(skel, "struct_ops_module_fail__open_and_load")) + return; + + struct_ops_maybe_null_fail__destroy(skel); +} + +void test_struct_ops_maybe_null(void) +{ + /* The verifier verifies the programs at load time, so testing both + * programs in the same compile-unit is complicated. We run them in + * separate objects to simplify the testing. + */ + if (test__start_subtest("maybe_null")) + maybe_null(); + if (test__start_subtest("maybe_null_fail")) + maybe_null_fail(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c new file mode 100644 index 0000000000..ee5372c7f2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> +#include <time.h> + +#include "struct_ops_module.skel.h" + +static void check_map_info(struct bpf_map_info *info) +{ + struct bpf_btf_info btf_info; + char btf_name[256]; + u32 btf_info_len = sizeof(btf_info); + int err, fd; + + fd = bpf_btf_get_fd_by_id(info->btf_vmlinux_id); + if (!ASSERT_GE(fd, 0, "get_value_type_btf_obj_fd")) + return; + + memset(&btf_info, 0, sizeof(btf_info)); + btf_info.name = ptr_to_u64(btf_name); + btf_info.name_len = sizeof(btf_name); + err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_info_len); + if (!ASSERT_OK(err, "get_value_type_btf_obj_info")) + goto cleanup; + + if (!ASSERT_EQ(strcmp(btf_name, "bpf_testmod"), 0, "get_value_type_btf_obj_name")) + goto cleanup; + +cleanup: + close(fd); +} + +static int attach_ops_and_check(struct struct_ops_module *skel, + struct bpf_map *map, + int expected_test_2_result) +{ + struct bpf_link *link; + + link = bpf_map__attach_struct_ops(map); + ASSERT_OK_PTR(link, "attach_test_mod_1"); + if (!link) + return -1; + + /* test_{1,2}() would be called from bpf_dummy_reg() in bpf_testmod.c */ + ASSERT_EQ(skel->bss->test_1_result, 0xdeadbeef, "test_1_result"); + ASSERT_EQ(skel->bss->test_2_result, expected_test_2_result, "test_2_result"); + + bpf_link__destroy(link); + return 0; +} + +static void test_struct_ops_load(void) +{ + struct struct_ops_module *skel; + struct bpf_map_info info = {}; + int err; + u32 len; + + skel = struct_ops_module__open(); + if (!ASSERT_OK_PTR(skel, "struct_ops_module_open")) + return; + + skel->struct_ops.testmod_1->data = 13; + skel->struct_ops.testmod_1->test_2 = skel->progs.test_3; + /* Since test_2() is not being used, it should be disabled from + * auto-loading, or it will fail to load. 
+ */
+	bpf_program__set_autoload(skel->progs.test_2, false);
+
+	err = struct_ops_module__load(skel);
+	if (!ASSERT_OK(err, "struct_ops_module_load"))
+		goto cleanup;
+
+	len = sizeof(info);
+	err = bpf_map_get_info_by_fd(bpf_map__fd(skel->maps.testmod_1), &info,
+				     &len);
+	if (!ASSERT_OK(err, "bpf_map_get_info_by_fd"))
+		goto cleanup;
+
+	check_map_info(&info);
+	/* test_3() will be called from bpf_dummy_reg() in bpf_testmod.c
+	 *
+	 * In bpf_testmod.c it will pass 4 and 13 (the value of data) to
+	 * .test_2. So, the value of test_2_result should be 20 (4 + 13 +
+	 * 3).
+	 */
+	if (attach_ops_and_check(skel, skel->maps.testmod_1, 20))
+		goto cleanup;
+	if (attach_ops_and_check(skel, skel->maps.testmod_2, 12))
+		goto cleanup;
+
+cleanup:
+	struct_ops_module__destroy(skel);
+}
+
+void serial_test_struct_ops_module(void)
+{
+	if (test__start_subtest("test_struct_ops_load"))
+		test_struct_ops_load();
+}
+
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c
new file mode 100644
index 0000000000..645d32b516
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_multi_pages.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+
+#include "struct_ops_multi_pages.skel.h"
+
+static void do_struct_ops_multi_pages(void)
+{
+	struct struct_ops_multi_pages *skel;
+	struct bpf_link *link;
+
+	/* The size of all trampolines of skel->maps.multi_pages should be
+	 * over 1 page (at least for x86).
+	 */
+	skel = struct_ops_multi_pages__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "struct_ops_multi_pages_open_and_load"))
+		return;
+
+	link = bpf_map__attach_struct_ops(skel->maps.multi_pages);
+	ASSERT_OK_PTR(link, "attach_multi_pages");
+
+	bpf_link__destroy(link);
+	struct_ops_multi_pages__destroy(skel);
+}
+
+void test_struct_ops_multi_pages(void)
+{
+	if (test__start_subtest("multi_pages"))
+		do_struct_ops_multi_pages();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c
new file mode 100644
index 0000000000..106ea44796
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_no_cfi.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <testing_helpers.h>
+
+static void load_bpf_test_no_cfi(void)
+{
+	int fd;
+	int err;
+
+	fd = open("bpf_test_no_cfi.ko", O_RDONLY);
+	if (!ASSERT_GE(fd, 0, "open"))
+		return;
+
+	/* The module will try to register a struct_ops type without
+	 * cfi_stubs and with cfi_stubs.
+	 *
+	 * The registration without cfi_stubs should fail. The module loads
+	 * successfully only if both registrations produce the expected
+	 * results; otherwise its init fails and the load is rejected.
+	 */
+	err = finit_module(fd, "", 0);
+	close(fd);
+	if (!ASSERT_OK(err, "finit_module"))
+		return;
+
+	err = delete_module("bpf_test_no_cfi", 0);
+	ASSERT_OK(err, "delete_module");
+}
+
+void test_struct_ops_no_cfi(void)
+{
+	if (test__start_subtest("load_bpf_test_no_cfi"))
+		load_bpf_test_no_cfi();
+}
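For reference, a minimal sketch of the raw-syscall pattern behind the finit_module()/delete_module() calls used in load_bpf_test_no_cfi() above; the selftests appear to get equivalent wrappers from testing_helpers.h, and this standalone version (hypothetical sys_-prefixed names) only illustrates the mechanism:

	#include <errno.h>
	#include <fcntl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int sys_finit_module(int fd, const char *params, int flags)
	{
		return syscall(__NR_finit_module, fd, params, flags);
	}

	static int sys_delete_module(const char *name, int flags)
	{
		return syscall(__NR_delete_module, name, flags);
	}

	static int load_and_unload(const char *path, const char *name)
	{
		int fd = open(path, O_RDONLY);
		int err;

		if (fd < 0)
			return -errno;
		/* kernel verifies the ELF image and runs the module's init */
		err = sys_finit_module(fd, "", 0);
		if (err)
			err = -errno;
		close(fd);
		if (err)
			return err;
		return sys_delete_module(name, 0) ? -errno : 0;
	}
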
diff --git a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
index 2b3c6dd662..5f1fb0a2ea 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_tunnel.c
@@ -118,9 +118,9 @@ fail:
 static void cleanup(void)
 {
 	SYS_NOFAIL("test -f /var/run/netns/at_ns0 && ip netns delete at_ns0");
-	SYS_NOFAIL("ip link del veth1 2> /dev/null");
-	SYS_NOFAIL("ip link del %s 2> /dev/null", VXLAN_TUNL_DEV1);
-	SYS_NOFAIL("ip link del %s 2> /dev/null", IP6VXLAN_TUNL_DEV1);
+	SYS_NOFAIL("ip link del veth1");
+	SYS_NOFAIL("ip link del %s", VXLAN_TUNL_DEV1);
+	SYS_NOFAIL("ip link del %s", IP6VXLAN_TUNL_DEV1);
 }
 
 static int add_vxlan_tunnel(void)
@@ -265,9 +265,9 @@ fail:
 static void delete_ipip_tunnel(void)
 {
 	SYS_NOFAIL("ip -n at_ns0 link delete dev %s", IPIP_TUNL_DEV0);
-	SYS_NOFAIL("ip -n at_ns0 fou del port 5555 2> /dev/null");
+	SYS_NOFAIL("ip -n at_ns0 fou del port 5555");
 	SYS_NOFAIL("ip link delete dev %s", IPIP_TUNL_DEV1);
-	SYS_NOFAIL("ip fou del port 5555 2> /dev/null");
+	SYS_NOFAIL("ip fou del port 5555");
 }
 
 static int add_xfrm_tunnel(void)
@@ -346,13 +346,13 @@ fail:
 static void delete_xfrm_tunnel(void)
 {
-	SYS_NOFAIL("ip xfrm policy delete dir out src %s/32 dst %s/32 2> /dev/null",
+	SYS_NOFAIL("ip xfrm policy delete dir out src %s/32 dst %s/32",
 		   IP4_ADDR_TUNL_DEV1, IP4_ADDR_TUNL_DEV0);
-	SYS_NOFAIL("ip xfrm policy delete dir in src %s/32 dst %s/32 2> /dev/null",
+	SYS_NOFAIL("ip xfrm policy delete dir in src %s/32 dst %s/32",
 		   IP4_ADDR_TUNL_DEV0, IP4_ADDR_TUNL_DEV1);
-	SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d 2> /dev/null",
+	SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d",
 		   IP4_ADDR_VETH0, IP4_ADDR1_VETH1, XFRM_SPI_IN_TO_OUT);
-	SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d 2> /dev/null",
+	SYS_NOFAIL("ip xfrm state delete src %s dst %s proto esp spi %d",
 		   IP4_ADDR1_VETH1, IP4_ADDR_VETH0, XFRM_SPI_OUT_TO_IN);
 }
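The token.c tests that follow are easier to read with the consumer-side flow in mind. A hedged sketch (illustrative only, using the same libbpf calls the tests themselves use): given an fd inside a BPF FS instance mounted with delegate_* options, bpf_token_create() mints a token, and the BPF_F_TOKEN_FD flag passes it to commands such as BPF_MAP_CREATE.

	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int token_fd, map_fd;

	/* bpffs_fd: open fd inside a BPF FS mounted with delegation options */
	token_fd = bpf_token_create(bpffs_fd, NULL);
	if (token_fd < 0)
		return -errno;

	opts.map_flags = BPF_F_TOKEN_FD;
	opts.token_fd = token_fd;
	/* succeeds only if delegate_cmds covers map_create and the caller
	 * holds CAP_BPF in its (possibly non-init) user namespace
	 */
	map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "with_token", 0, 8, 1, &opts);
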
diff --git a/tools/testing/selftests/bpf/prog_tests/token.c b/tools/testing/selftests/bpf/prog_tests/token.c
new file mode 100644
index 0000000000..fc4a175d8d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/token.c
@@ -0,0 +1,1052 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include "cap_helpers.h"
+#include <fcntl.h>
+#include <sched.h>
+#include <signal.h>
+#include <unistd.h>
+#include <linux/filter.h>
+#include <linux/unistd.h>
+#include <linux/mount.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/un.h>
+#include "priv_map.skel.h"
+#include "priv_prog.skel.h"
+#include "dummy_st_ops_success.skel.h"
+#include "token_lsm.skel.h"
+
+static inline int sys_mount(const char *dev_name, const char *dir_name,
+			    const char *type, unsigned long flags,
+			    const void *data)
+{
+	return syscall(__NR_mount, dev_name, dir_name, type, flags, data);
+}
+
+static inline int sys_fsopen(const char *fsname, unsigned flags)
+{
+	return syscall(__NR_fsopen, fsname, flags);
+}
+
+static inline int sys_fspick(int dfd, const char *path, unsigned flags)
+{
+	return syscall(__NR_fspick, dfd, path, flags);
+}
+
+static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux)
+{
+	return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux);
+}
+
+static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags)
+{
+	return syscall(__NR_fsmount, fs_fd, flags, ms_flags);
+}
+
+static inline int sys_move_mount(int from_dfd, const char *from_path,
+				 int to_dfd, const char *to_path,
+				 unsigned flags)
+{
+	return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, flags);
+}
+
+static int drop_priv_caps(__u64 *old_caps)
+{
+	return cap_disable_effective((1ULL << CAP_BPF) |
+				     (1ULL << CAP_PERFMON) |
+				     (1ULL << CAP_NET_ADMIN) |
+				     (1ULL << CAP_SYS_ADMIN), old_caps);
+}
+
+static int restore_priv_caps(__u64 old_caps)
+{
+	return cap_enable_effective(old_caps, NULL);
+}
+
+static int set_delegate_mask(int fs_fd, const char *key, __u64 mask, const char *mask_str)
+{
+	char buf[32];
+	int err;
+
+	if (!mask_str) {
+		if (mask == ~0ULL) {
+			mask_str = "any";
+		} else {
+			snprintf(buf, sizeof(buf), "0x%llx", (unsigned long long)mask);
+			mask_str = buf;
+		}
+	}
+
+	err = sys_fsconfig(fs_fd, FSCONFIG_SET_STRING, key,
+			   mask_str, 0);
+	if (err < 0)
+		err = -errno;
+	return err;
+}
+
+#define zclose(fd) do { if (fd >= 0) close(fd); fd = -1; } while (0)
+
+struct bpffs_opts {
+	__u64 cmds;
+	__u64 maps;
+	__u64 progs;
+	__u64 attachs;
+	const char *cmds_str;
+	const char *maps_str;
+	const char *progs_str;
+	const char *attachs_str;
+};
+
+static int create_bpffs_fd(void)
+{
+	int fs_fd;
+
+	/* create VFS context */
+	fs_fd = sys_fsopen("bpf", 0);
+	ASSERT_GE(fs_fd, 0, "fs_fd");
+
+	return fs_fd;
+}
+
+static int materialize_bpffs_fd(int fs_fd, struct bpffs_opts *opts)
+{
+	int mnt_fd, err;
+
+	/* set up token delegation mount options */
+	err = set_delegate_mask(fs_fd, "delegate_cmds", opts->cmds, opts->cmds_str);
+	if (!ASSERT_OK(err, "fs_cfg_cmds"))
+		return err;
+	err = set_delegate_mask(fs_fd, "delegate_maps", opts->maps, opts->maps_str);
+	if (!ASSERT_OK(err, "fs_cfg_maps"))
+		return err;
+	err = set_delegate_mask(fs_fd, "delegate_progs", opts->progs, opts->progs_str);
+	if (!ASSERT_OK(err, "fs_cfg_progs"))
+		return err;
+	err = set_delegate_mask(fs_fd, "delegate_attachs", opts->attachs, opts->attachs_str);
+	if (!ASSERT_OK(err, "fs_cfg_attachs"))
+		return err;
+
+	/* instantiate FS object */
+	err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
+	if (err < 0)
+		return -errno;
+
+	/* create O_PATH fd for detached mount */
+	mnt_fd = sys_fsmount(fs_fd, 0, 0);
+	if (mnt_fd < 0)
+		return -errno;
+
+	return mnt_fd;
+}
+
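The new-mount-API sequence wrapped by the helpers above, distilled into a hedged sketch (it reuses the sys_* wrappers defined earlier in this file; bpffs_with_delegation is a hypothetical name): note that fsmount() reports failure through its own return value, so the returned fd is what must be checked.

	static int bpffs_with_delegation(void)
	{
		int fs_fd, mnt_fd;

		fs_fd = sys_fsopen("bpf", 0);	/* new, unconfigured superblock context */
		if (fs_fd < 0)
			return -errno;

		/* delegation options must be set before FSCONFIG_CMD_CREATE */
		if (sys_fsconfig(fs_fd, FSCONFIG_SET_STRING, "delegate_cmds", "any", 0) < 0 ||
		    sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
			mnt_fd = -errno;
			close(fs_fd);
			return mnt_fd;
		}

		/* detached mount fd, usable via openat()/fspick() or attachable
		 * later with move_mount()
		 */
		mnt_fd = sys_fsmount(fs_fd, 0, 0);
		if (mnt_fd < 0)
			mnt_fd = -errno;
		close(fs_fd);
		return mnt_fd;
	}
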
+/* send FD over Unix domain (AF_UNIX) socket */
+static int sendfd(int sockfd, int fd)
+{
+	struct msghdr msg = {};
+	struct cmsghdr *cmsg;
+	int fds[1] = { fd }, err;
+	char iobuf[1];
+	struct iovec io = {
+		.iov_base = iobuf,
+		.iov_len = sizeof(iobuf),
+	};
+	union {
+		char buf[CMSG_SPACE(sizeof(fds))];
+		struct cmsghdr align;
+	} u;
+
+	msg.msg_iov = &io;
+	msg.msg_iovlen = 1;
+	msg.msg_control = u.buf;
+	msg.msg_controllen = sizeof(u.buf);
+	cmsg = CMSG_FIRSTHDR(&msg);
+	cmsg->cmsg_level = SOL_SOCKET;
+	cmsg->cmsg_type = SCM_RIGHTS;
+	cmsg->cmsg_len = CMSG_LEN(sizeof(fds));
+	memcpy(CMSG_DATA(cmsg), fds, sizeof(fds));
+
+	err = sendmsg(sockfd, &msg, 0);
+	if (err < 0)
+		err = -errno;
+	if (!ASSERT_EQ(err, 1, "sendmsg"))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* receive FD over Unix domain (AF_UNIX) socket */
+static int recvfd(int sockfd, int *fd)
+{
+	struct msghdr msg = {};
+	struct cmsghdr *cmsg;
+	int fds[1], err;
+	char iobuf[1];
+	struct iovec io = {
+		.iov_base = iobuf,
+		.iov_len = sizeof(iobuf),
+	};
+	union {
+		char buf[CMSG_SPACE(sizeof(fds))];
+		struct cmsghdr align;
+	} u;
+
+	msg.msg_iov = &io;
+	msg.msg_iovlen = 1;
+	msg.msg_control = u.buf;
+	msg.msg_controllen = sizeof(u.buf);
+
+	err = recvmsg(sockfd, &msg, 0);
+	if (err < 0)
+		err = -errno;
+	if (!ASSERT_EQ(err, 1, "recvmsg"))
+		return -EINVAL;
+
+	cmsg = CMSG_FIRSTHDR(&msg);
+	if (!ASSERT_OK_PTR(cmsg, "cmsg_null") ||
+	    !ASSERT_EQ(cmsg->cmsg_len, CMSG_LEN(sizeof(fds)), "cmsg_len") ||
+	    !ASSERT_EQ(cmsg->cmsg_level, SOL_SOCKET, "cmsg_level") ||
+	    !ASSERT_EQ(cmsg->cmsg_type, SCM_RIGHTS, "cmsg_type"))
+		return -EINVAL;
+
+	memcpy(fds, CMSG_DATA(cmsg), sizeof(fds));
+	*fd = fds[0];
+
+	return 0;
+}
+
+static ssize_t write_nointr(int fd, const void *buf, size_t count)
+{
+	ssize_t ret;
+
+	do {
+		ret = write(fd, buf, count);
+	} while (ret < 0 && errno == EINTR);
+
+	return ret;
+}
+
+static int write_file(const char *path, const void *buf, size_t count)
+{
+	int fd;
+	ssize_t ret;
+
+	fd = open(path, O_WRONLY | O_CLOEXEC | O_NOCTTY | O_NOFOLLOW);
+	if (fd < 0)
+		return -1;
+
+	ret = write_nointr(fd, buf, count);
+	close(fd);
+	if (ret < 0 || (size_t)ret != count)
+		return -1;
+
+	return 0;
+}
+
+static int create_and_enter_userns(void)
+{
+	uid_t uid;
+	gid_t gid;
+	char map[100];
+
+	uid = getuid();
+	gid = getgid();
+
+	if (unshare(CLONE_NEWUSER))
+		return -1;
+
+	if (write_file("/proc/self/setgroups", "deny", sizeof("deny") - 1) &&
+	    errno != ENOENT)
+		return -1;
+
+	snprintf(map, sizeof(map), "0 %d 1", uid);
+	if (write_file("/proc/self/uid_map", map, strlen(map)))
+		return -1;
+
+	snprintf(map, sizeof(map), "0 %d 1", gid);
+	if (write_file("/proc/self/gid_map", map, strlen(map)))
+		return -1;
+
+	if (setgid(0))
+		return -1;
+
+	if (setuid(0))
+		return -1;
+
+	return 0;
+}
+
+typedef int (*child_callback_fn)(int bpffs_fd, struct token_lsm *lsm_skel);
+
+static void child(int sock_fd, struct bpffs_opts *opts, child_callback_fn callback)
+{
+	int mnt_fd = -1, fs_fd = -1, err = 0, bpffs_fd = -1, token_fd = -1;
+	struct token_lsm *lsm_skel = NULL;
+
+	/* load and attach LSM "policy" before we go into unpriv userns */
+	lsm_skel = token_lsm__open_and_load();
+	if (!ASSERT_OK_PTR(lsm_skel, "lsm_skel_load")) {
+		err = -EINVAL;
+		goto cleanup;
+	}
+	lsm_skel->bss->my_pid = getpid();
+	err = token_lsm__attach(lsm_skel);
+	if (!ASSERT_OK(err, "lsm_skel_attach"))
+		goto cleanup;
+
+	/* setup userns with root mappings */
+	err = create_and_enter_userns();
+	if (!ASSERT_OK(err, "create_and_enter_userns"))
+		goto cleanup;
+
+	/* setup
mountns to allow creating BPF FS (fsopen("bpf")) from unpriv process */ + err = unshare(CLONE_NEWNS); + if (!ASSERT_OK(err, "create_mountns")) + goto cleanup; + + err = sys_mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0); + if (!ASSERT_OK(err, "remount_root")) + goto cleanup; + + fs_fd = create_bpffs_fd(); + if (!ASSERT_GE(fs_fd, 0, "create_bpffs_fd")) { + err = -EINVAL; + goto cleanup; + } + + /* ensure unprivileged child cannot set delegation options */ + err = set_delegate_mask(fs_fd, "delegate_cmds", 0x1, NULL); + ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm"); + err = set_delegate_mask(fs_fd, "delegate_maps", 0x1, NULL); + ASSERT_EQ(err, -EPERM, "delegate_maps_eperm"); + err = set_delegate_mask(fs_fd, "delegate_progs", 0x1, NULL); + ASSERT_EQ(err, -EPERM, "delegate_progs_eperm"); + err = set_delegate_mask(fs_fd, "delegate_attachs", 0x1, NULL); + ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm"); + + /* pass BPF FS context object to parent */ + err = sendfd(sock_fd, fs_fd); + if (!ASSERT_OK(err, "send_fs_fd")) + goto cleanup; + zclose(fs_fd); + + /* avoid mucking around with mount namespaces and mounting at + * well-known path, just get detach-mounted BPF FS fd back from parent + */ + err = recvfd(sock_fd, &mnt_fd); + if (!ASSERT_OK(err, "recv_mnt_fd")) + goto cleanup; + + /* try to fspick() BPF FS and try to add some delegation options */ + fs_fd = sys_fspick(mnt_fd, "", FSPICK_EMPTY_PATH); + if (!ASSERT_GE(fs_fd, 0, "bpffs_fspick")) { + err = -EINVAL; + goto cleanup; + } + + /* ensure unprivileged child cannot reconfigure to set delegation options */ + err = set_delegate_mask(fs_fd, "delegate_cmds", 0, "any"); + if (!ASSERT_EQ(err, -EPERM, "delegate_cmd_eperm_reconfig")) { + err = -EINVAL; + goto cleanup; + } + err = set_delegate_mask(fs_fd, "delegate_maps", 0, "any"); + if (!ASSERT_EQ(err, -EPERM, "delegate_maps_eperm_reconfig")) { + err = -EINVAL; + goto cleanup; + } + err = set_delegate_mask(fs_fd, "delegate_progs", 0, "any"); + if (!ASSERT_EQ(err, -EPERM, "delegate_progs_eperm_reconfig")) { + err = -EINVAL; + goto cleanup; + } + err = set_delegate_mask(fs_fd, "delegate_attachs", 0, "any"); + if (!ASSERT_EQ(err, -EPERM, "delegate_attachs_eperm_reconfig")) { + err = -EINVAL; + goto cleanup; + } + zclose(fs_fd); + + bpffs_fd = openat(mnt_fd, ".", 0, O_RDWR); + if (!ASSERT_GE(bpffs_fd, 0, "bpffs_open")) { + err = -EINVAL; + goto cleanup; + } + + /* create BPF token FD and pass it to parent for some extra checks */ + token_fd = bpf_token_create(bpffs_fd, NULL); + if (!ASSERT_GT(token_fd, 0, "child_token_create")) { + err = -EINVAL; + goto cleanup; + } + err = sendfd(sock_fd, token_fd); + if (!ASSERT_OK(err, "send_token_fd")) + goto cleanup; + zclose(token_fd); + + /* do custom test logic with customly set up BPF FS instance */ + err = callback(bpffs_fd, lsm_skel); + if (!ASSERT_OK(err, "test_callback")) + goto cleanup; + + err = 0; +cleanup: + zclose(sock_fd); + zclose(mnt_fd); + zclose(fs_fd); + zclose(bpffs_fd); + zclose(token_fd); + + lsm_skel->bss->my_pid = 0; + token_lsm__destroy(lsm_skel); + + exit(-err); +} + +static int wait_for_pid(pid_t pid) +{ + int status, ret; + +again: + ret = waitpid(pid, &status, 0); + if (ret == -1) { + if (errno == EINTR) + goto again; + + return -1; + } + + if (!WIFEXITED(status)) + return -1; + + return WEXITSTATUS(status); +} + +static void parent(int child_pid, struct bpffs_opts *bpffs_opts, int sock_fd) +{ + int fs_fd = -1, mnt_fd = -1, token_fd = -1, err; + + err = recvfd(sock_fd, &fs_fd); + if (!ASSERT_OK(err, "recv_bpffs_fd")) + goto cleanup; + + 
+ mnt_fd = materialize_bpffs_fd(fs_fd, bpffs_opts); + if (!ASSERT_GE(mnt_fd, 0, "materialize_bpffs_fd")) { + err = -EINVAL; + goto cleanup; + } + zclose(fs_fd); + + /* pass the detach-mounted BPF FS fd back to the child */ + err = sendfd(sock_fd, mnt_fd); + if (!ASSERT_OK(err, "send_mnt_fd")) + goto cleanup; + zclose(mnt_fd); + + /* receive BPF token FD back from child for some extra tests */ + err = recvfd(sock_fd, &token_fd); + if (!ASSERT_OK(err, "recv_token_fd")) + goto cleanup; + + err = wait_for_pid(child_pid); + ASSERT_OK(err, "waitpid_child"); + +cleanup: + zclose(sock_fd); + zclose(fs_fd); + zclose(mnt_fd); + zclose(token_fd); + + if (child_pid > 0) + (void)kill(child_pid, SIGKILL); +} + +static void subtest_userns(struct bpffs_opts *bpffs_opts, + child_callback_fn child_cb) +{ + int sock_fds[2] = { -1, -1 }; + int child_pid = 0, err; + + err = socketpair(AF_UNIX, SOCK_STREAM, 0, sock_fds); + if (!ASSERT_OK(err, "socketpair")) + goto cleanup; + + child_pid = fork(); + if (!ASSERT_GE(child_pid, 0, "fork")) + goto cleanup; + + if (child_pid == 0) { + zclose(sock_fds[0]); + return child(sock_fds[1], bpffs_opts, child_cb); + + } else { + zclose(sock_fds[1]); + return parent(child_pid, bpffs_opts, sock_fds[0]); + } + +cleanup: + zclose(sock_fds[0]); + zclose(sock_fds[1]); + if (child_pid > 0) + (void)kill(child_pid, SIGKILL); +} + +static int userns_map_create(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_map_create_opts, map_opts); + int err, token_fd = -1, map_fd = -1; + __u64 old_caps = 0; + + /* create BPF token from BPF FS mount */ + token_fd = bpf_token_create(mnt_fd, NULL); + if (!ASSERT_GT(token_fd, 0, "token_create")) { + err = -EINVAL; + goto cleanup; + } + + /* while inside non-init userns, we need both a BPF token *and* + * CAP_BPF inside current userns to create privileged map; let's test + * that neither BPF token alone nor namespaced CAP_BPF is sufficient + */ + err = drop_priv_caps(&old_caps); + if (!ASSERT_OK(err, "drop_caps")) + goto cleanup; + + /* no token, no CAP_BPF -> fail */ + map_opts.map_flags = 0; + map_opts.token_fd = 0; + map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_wo_bpf", 0, 8, 1, &map_opts); + if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_wo_cap_bpf_should_fail")) { + err = -EINVAL; + goto cleanup; + } + + /* token without CAP_BPF -> fail */ + map_opts.map_flags = BPF_F_TOKEN_FD; + map_opts.token_fd = token_fd; + map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_wo_bpf", 0, 8, 1, &map_opts); + if (!ASSERT_LT(map_fd, 0, "stack_map_w_token_wo_cap_bpf_should_fail")) { + err = -EINVAL; + goto cleanup; + } + + /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */ + err = restore_priv_caps(old_caps); + if (!ASSERT_OK(err, "restore_caps")) + goto cleanup; + + /* CAP_BPF without token -> fail */ + map_opts.map_flags = 0; + map_opts.token_fd = 0; + map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "wo_token_w_bpf", 0, 8, 1, &map_opts); + if (!ASSERT_LT(map_fd, 0, "stack_map_wo_token_w_cap_bpf_should_fail")) { + err = -EINVAL; + goto cleanup; + } + + /* finally, namespaced CAP_BPF + token -> success */ + map_opts.map_flags = BPF_F_TOKEN_FD; + map_opts.token_fd = token_fd; + map_fd = bpf_map_create(BPF_MAP_TYPE_STACK, "w_token_w_bpf", 0, 8, 1, &map_opts); + if (!ASSERT_GT(map_fd, 0, "stack_map_w_token_w_cap_bpf")) { + err = -EINVAL; + goto cleanup; + } + +cleanup: + zclose(token_fd); + zclose(map_fd); + return err; +} + +static int userns_btf_load(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_btf_load_opts, btf_opts); + int err, token_fd =
-1, btf_fd = -1; + const void *raw_btf_data; + struct btf *btf = NULL; + __u32 raw_btf_size; + __u64 old_caps = 0; + + /* create BPF token from BPF FS mount */ + token_fd = bpf_token_create(mnt_fd, NULL); + if (!ASSERT_GT(token_fd, 0, "token_create")) { + err = -EINVAL; + goto cleanup; + } + + /* while inside non-init userns, we need both a BPF token *and* + * CAP_BPF inside current userns to load BTF; let's test + * that neither BPF token alone nor namespaced CAP_BPF is sufficient + */ + err = drop_priv_caps(&old_caps); + if (!ASSERT_OK(err, "drop_caps")) + goto cleanup; + + /* set up trivial BTF data to load into the kernel */ + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "empty_btf")) + goto cleanup; + + ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "int_type"); + + raw_btf_data = btf__raw_data(btf, &raw_btf_size); + if (!ASSERT_OK_PTR(raw_btf_data, "raw_btf_data")) + goto cleanup; + + /* no token + no CAP_BPF -> failure */ + btf_opts.btf_flags = 0; + btf_opts.token_fd = 0; + btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); + if (!ASSERT_LT(btf_fd, 0, "no_token_no_cap_should_fail")) + goto cleanup; + + /* token + no CAP_BPF -> failure */ + btf_opts.btf_flags = BPF_F_TOKEN_FD; + btf_opts.token_fd = token_fd; + btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); + if (!ASSERT_LT(btf_fd, 0, "token_no_cap_should_fail")) + goto cleanup; + + /* get back effective local CAP_BPF (and CAP_SYS_ADMIN) */ + err = restore_priv_caps(old_caps); + if (!ASSERT_OK(err, "restore_caps")) + goto cleanup; + + /* token + CAP_BPF -> success */ + btf_opts.btf_flags = BPF_F_TOKEN_FD; + btf_opts.token_fd = token_fd; + btf_fd = bpf_btf_load(raw_btf_data, raw_btf_size, &btf_opts); + if (!ASSERT_GT(btf_fd, 0, "token_and_cap_success")) + goto cleanup; + + err = 0; +cleanup: + btf__free(btf); + zclose(btf_fd); + zclose(token_fd); + return err; +} + +static int userns_prog_load(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_prog_load_opts, prog_opts); + int err, token_fd = -1, prog_fd = -1; + struct bpf_insn insns[] = { + /* bpf_jiffies64() requires CAP_BPF */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_jiffies64), + /* bpf_get_current_task() requires CAP_PERFMON */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_current_task), + /* r0 = 0; exit; */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + size_t insn_cnt = ARRAY_SIZE(insns); + __u64 old_caps = 0; + + /* create BPF token from BPF FS mount */ + token_fd = bpf_token_create(mnt_fd, NULL); + if (!ASSERT_GT(token_fd, 0, "token_create")) { + err = -EINVAL; + goto cleanup; + } + + /* validate we can successfully load BPF program with token; this + * being an XDP program (CAP_NET_ADMIN) using the bpf_jiffies64() (CAP_BPF) + * and bpf_get_current_task() (CAP_PERFMON) helpers validates that the + * BPF token is wired properly in a bunch of places in the kernel + */ + prog_opts.prog_flags = BPF_F_TOKEN_FD; + prog_opts.token_fd = token_fd; + prog_opts.expected_attach_type = BPF_XDP; + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", + insns, insn_cnt, &prog_opts); + if (!ASSERT_GT(prog_fd, 0, "prog_fd")) { + err = -EPERM; + goto cleanup; + } + + /* no token + caps -> failure */ + prog_opts.prog_flags = 0; + prog_opts.token_fd = 0; + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", + insns, insn_cnt, &prog_opts); + if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { + err = -EPERM; + goto cleanup; + } + + err = drop_priv_caps(&old_caps); + if (!ASSERT_OK(err, "drop_caps")) +
goto cleanup; + + /* no caps + token -> failure */ + prog_opts.prog_flags = BPF_F_TOKEN_FD; + prog_opts.token_fd = token_fd; + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", + insns, insn_cnt, &prog_opts); + if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { + err = -EPERM; + goto cleanup; + } + + /* no caps + no token -> definitely a failure */ + prog_opts.prog_flags = 0; + prog_opts.token_fd = 0; + prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "token_prog", "GPL", + insns, insn_cnt, &prog_opts); + if (!ASSERT_EQ(prog_fd, -EPERM, "prog_fd_eperm")) { + err = -EPERM; + goto cleanup; + } + + err = 0; +cleanup: + zclose(prog_fd); + zclose(token_fd); + return err; +} + +static int userns_obj_priv_map(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + char buf[256]; + struct priv_map *skel; + int err; + + skel = priv_map__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { + priv_map__destroy(skel); + return -EINVAL; + } + + /* use bpf_token_path to provide BPF FS path */ + snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); + opts.bpf_token_path = buf; + skel = priv_map__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_token_path_open")) + return -EINVAL; + + err = priv_map__load(skel); + priv_map__destroy(skel); + if (!ASSERT_OK(err, "obj_token_path_load")) + return -EINVAL; + + return 0; +} + +static int userns_obj_priv_prog(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + char buf[256]; + struct priv_prog *skel; + int err; + + skel = priv_prog__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { + priv_prog__destroy(skel); + return -EINVAL; + } + + /* use bpf_token_path to provide BPF FS path */ + snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); + opts.bpf_token_path = buf; + skel = priv_prog__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_token_path_open")) + return -EINVAL; + err = priv_prog__load(skel); + priv_prog__destroy(skel); + if (!ASSERT_OK(err, "obj_token_path_load")) + return -EINVAL; + + /* provide BPF token, but reject bpf_token_capable() with LSM */ + lsm_skel->bss->reject_capable = true; + lsm_skel->bss->reject_cmd = false; + skel = priv_prog__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cap_open")) + return -EINVAL; + err = priv_prog__load(skel); + priv_prog__destroy(skel); + if (!ASSERT_ERR(err, "obj_token_lsm_reject_cap_load")) + return -EINVAL; + + /* provide BPF token, but reject bpf_token_cmd() with LSM */ + lsm_skel->bss->reject_capable = false; + lsm_skel->bss->reject_cmd = true; + skel = priv_prog__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_token_lsm_reject_cmd_open")) + return -EINVAL; + err = priv_prog__load(skel); + priv_prog__destroy(skel); + if (!ASSERT_ERR(err, "obj_token_lsm_reject_cmd_load")) + return -EINVAL; + + return 0; +}
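Both object-level helpers above hand libbpf a path to an already-open BPF FS mount through /proc/self/fd, because bpf_object_open_opts takes a token path rather than a file descriptor. Stripped of the skeleton plumbing and error handling, the pattern is roughly this sketch (helper name and object file are illustrative):

#include <stdio.h>
#include <bpf/libbpf.h>

static struct bpf_object *open_with_token(const char *obj_file, int mnt_fd)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	char path[64];

	/* point libbpf at the existing BPF FS mount; it will create
	 * a BPF token from it and use it for load-time commands
	 */
	snprintf(path, sizeof(path), "/proc/self/fd/%d", mnt_fd);
	opts.bpf_token_path = path;

	return bpf_object__open_file(obj_file, &opts);
}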
+ +/* this test is called with BPF FS that doesn't delegate BPF_BTF_LOAD command, + * which should cause struct_ops application to fail, as BTF won't be uploaded + * into the kernel, even if STRUCT_OPS programs themselves are allowed + */ +static int validate_struct_ops_load(int mnt_fd, bool expect_success) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + char buf[256]; + struct dummy_st_ops_success *skel; + int err; + + snprintf(buf, sizeof(buf), "/proc/self/fd/%d", mnt_fd); + opts.bpf_token_path = buf; + skel = dummy_st_ops_success__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_token_path_open")) + return -EINVAL; + + err = dummy_st_ops_success__load(skel); + dummy_st_ops_success__destroy(skel); + if (expect_success) { + if (!ASSERT_OK(err, "obj_token_path_load")) + return -EINVAL; + } else /* expect failure */ { + if (!ASSERT_ERR(err, "obj_token_path_load")) + return -EINVAL; + } + + return 0; +} + +static int userns_obj_priv_btf_fail(int mnt_fd, struct token_lsm *lsm_skel) +{ + return validate_struct_ops_load(mnt_fd, false /* should fail */); +} + +static int userns_obj_priv_btf_success(int mnt_fd, struct token_lsm *lsm_skel) +{ + return validate_struct_ops_load(mnt_fd, true /* should succeed */); +} + +#define TOKEN_ENVVAR "LIBBPF_BPF_TOKEN_PATH" +#define TOKEN_BPFFS_CUSTOM "/bpf-token-fs" + +static int userns_obj_priv_implicit_token(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct dummy_st_ops_success *skel; + int err; + + /* before we mount BPF FS with token delegation, struct_ops skeleton + * should fail to load + */ + skel = dummy_st_ops_success__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { + dummy_st_ops_success__destroy(skel); + return -EINVAL; + } + + /* mount custom BPF FS over /sys/fs/bpf so that libbpf can create BPF + * token automatically and implicitly + */ + err = sys_move_mount(mnt_fd, "", AT_FDCWD, "/sys/fs/bpf", MOVE_MOUNT_F_EMPTY_PATH); + if (!ASSERT_OK(err, "move_mount_bpffs")) + return -EINVAL; + + /* disable implicit BPF token creation by setting + * LIBBPF_BPF_TOKEN_PATH envvar to empty value, load should fail + */ + err = setenv(TOKEN_ENVVAR, "", 1 /*overwrite*/); + if (!ASSERT_OK(err, "setenv_token_path")) + return -EINVAL; + skel = dummy_st_ops_success__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_token_envvar_disabled_load")) { + unsetenv(TOKEN_ENVVAR); + dummy_st_ops_success__destroy(skel); + return -EINVAL; + } + unsetenv(TOKEN_ENVVAR); + + /* now the same struct_ops skeleton should succeed thanks to libbpf + * creating BPF token from /sys/fs/bpf mount point + */ + skel = dummy_st_ops_success__open_and_load(); + if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load")) + return -EINVAL; + + dummy_st_ops_success__destroy(skel); + + /* now disable implicit token through empty bpf_token_path, should fail */ + opts.bpf_token_path = ""; + skel = dummy_st_ops_success__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open")) + return -EINVAL; + + err = dummy_st_ops_success__load(skel); + dummy_st_ops_success__destroy(skel); + if (!ASSERT_ERR(err, "obj_empty_token_path_load")) + return -EINVAL; + + return 0; +}
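The helper above exercises libbpf's implicit-token default at /sys/fs/bpf; the one below moves the mount to a custom location to show that the LIBBPF_BPF_TOKEN_PATH envvar can redirect that default. As a sketch (placeholder object name, error handling elided), the three bpf_token_path modes compose like this:

#include <bpf/libbpf.h>

static void token_modes(void)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts);
	struct bpf_object *obj;

	/* NULL (default): honor LIBBPF_BPF_TOKEN_PATH if set, otherwise
	 * attempt a best-effort implicit token from /sys/fs/bpf
	 */
	opts.bpf_token_path = NULL;
	obj = bpf_object__open_file("obj.bpf.o", &opts);
	bpf_object__close(obj);

	/* empty string: disable token creation, overriding the envvar */
	opts.bpf_token_path = "";
	obj = bpf_object__open_file("obj.bpf.o", &opts);
	bpf_object__close(obj);

	/* explicit path: require a token from this BPF FS mount */
	opts.bpf_token_path = "/bpf-token-fs";
	obj = bpf_object__open_file("obj.bpf.o", &opts);
	bpf_object__close(obj);
}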
+ +static int userns_obj_priv_implicit_token_envvar(int mnt_fd, struct token_lsm *lsm_skel) +{ + LIBBPF_OPTS(bpf_object_open_opts, opts); + struct dummy_st_ops_success *skel; + int err; + + /* before we mount BPF FS with token delegation, struct_ops skeleton + * should fail to load + */ + skel = dummy_st_ops_success__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load")) { + dummy_st_ops_success__destroy(skel); + return -EINVAL; + } + + /* mount custom BPF FS over custom location, so libbpf can't create + * BPF token implicitly, unless pointed to it through + * LIBBPF_BPF_TOKEN_PATH envvar + */ + rmdir(TOKEN_BPFFS_CUSTOM); + if (!ASSERT_OK(mkdir(TOKEN_BPFFS_CUSTOM, 0777), "mkdir_bpffs_custom")) + goto err_out; + err = sys_move_mount(mnt_fd, "", AT_FDCWD, TOKEN_BPFFS_CUSTOM, MOVE_MOUNT_F_EMPTY_PATH); + if (!ASSERT_OK(err, "move_mount_bpffs")) + goto err_out; + + /* even though we have BPF FS with delegation, it's not at default + * /sys/fs/bpf location, so we still fail to load until envvar is set up + */ + skel = dummy_st_ops_success__open_and_load(); + if (!ASSERT_ERR_PTR(skel, "obj_tokenless_load2")) { + dummy_st_ops_success__destroy(skel); + goto err_out; + } + + err = setenv(TOKEN_ENVVAR, TOKEN_BPFFS_CUSTOM, 1 /*overwrite*/); + if (!ASSERT_OK(err, "setenv_token_path")) + goto err_out; + + /* now the same struct_ops skeleton should succeed thanks to libbpf + * creating BPF token from custom mount point + */ + skel = dummy_st_ops_success__open_and_load(); + if (!ASSERT_OK_PTR(skel, "obj_implicit_token_load")) + goto err_out; + + dummy_st_ops_success__destroy(skel); + + /* now disable implicit token through empty bpf_token_path, envvar + * will be ignored, should fail + */ + opts.bpf_token_path = ""; + skel = dummy_st_ops_success__open_opts(&opts); + if (!ASSERT_OK_PTR(skel, "obj_empty_token_path_open")) + goto err_out; + + err = dummy_st_ops_success__load(skel); + dummy_st_ops_success__destroy(skel); + if (!ASSERT_ERR(err, "obj_empty_token_path_load")) + goto err_out; + + rmdir(TOKEN_BPFFS_CUSTOM); + unsetenv(TOKEN_ENVVAR); + return 0; +err_out: + rmdir(TOKEN_BPFFS_CUSTOM); + unsetenv(TOKEN_ENVVAR); + return -EINVAL; +} + +#define bit(n) (1ULL << (n)) + +void test_token(void) +{ + if (test__start_subtest("map_token")) { + struct bpffs_opts opts = { + .cmds_str = "map_create", + .maps_str = "stack", + }; + + subtest_userns(&opts, userns_map_create); + } + if (test__start_subtest("btf_token")) { + struct bpffs_opts opts = { + .cmds = 1ULL << BPF_BTF_LOAD, + }; + + subtest_userns(&opts, userns_btf_load); + } + if (test__start_subtest("prog_token")) { + struct bpffs_opts opts = { + .cmds_str = "PROG_LOAD", + .progs_str = "XDP", + .attachs_str = "xdp", + }; + + subtest_userns(&opts, userns_prog_load); + } + if (test__start_subtest("obj_priv_map")) { + struct bpffs_opts opts = { + .cmds = bit(BPF_MAP_CREATE), + .maps = bit(BPF_MAP_TYPE_QUEUE), + }; + + subtest_userns(&opts, userns_obj_priv_map); + } + if (test__start_subtest("obj_priv_prog")) { + struct bpffs_opts opts = { + .cmds = bit(BPF_PROG_LOAD), + .progs = bit(BPF_PROG_TYPE_KPROBE), + .attachs = ~0ULL, + }; + + subtest_userns(&opts, userns_obj_priv_prog); + } + if (test__start_subtest("obj_priv_btf_fail")) { + struct bpffs_opts opts = { + /* disallow BTF loading */ + .cmds = bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), + .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), + .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), + .attachs = ~0ULL, + }; + + subtest_userns(&opts, userns_obj_priv_btf_fail); + } + if (test__start_subtest("obj_priv_btf_success")) { + struct bpffs_opts opts = { + /* allow BTF loading */ + .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), + .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), + .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), + .attachs = ~0ULL, + }; + + subtest_userns(&opts, userns_obj_priv_btf_success); + } + if (test__start_subtest("obj_priv_implicit_token")) { + struct bpffs_opts opts = { + /* allow BTF loading */ + .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), + .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), + .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), + .attachs = ~0ULL, + }; + + subtest_userns(&opts, userns_obj_priv_implicit_token); + } + if (test__start_subtest("obj_priv_implicit_token_envvar")) { + struct bpffs_opts opts = { + /* allow BTF loading */ + .cmds = bit(BPF_BTF_LOAD) | bit(BPF_MAP_CREATE) | bit(BPF_PROG_LOAD), + .maps = bit(BPF_MAP_TYPE_STRUCT_OPS), + .progs = bit(BPF_PROG_TYPE_STRUCT_OPS), + .attachs = ~0ULL, + }; + + subtest_userns(&opts, userns_obj_priv_implicit_token_envvar); + } +} diff --git
a/tools/testing/selftests/bpf/prog_tests/tracing_failure.c b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c new file mode 100644 index 0000000000..a222df765b --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tracing_failure.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <test_progs.h> +#include "tracing_failure.skel.h" + +static void test_bpf_spin_lock(bool is_spin_lock) +{ + struct tracing_failure *skel; + int err; + + skel = tracing_failure__open(); + if (!ASSERT_OK_PTR(skel, "tracing_failure__open")) + return; + + if (is_spin_lock) + bpf_program__set_autoload(skel->progs.test_spin_lock, true); + else + bpf_program__set_autoload(skel->progs.test_spin_unlock, true); + + err = tracing_failure__load(skel); + if (!ASSERT_OK(err, "tracing_failure__load")) + goto out; + + err = tracing_failure__attach(skel); + ASSERT_ERR(err, "tracing_failure__attach"); + +out: + tracing_failure__destroy(skel); +} + +void test_tracing_failure(void) +{ + if (test__start_subtest("bpf_spin_lock")) + test_bpf_spin_lock(true); + if (test__start_subtest("bpf_spin_unlock")) + test_bpf_spin_lock(false); +} diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index d62c5bf00e..c4f9f30664 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -4,6 +4,8 @@ #include "cap_helpers.h" #include "verifier_and.skel.h" +#include "verifier_arena.skel.h" +#include "verifier_arena_large.skel.h" #include "verifier_array_access.skel.h" #include "verifier_basic_stack.skel.h" #include "verifier_bitfield_write.skel.h" @@ -28,6 +30,7 @@ #include "verifier_div0.skel.h" #include "verifier_div_overflow.skel.h" #include "verifier_global_subprogs.skel.h" +#include "verifier_global_ptr_args.skel.h" #include "verifier_gotol.skel.h" #include "verifier_helper_access_var_len.skel.h" #include "verifier_helper_packet_access.skel.h" @@ -117,6 +120,8 @@ static void run_tests_aux(const char *skel_name, #define RUN(skel) run_tests_aux(#skel, skel##__elf_bytes, NULL) void test_verifier_and(void) { RUN(verifier_and); } +void test_verifier_arena(void) { RUN(verifier_arena); } +void test_verifier_arena_large(void) { RUN(verifier_arena_large); } void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); } void test_verifier_bitfield_write(void) { RUN(verifier_bitfield_write); } void test_verifier_bounds(void) { RUN(verifier_bounds); } @@ -140,6 +145,7 @@ void test_verifier_direct_stack_access_wraparound(void) { RUN(verifier_direct_st void test_verifier_div0(void) { RUN(verifier_div0); } void test_verifier_div_overflow(void) { RUN(verifier_div_overflow); } void test_verifier_global_subprogs(void) { RUN(verifier_global_subprogs); } +void test_verifier_global_ptr_args(void) { RUN(verifier_global_ptr_args); } void test_verifier_gotol(void) { RUN(verifier_gotol); } void test_verifier_helper_access_var_len(void) { RUN(verifier_helper_access_var_len); } void test_verifier_helper_packet_access(void) { RUN(verifier_helper_packet_access); } diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c index bad0ea167b..498d3bdaa4 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c @@ -107,8 +107,8 @@ void test_xdp_do_redirect(void) .attach_point = BPF_TC_INGRESS); 
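In the lines below, data is a plain byte buffer whose first sizeof(__u64) bytes act as the XDP metadata area, with the test packet copied in right behind it. The second metadata word therefore lives at byte offset 4, which in __u32-pointer arithmetic is + 1 rather than + 4. A self-contained model of that layout (a sketch; the real data and pkt_udp are defined earlier in the test):

#include <assert.h>
#include <stdalign.h>
#include <string.h>

int main(void)
{
	/* stand-in for the test's buffer: 8-byte metadata area, then packet */
	alignas(alignof(unsigned int)) char data[64];

	memset(data, 0, sizeof(data));
	*(unsigned int *)data = 0x42;	 /* first metadata word */
	*((unsigned int *)data + 1) = 0; /* second word, at byte offset 4 */

	/* +1 on a __u32-like pointer advances 4 bytes; +4 would advance 16 */
	assert((char *)((unsigned int *)data + 1) - data == 4);
	return 0;
}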
memcpy(&data[sizeof(__u64)], &pkt_udp, sizeof(pkt_udp)); - ((__u32 *)data)[0] = 0x42; /* metadata test value */ - ((__u32 *)data)[1] = 0; + *((__u32 *)data) = 0x42; /* metadata test value */ + *((__u32 *)data + 1) = 0; skel = test_xdp_do_redirect__open(); if (!ASSERT_OK_PTR(skel, "skel")) diff --git a/tools/testing/selftests/bpf/prog_tests/xdpwall.c b/tools/testing/selftests/bpf/prog_tests/xdpwall.c index f3927829a5..4599154c8e 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdpwall.c +++ b/tools/testing/selftests/bpf/prog_tests/xdpwall.c @@ -9,7 +9,7 @@ void test_xdpwall(void) struct xdpwall *skel; skel = xdpwall__open_and_load(); - ASSERT_OK_PTR(skel, "Does LLMV have https://reviews.llvm.org/D109073?"); + ASSERT_OK_PTR(skel, "Does LLVM have https://github.com/llvm/llvm-project/commit/ea72b0319d7b0f0c2fcf41d121afa5d031b319d5?"); xdpwall__destroy(skel); } diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c new file mode 100644 index 0000000000..1e6ac187a6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/arena_htab.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARENA); + __uint(map_flags, BPF_F_MMAPABLE); + __uint(max_entries, 100); /* number of pages */ +} arena SEC(".maps"); + +#include "bpf_arena_htab.h" + +void __arena *htab_for_user; +bool skip = false; + +int zero = 0; + +SEC("syscall") +int arena_htab_llvm(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM) + struct htab __arena *htab; + __u64 i; + + htab = bpf_alloc(sizeof(*htab)); + cast_kern(htab); + htab_init(htab); + + /* first run. No old elems in the table */ + for (i = zero; i < 1000; i++) + htab_update_elem(htab, i, i); + + /* should replace all elems with new ones */ + for (i = zero; i < 1000; i++) + htab_update_elem(htab, i, i); + cast_user(htab); + htab_for_user = htab; +#else + skip = true; +#endif + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/arena_htab_asm.c b/tools/testing/selftests/bpf/progs/arena_htab_asm.c new file mode 100644 index 0000000000..6cd70ea12f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/arena_htab_asm.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#define BPF_ARENA_FORCE_ASM +#define arena_htab_llvm arena_htab_asm +#include "arena_htab.c" diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c new file mode 100644 index 0000000000..c0422c58ce --- /dev/null +++ b/tools/testing/selftests/bpf/progs/arena_list.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
*/ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARENA); + __uint(map_flags, BPF_F_MMAPABLE); + __uint(max_entries, 100); /* number of pages */ +#ifdef __TARGET_ARCH_arm64 + __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */ +#else + __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */ +#endif +} arena SEC(".maps"); + +#include "bpf_arena_alloc.h" +#include "bpf_arena_list.h" + +struct elem { + struct arena_list_node node; + __u64 value; +}; + +struct arena_list_head __arena *list_head; +int list_sum; +int cnt; +bool skip = false; + +#ifdef __BPF_FEATURE_ADDR_SPACE_CAST +long __arena arena_sum; +int __arena test_val = 1; +struct arena_list_head __arena global_head; +#else +long arena_sum SEC(".addr_space.1"); +int test_val SEC(".addr_space.1"); +#endif + +int zero; + +SEC("syscall") +int arena_list_add(void *ctx) +{ +#ifdef __BPF_FEATURE_ADDR_SPACE_CAST + __u64 i; + + list_head = &global_head; + + for (i = zero; i < cnt; cond_break, i++) { + struct elem __arena *n = bpf_alloc(sizeof(*n)); + + test_val++; + n->value = i; + arena_sum += i; + list_add_head(&n->node, list_head); + } +#else + skip = true; +#endif + return 0; +} + +SEC("syscall") +int arena_list_del(void *ctx) +{ +#ifdef __BPF_FEATURE_ADDR_SPACE_CAST + struct elem __arena *n; + int sum = 0; + + arena_sum = 0; + list_for_each_entry(n, list_head, node) { + sum += n->value; + arena_sum += n->value; + list_del(&n->node); + bpf_free(n); + } + list_sum = sum; +#else + skip = true; +#endif + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/async_stack_depth.c b/tools/testing/selftests/bpf/progs/async_stack_depth.c index 3517c0e012..36734683ac 100644 --- a/tools/testing/selftests/bpf/progs/async_stack_depth.c +++ b/tools/testing/selftests/bpf/progs/async_stack_depth.c @@ -30,7 +30,7 @@ static int bad_timer_cb(void *map, int *key, struct bpf_timer *timer) } SEC("tc") -__failure __msg("combined stack size of 2 calls is 576. Too large") +__failure __msg("combined stack size of 2 calls is") int pseudo_call_check(struct __sk_buff *ctx) { struct hmap_elem *elem; @@ -45,7 +45,7 @@ int pseudo_call_check(struct __sk_buff *ctx) } SEC("tc") -__failure __msg("combined stack size of 2 calls is 608. 
Too large") +__failure __msg("combined stack size of 2 calls is") int async_call_root_check(struct __sk_buff *ctx) { struct hmap_elem *elem; diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops.c b/tools/testing/selftests/bpf/progs/bad_struct_ops.c new file mode 100644 index 0000000000..b7e175cd0a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bad_struct_ops.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +SEC("struct_ops/test_1") +int BPF_PROG(test_1) { return 0; } + +SEC("struct_ops/test_2") +int BPF_PROG(test_2) { return 0; } + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_1 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2 +}; + +SEC(".struct_ops.link") +struct bpf_testmod_ops2 testmod_2 = { + .test_1 = (void *)test_1 +}; diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops2.c b/tools/testing/selftests/bpf/progs/bad_struct_ops2.c new file mode 100644 index 0000000000..64a95f6be8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bad_struct_ops2.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +/* This is an unused struct_ops program, it lacks corresponding + * struct_ops map, which provides attachment information. + * W/o additional configuration attempt to load such + * BPF object file would fail. + */ +SEC("struct_ops/foo") +void foo(void) {} diff --git a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c index c8ec0d0368..e4bfbba6c1 100644 --- a/tools/testing/selftests/bpf/progs/bench_local_storage_create.c +++ b/tools/testing/selftests/bpf/progs/bench_local_storage_create.c @@ -61,15 +61,14 @@ SEC("lsm.s/socket_post_create") int BPF_PROG(socket_post_create, struct socket *sock, int family, int type, int protocol, int kern) { - struct sock *sk = sock->sk; struct storage *stg; __u32 pid; pid = bpf_get_current_pid_tgid() >> 32; - if (pid != bench_pid || !sk) + if (pid != bench_pid) return 0; - stg = bpf_sk_storage_get(&sk_storage_map, sk, NULL, + stg = bpf_sk_storage_get(&sk_storage_map, sock->sk, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE); if (stg) diff --git a/tools/testing/selftests/bpf/progs/bpf_compiler.h b/tools/testing/selftests/bpf/progs/bpf_compiler.h new file mode 100644 index 0000000000..a7c343dc82 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_compiler.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BPF_COMPILER_H__ +#define __BPF_COMPILER_H__ + +#define DO_PRAGMA_(X) _Pragma(#X) + +#if __clang__ +#define __pragma_loop_unroll DO_PRAGMA_(clang loop unroll(enable)) +#else +/* In GCC -funroll-loops, which is enabled with -O2, should have the + same impact than the loop-unroll-enable pragma above. 
*/ +#define __pragma_loop_unroll +#endif + +#if __clang__ +#define __pragma_loop_unroll_count(N) DO_PRAGMA_(clang loop unroll_count(N)) +#else +#define __pragma_loop_unroll_count(N) DO_PRAGMA_(GCC unroll N) +#endif + +#if __clang__ +#define __pragma_loop_unroll_full DO_PRAGMA_(clang loop unroll(full)) +#else +#define __pragma_loop_unroll_full DO_PRAGMA_(GCC unroll 65534) +#endif + +#if __clang__ +#define __pragma_loop_no_unroll DO_PRAGMA_(clang loop unroll(disable)) +#else +#define __pragma_loop_no_unroll DO_PRAGMA_(GCC unroll 1) +#endif + +#endif diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 2fd59970c4..fb2f5513e2 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -80,7 +80,7 @@ #define __imm(name) [name]"i"(name) #define __imm_const(name, expr) [name]"i"(expr) #define __imm_addr(name) [name]"i"(&name) -#define __imm_ptr(name) [name]"p"(&name) +#define __imm_ptr(name) [name]"r"(&name) #define __imm_insn(name, expr) [name]"i"(*(long *)&(expr)) /* Magic constants used with __retval() */ diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h index e8bd4b7b5e..7001965d1c 100644 --- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -51,9 +51,25 @@ #define ICSK_TIME_LOSS_PROBE 5 #define ICSK_TIME_REO_TIMEOUT 6 +#define ETH_ALEN 6 #define ETH_HLEN 14 +#define ETH_P_IP 0x0800 #define ETH_P_IPV6 0x86DD +#define NEXTHDR_TCP 6 + +#define TCPOPT_NOP 1 +#define TCPOPT_EOL 0 +#define TCPOPT_MSS 2 +#define TCPOPT_WINDOW 3 +#define TCPOPT_TIMESTAMP 8 +#define TCPOPT_SACK_PERM 4 + +#define TCPOLEN_MSS 4 +#define TCPOLEN_WINDOW 3 +#define TCPOLEN_TIMESTAMP 10 +#define TCPOLEN_SACK_PERM 2 + #define CHECKSUM_NONE 0 #define CHECKSUM_PARTIAL 3 diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c index 610c2427fd..3500e4b69e 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c @@ -27,32 +27,6 @@ bool is_cgroup1 = 0; struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym; void bpf_cgroup_release(struct cgroup *cgrp) __ksym; -static void __on_lookup(struct cgroup *cgrp) -{ - bpf_cgrp_storage_delete(&map_a, cgrp); - bpf_cgrp_storage_delete(&map_b, cgrp); -} - -SEC("fentry/bpf_local_storage_lookup") -int BPF_PROG(on_lookup) -{ - struct task_struct *task = bpf_get_current_task_btf(); - struct cgroup *cgrp; - - if (is_cgroup1) { - cgrp = bpf_task_get_cgroup1(task, target_hid); - if (!cgrp) - return 0; - - __on_lookup(cgrp); - bpf_cgroup_release(cgrp); - return 0; - } - - __on_lookup(task->cgroups->dfl_cgrp); - return 0; -} - static void __on_update(struct cgroup *cgrp) { long *ptr; diff --git a/tools/testing/selftests/bpf/progs/connect_unix_prog.c b/tools/testing/selftests/bpf/progs/connect_unix_prog.c index ca8aa2f116..2ef0e0c46d 100644 --- a/tools/testing/selftests/bpf/progs/connect_unix_prog.c +++ b/tools/testing/selftests/bpf/progs/connect_unix_prog.c @@ -28,8 +28,7 @@ int connect_unix_prog(struct bpf_sock_addr *ctx) if (sa_kern->uaddrlen != unaddrlen) return 0; - sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, - bpf_core_type_id_kernel(struct sockaddr_un)); + sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un); if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, 
sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) return 0; diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h index 0cd4aebb97..c705d8112a 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_common.h +++ b/tools/testing/selftests/bpf/progs/cpumask_common.h @@ -23,41 +23,42 @@ struct array_map { __uint(max_entries, 1); } __cpumask_map SEC(".maps"); -struct bpf_cpumask *bpf_cpumask_create(void) __ksym; -void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym; -struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym; -u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym; -u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; +struct bpf_cpumask *bpf_cpumask_create(void) __ksym __weak; +void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym __weak; +struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym __weak; +u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym __weak; +u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym __weak; u32 bpf_cpumask_first_and(const struct cpumask *src1, - const struct cpumask *src2) __ksym; -void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; -void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; -bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym; -bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; -bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; -void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym; -void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym; + const struct cpumask *src2) __ksym __weak; +void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak; +void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak; +bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym __weak; +bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak; +bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak; +void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym __weak; +void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym __weak; bool bpf_cpumask_and(struct bpf_cpumask *cpumask, const struct cpumask *src1, - const struct cpumask *src2) __ksym; + const struct cpumask *src2) __ksym __weak; void bpf_cpumask_or(struct bpf_cpumask *cpumask, const struct cpumask *src1, - const struct cpumask *src2) __ksym; + const struct cpumask *src2) __ksym __weak; void bpf_cpumask_xor(struct bpf_cpumask *cpumask, const struct cpumask *src1, - const struct cpumask *src2) __ksym; -bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym; -bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym; -bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym; -bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym; -bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym; -void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym; -u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym; -u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym; -u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym; - -void bpf_rcu_read_lock(void) __ksym; -void bpf_rcu_read_unlock(void) __ksym; + const struct cpumask *src2) 
__ksym __weak; +bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak; +bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak; +bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak; +bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym __weak; +bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym __weak; +void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym __weak; +u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym __weak; +u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, + const struct cpumask *src2) __ksym __weak; +u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym __weak; + +void bpf_rcu_read_lock(void) __ksym __weak; +void bpf_rcu_read_unlock(void) __ksym __weak; static inline const struct cpumask *cast(struct bpf_cpumask *cpumask) { diff --git a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c index 9c078f34bb..5a76754f84 100644 --- a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c +++ b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c @@ -27,8 +27,7 @@ int getpeername_unix_prog(struct bpf_sock_addr *ctx) if (sa_kern->uaddrlen != unaddrlen) return 1; - sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, - bpf_core_type_id_kernel(struct sockaddr_un)); + sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un); if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) return 1; diff --git a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c index ac71451114..7867113c69 100644 --- a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c +++ b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c @@ -27,8 +27,7 @@ int getsockname_unix_prog(struct bpf_sock_addr *ctx) if (sa_kern->uaddrlen != unaddrlen) return 1; - sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, - bpf_core_type_id_kernel(struct sockaddr_un)); + sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un); if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) return 1; diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index fe971992e6..3db416606f 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -5,6 +5,7 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" +#include "bpf_compiler.h" #define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0])) @@ -78,8 +79,8 @@ int iter_err_unsafe_asm_loop(const void *ctx) "*(u32 *)(r1 + 0) = r6;" /* invalid */ : : [it]"r"(&it), - [small_arr]"p"(small_arr), - [zero]"p"(zero), + [small_arr]"r"(small_arr), + [zero]"r"(zero), __imm(bpf_iter_num_new), __imm(bpf_iter_num_next), __imm(bpf_iter_num_destroy) @@ -183,7 +184,7 @@ int iter_pragma_unroll_loop(const void *ctx) MY_PID_GUARD(); bpf_iter_num_new(&it, 0, 2); -#pragma nounroll + __pragma_loop_no_unroll for (i = 0; i < 3; i++) { v = bpf_iter_num_next(&it); bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? 
*v : -1); @@ -238,7 +239,7 @@ int iter_multiple_sequential_loops(const void *ctx) bpf_iter_num_destroy(&it); bpf_iter_num_new(&it, 0, 2); -#pragma nounroll + __pragma_loop_no_unroll for (i = 0; i < 3; i++) { v = bpf_iter_num_next(&it); bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1); diff --git a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c new file mode 100644 index 0000000000..2414ac20b6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023. Huawei Technologies Co., Ltd */ +#include <linux/types.h> +#include <bpf/bpf_helpers.h> + +#include "bpf_experimental.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +struct bin_data { + char blob[32]; +}; + +#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8))) +private(kptr) struct bin_data __kptr * ptr; + +SEC("tc") +__naked int kptr_xchg_inline(void) +{ + asm volatile ( + "r1 = %[ptr] ll;" + "r2 = 0;" + "call %[bpf_kptr_xchg];" + "if r0 == 0 goto 1f;" + "r1 = r0;" + "r2 = 0;" + "call %[bpf_obj_drop_impl];" + "1:" + "r0 = 0;" + "exit;" + : + : __imm_addr(ptr), + __imm(bpf_kptr_xchg), + __imm(bpf_obj_drop_impl) + : __clobber_all + ); +} + +/* BTF FUNC records are not generated for kfuncs referenced + * from inline assembly. These records are necessary for + * libbpf to link the program. The function below is a hack + * to ensure that BTF FUNC records are generated. + */ +void __btf_root(void) +{ + bpf_obj_drop(NULL); +} diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c index 637e75df2e..e5e3a8b8dd 100644 --- a/tools/testing/selftests/bpf/progs/local_storage.c +++ b/tools/testing/selftests/bpf/progs/local_storage.c @@ -140,12 +140,11 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address, { __u32 pid = bpf_get_current_pid_tgid() >> 32; struct local_storage *storage; - struct sock *sk = sock->sk; - if (pid != monitored_pid || !sk) + if (pid != monitored_pid) return 0; - storage = bpf_sk_storage_get(&sk_storage_map, sk, 0, 0); + storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, 0); if (!storage) return 0; @@ -156,24 +155,24 @@ int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address, /* This tests that we can associate multiple elements * with the local storage. */ - storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0, + storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); if (!storage) return 0; - if (bpf_sk_storage_delete(&sk_storage_map2, sk)) + if (bpf_sk_storage_delete(&sk_storage_map2, sock->sk)) return 0; - storage = bpf_sk_storage_get(&sk_storage_map2, sk, 0, + storage = bpf_sk_storage_get(&sk_storage_map2, sock->sk, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); if (!storage) return 0; - if (bpf_sk_storage_delete(&sk_storage_map, sk)) + if (bpf_sk_storage_delete(&sk_storage_map, sock->sk)) return 0; /* Ensure that the sk_storage_map is disconnected from the storage. 
*/ - if (!sk->sk_bpf_storage || sk->sk_bpf_storage->smap) + if (!sock->sk->sk_bpf_storage || sock->sk->sk_bpf_storage->smap) return 0; sk_storage_result = 0; @@ -186,12 +185,11 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type, { __u32 pid = bpf_get_current_pid_tgid() >> 32; struct local_storage *storage; - struct sock *sk = sock->sk; - if (pid != monitored_pid || !sk) + if (pid != monitored_pid) return 0; - storage = bpf_sk_storage_get(&sk_storage_map, sk, 0, + storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); if (!storage) return 0; diff --git a/tools/testing/selftests/bpf/progs/loop4.c b/tools/testing/selftests/bpf/progs/loop4.c index b35337926d..0de0357f57 100644 --- a/tools/testing/selftests/bpf/progs/loop4.c +++ b/tools/testing/selftests/bpf/progs/loop4.c @@ -3,6 +3,8 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + char _license[] SEC("license") = "GPL"; SEC("socket") @@ -10,7 +12,7 @@ int combinations(volatile struct __sk_buff* skb) { int ret = 0, i; -#pragma nounroll + __pragma_loop_no_unroll for (i = 0; i < 20; i++) if (skb->len) ret |= 1 << i; diff --git a/tools/testing/selftests/bpf/progs/lsm_cgroup.c b/tools/testing/selftests/bpf/progs/lsm_cgroup.c index d7598538aa..02c11d16b6 100644 --- a/tools/testing/selftests/bpf/progs/lsm_cgroup.c +++ b/tools/testing/selftests/bpf/progs/lsm_cgroup.c @@ -103,15 +103,11 @@ static __always_inline int real_bind(struct socket *sock, int addrlen) { struct sockaddr_ll sa = {}; - struct sock *sk = sock->sk; - if (!sk) - return 1; - - if (sk->__sk_common.skc_family != AF_PACKET) + if (sock->sk->__sk_common.skc_family != AF_PACKET) return 1; - if (sk->sk_kern_sock) + if (sock->sk->sk_kern_sock) return 1; bpf_probe_read_kernel(&sa, sizeof(sa), address); diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c index 3325da17ec..efaf622c28 100644 --- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c +++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c @@ -316,7 +316,7 @@ struct lpm_trie { } __attribute__((preserve_access_index)); struct lpm_key { - struct bpf_lpm_trie_key trie_key; + struct bpf_lpm_trie_key_hdr trie_key; __u32 data; }; diff --git a/tools/testing/selftests/bpf/progs/priv_map.c b/tools/testing/selftests/bpf/progs/priv_map.c new file mode 100644 index 0000000000..9085be50f0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/priv_map.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, 1); + __type(value, __u32); +} priv_map SEC(".maps"); diff --git a/tools/testing/selftests/bpf/progs/priv_prog.c b/tools/testing/selftests/bpf/progs/priv_prog.c new file mode 100644 index 0000000000..3c7b2b618c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/priv_prog.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +SEC("kprobe") +int kprobe_prog(void *ctx) +{ + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h index de3b6e4e4d..6957d9f280 100644 --- a/tools/testing/selftests/bpf/progs/profiler.inc.h +++ b/tools/testing/selftests/bpf/progs/profiler.inc.h @@ -8,6 +8,7 @@ #include "profiler.h" #include "err.h" #include "bpf_experimental.h" +#include "bpf_compiler.h" #ifndef NULL #define NULL 0 @@ -169,7 +170,7 @@ static INLINE int get_var_spid_index(struct var_kill_data_arr_t* arr_struct, int spid) { #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) if (arr_struct->array[i].meta.pid == spid) @@ -185,7 +186,7 @@ static INLINE void populate_ancestors(struct task_struct* task, ancestors_data->num_ancestors = 0; #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (num_ancestors = 0; num_ancestors < MAX_ANCESTORS; num_ancestors++) { parent = BPF_CORE_READ(parent, real_parent); @@ -212,7 +213,7 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node, size_t filepart_length; #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) { filepart_length = @@ -261,7 +262,7 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data, int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local, pids_cgrp_id___local); #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < CGROUP_SUBSYS_COUNT; i++) { struct cgroup_subsys_state* subsys = @@ -402,7 +403,7 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig) if (kill_data == NULL) return 0; #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) if (arr_struct->array[i].meta.pid == 0) { @@ -482,7 +483,7 @@ read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload) struct dentry* parent_dentry; #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < MAX_PATH_DEPTH; i++) { filepart_length = @@ -508,7 +509,7 @@ is_ancestor_in_allowed_inodes(struct dentry* filp_dentry) { struct dentry* parent_dentry; #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < MAX_PATH_DEPTH; i++) { u64 dir_ino = BPF_CORE_READ(filp_dentry, d_inode, i_ino); @@ -629,7 +630,7 @@ int raw_tracepoint__sched_process_exit(void* ctx) struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn); #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) { struct var_kill_data_t* past_kill_data = &arr_struct->array[i]; diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h index 026d573ce1..86484f07e1 100644 --- a/tools/testing/selftests/bpf/progs/pyperf.h +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -8,6 +8,7 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" +#include "bpf_compiler.h" #define FUNCTION_NAME_LEN 64 #define FILE_NAME_LEN 128 @@ -298,11 +299,11 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx) #if defined(USE_ITER) /* no for loop, no unrolling */ #elif defined(NO_UNROLL) -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll #elif defined(UNROLL_COUNT) -#pragma clang loop unroll_count(UNROLL_COUNT) + 
__pragma_loop_unroll_count(UNROLL_COUNT) #else -#pragma clang loop unroll(full) + __pragma_loop_unroll_full #endif /* NO_UNROLL */ /* Unwind python stack */ #ifdef USE_ITER diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c index 14fb01437f..ab3a532b7d 100644 --- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c @@ -319,3 +319,123 @@ int cross_rcu_region(void *ctx) bpf_rcu_read_unlock(); return 0; } + +__noinline +static int static_subprog(void *ctx) +{ + volatile int ret = 0; + + if (bpf_get_prandom_u32()) + return ret + 42; + return ret + bpf_get_prandom_u32(); +} + +__noinline +int global_subprog(u64 a) +{ + volatile int ret = a; + + return ret + static_subprog(NULL); +} + +__noinline +static int static_subprog_lock(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + if (bpf_get_prandom_u32()) + return ret + 42; + return ret + bpf_get_prandom_u32(); +} + +__noinline +int global_subprog_lock(u64 a) +{ + volatile int ret = a; + + return ret + static_subprog_lock(NULL); +} + +__noinline +static int static_subprog_unlock(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_unlock(); + if (bpf_get_prandom_u32()) + return ret + 42; + return ret + bpf_get_prandom_u32(); +} + +__noinline +int global_subprog_unlock(u64 a) +{ + volatile int ret = a; + + return ret + static_subprog_unlock(NULL); +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_subprog(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + if (bpf_get_prandom_u32()) + ret += static_subprog(ctx); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_global_subprog(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + if (bpf_get_prandom_u32()) + ret += global_subprog(ret); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_subprog_lock(void *ctx) +{ + volatile int ret = 0; + + ret += static_subprog_lock(ctx); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_global_subprog_lock(void *ctx) +{ + volatile int ret = 0; + + ret += global_subprog_lock(ret); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_subprog_unlock(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + ret += static_subprog_unlock(ctx); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_global_subprog_unlock(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + ret += global_subprog_unlock(ret); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c index 4dfbc85525..1c7ab44bcc 100644 --- a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c +++ b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c @@ -27,8 +27,7 @@ int recvmsg_unix_prog(struct bpf_sock_addr *ctx) if (sa_kern->uaddrlen != unaddrlen) return 1; - sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, - bpf_core_type_id_kernel(struct sockaddr_un)); + sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un); if (memcmp(sa_kern_unaddr->sun_path, SERVUN_ADDRESS, sizeof(SERVUN_ADDRESS) - 1) != 0) return 1; diff --git a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c index 1f67e83266..d8869b03dd 100644 --- 
a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c +++ b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c @@ -28,8 +28,7 @@ int sendmsg_unix_prog(struct bpf_sock_addr *ctx) if (sa_kern->uaddrlen != unaddrlen) return 0; - sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, - bpf_core_type_id_kernel(struct sockaddr_un)); + sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un); if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) return 0; diff --git a/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c index 3e745793b2..46d6eb2a3b 100644 --- a/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c +++ b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c @@ -12,8 +12,6 @@ int cookie_found = 0; __u64 cookie = 0; __u32 omem = 0; -void *bpf_rdonly_cast(void *, __u32) __ksym; - struct { __uint(type, BPF_MAP_TYPE_SK_STORAGE); __uint(map_flags, BPF_F_NO_PREALLOC); @@ -29,7 +27,7 @@ int BPF_PROG(bpf_local_storage_destroy, struct bpf_local_storage *local_storage) if (local_storage_ptr != local_storage) return 0; - sk = bpf_rdonly_cast(sk_ptr, bpf_core_type_id_kernel(struct sock)); + sk = bpf_core_cast(sk_ptr, struct sock); if (sk->sk_cookie.counter != cookie) return 0; diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c index ffbbfe1fa1..96531b0d9d 100644 --- a/tools/testing/selftests/bpf/progs/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c @@ -32,7 +32,7 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx) if (!sk) return 0; - sk = bpf_rdonly_cast(sk, bpf_core_type_id_kernel(struct sock)); + sk = bpf_core_cast(sk, struct sock); if (sk->sk_family != AF_INET6 || sk->sk_state != TCP_LISTEN || !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr)) @@ -68,7 +68,7 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx) if (!sk) return 0; - sk = bpf_rdonly_cast(sk, bpf_core_type_id_kernel(struct sock)); + sk = bpf_core_cast(sk, struct sock); if (sk->sk_family != AF_INET6 || !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr)) return 0; diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index 40df2cc26e..f74459eead 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -10,6 +10,8 @@ #include <linux/types.h> #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + typedef uint32_t pid_t; struct task_struct {}; @@ -419,9 +421,9 @@ static __always_inline uint64_t read_map_var(struct strobemeta_cfg *cfg, } #ifdef NO_UNROLL -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll #else -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < STROBE_MAX_MAP_ENTRIES; ++i) { if (i >= map.cnt) @@ -560,25 +562,25 @@ static void *read_strobe_meta(struct task_struct *task, payload_off = sizeof(data->payload); #else #ifdef NO_UNROLL -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll #else -#pragma unroll + __pragma_loop_unroll #endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_INTS; ++i) { read_int_var(cfg, i, tls_base, &value, data); } #ifdef NO_UNROLL -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll #else -#pragma unroll + __pragma_loop_unroll #endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_STRS; ++i) { payload_off = read_str_var(cfg, i, tls_base, &value, data, payload_off); } #ifdef NO_UNROLL -#pragma clang loop unroll(disable) + 
__pragma_loop_no_unroll #else -#pragma unroll + __pragma_loop_unroll #endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_MAPS; ++i) { payload_off = read_map_var(cfg, i, tls_base, &value, data, payload_off); diff --git a/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c b/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c new file mode 100644 index 0000000000..ba10c38962 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +int test_1_result = 0; + +SEC("struct_ops/test_1") +int BPF_PROG(test_1) +{ + test_1_result = 42; + return 0; +} + +SEC("struct_ops/test_1") +int BPF_PROG(test_2) +{ + return 0; +} + +struct bpf_testmod_ops___v1 { + int (*test_1)(void); +}; + +struct bpf_testmod_ops___v2 { + int (*test_1)(void); + int (*does_not_exist)(void); +}; + +SEC(".struct_ops.link") +struct bpf_testmod_ops___v1 testmod_1 = { + .test_1 = (void *)test_1 +}; + +SEC(".struct_ops.link") +struct bpf_testmod_ops___v2 testmod_2 = { + .test_1 = (void *)test_1, + .does_not_exist = (void *)test_2 +}; + +SEC("?.struct_ops") +struct bpf_testmod_ops___v1 optional_map = { + .test_1 = (void *)test_1, +}; + +SEC("?.struct_ops.link") +struct bpf_testmod_ops___v1 optional_map2 = { + .test_1 = (void *)test_1, +}; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c b/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c new file mode 100644 index 0000000000..6049d9c902 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +int test_1_result = 0; + +SEC("?struct_ops/test_1") +int BPF_PROG(foo) +{ + test_1_result = 42; + return 0; +} + +SEC("?struct_ops/test_1") +int BPF_PROG(bar) +{ + test_1_result = 24; + return 0; +} + +struct bpf_testmod_ops { + int (*test_1)(void); +}; + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_1 = { + .test_1 = (void *)bar +}; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c new file mode 100644 index 0000000000..b450f72e74 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +pid_t tgid = 0; + +/* This is a test BPF program that uses struct_ops to access an argument + * that may be NULL. This is a test for the verifier to ensure that it can + * rip PTR_MAYBE_NULL correctly. 
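The struct_ops_autocreate*.c programs above are driven by userspace toggling map autocreation: a map declared in SEC("?.struct_ops") or SEC("?.struct_ops.link") (note the '?') is skipped by libbpf unless explicitly opted in, while bpf_map__set_autocreate() flips the default the other way for a map whose ___v2 type has a member the running kernel lacks. A hypothetical skeleton-based driver, with error handling elided:

struct struct_ops_autocreate *skel = struct_ops_autocreate__open();

if (!skel)
	return;

/* skip the map whose ___v2 type the kernel cannot satisfy */
bpf_map__set_autocreate(skel->maps.testmod_2, false);
/* opt one of the '?'-sectioned (default-off) maps back in */
bpf_map__set_autocreate(skel->maps.optional_map2, true);

if (!struct_ops_autocreate__load(skel)) {
	/* ... attach and exercise testmod_1 ... */
}
struct_ops_autocreate__destroy(skel);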
+ */ +SEC("struct_ops/test_maybe_null") +int BPF_PROG(test_maybe_null, int dummy, + struct task_struct *task) +{ + if (task) + tgid = task->tgid; + + return 0; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_1 = { + .test_maybe_null = (void *)test_maybe_null, +}; + diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c new file mode 100644 index 0000000000..6283099ec3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +pid_t tgid = 0; + +SEC("struct_ops/test_maybe_null_struct_ptr") +int BPF_PROG(test_maybe_null_struct_ptr, int dummy, + struct task_struct *task) +{ + tgid = task->tgid; + + return 0; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_struct_ptr = { + .test_maybe_null = (void *)test_maybe_null_struct_ptr, +}; + diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c b/tools/testing/selftests/bpf/progs/struct_ops_module.c new file mode 100644 index 0000000000..026cabfa7f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +int test_1_result = 0; +int test_2_result = 0; + +SEC("struct_ops/test_1") +int BPF_PROG(test_1) +{ + test_1_result = 0xdeadbeef; + return 0; +} + +SEC("struct_ops/test_2") +void BPF_PROG(test_2, int a, int b) +{ + test_2_result = a + b; +} + +SEC("struct_ops/test_3") +int BPF_PROG(test_3, int a, int b) +{ + test_2_result = a + b + 3; + return a + b + 3; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_1 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2, + .data = 0x1, +}; + +SEC("struct_ops/test_2") +void BPF_PROG(test_2_v2, int a, int b) +{ + test_2_result = a * b; +} + +struct bpf_testmod_ops___v2 { + int (*test_1)(void); + void (*test_2)(int a, int b); + int (*test_maybe_null)(int dummy, struct task_struct *task); +}; + +SEC(".struct_ops.link") +struct bpf_testmod_ops___v2 testmod_2 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2_v2, +}; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c new file mode 100644 index 0000000000..9efcc6e4d3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
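struct_ops_maybe_null.c and struct_ops_maybe_null_fail.c above probe the same verifier feature from both sides: a struct_ops argument carrying PTR_MAYBE_NULL must be tested before it is dereferenced. Reduced to the essential contrast:

/* accepted: the possibly-NULL argument is checked first */
if (task)
	tgid = task->tgid;

/* rejected: unconditional dereference of a PTR_MAYBE_NULL argument */
tgid = task->tgid;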
*/ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +#define TRAMP(x) \ + SEC("struct_ops/tramp_" #x) \ + int BPF_PROG(tramp_ ## x, int a) \ + { \ + return a; \ + } + +TRAMP(1) +TRAMP(2) +TRAMP(3) +TRAMP(4) +TRAMP(5) +TRAMP(6) +TRAMP(7) +TRAMP(8) +TRAMP(9) +TRAMP(10) +TRAMP(11) +TRAMP(12) +TRAMP(13) +TRAMP(14) +TRAMP(15) +TRAMP(16) +TRAMP(17) +TRAMP(18) +TRAMP(19) +TRAMP(20) +TRAMP(21) +TRAMP(22) +TRAMP(23) +TRAMP(24) +TRAMP(25) +TRAMP(26) +TRAMP(27) +TRAMP(28) +TRAMP(29) +TRAMP(30) +TRAMP(31) +TRAMP(32) +TRAMP(33) +TRAMP(34) +TRAMP(35) +TRAMP(36) +TRAMP(37) +TRAMP(38) +TRAMP(39) +TRAMP(40) + +#define F_TRAMP(x) .tramp_ ## x = (void *)tramp_ ## x + +SEC(".struct_ops.link") +struct bpf_testmod_ops multi_pages = { + F_TRAMP(1), + F_TRAMP(2), + F_TRAMP(3), + F_TRAMP(4), + F_TRAMP(5), + F_TRAMP(6), + F_TRAMP(7), + F_TRAMP(8), + F_TRAMP(9), + F_TRAMP(10), + F_TRAMP(11), + F_TRAMP(12), + F_TRAMP(13), + F_TRAMP(14), + F_TRAMP(15), + F_TRAMP(16), + F_TRAMP(17), + F_TRAMP(18), + F_TRAMP(19), + F_TRAMP(20), + F_TRAMP(21), + F_TRAMP(22), + F_TRAMP(23), + F_TRAMP(24), + F_TRAMP(25), + F_TRAMP(26), + F_TRAMP(27), + F_TRAMP(28), + F_TRAMP(29), + F_TRAMP(30), + F_TRAMP(31), + F_TRAMP(32), + F_TRAMP(33), + F_TRAMP(34), + F_TRAMP(35), + F_TRAMP(36), + F_TRAMP(37), + F_TRAMP(38), + F_TRAMP(39), + F_TRAMP(40), +}; diff --git a/tools/testing/selftests/bpf/progs/task_ls_recursion.c b/tools/testing/selftests/bpf/progs/task_ls_recursion.c index 4542dc683b..f1853c38aa 100644 --- a/tools/testing/selftests/bpf/progs/task_ls_recursion.c +++ b/tools/testing/selftests/bpf/progs/task_ls_recursion.c @@ -27,23 +27,6 @@ struct { __type(value, long); } map_b SEC(".maps"); -SEC("fentry/bpf_local_storage_lookup") -int BPF_PROG(on_lookup) -{ - struct task_struct *task = bpf_get_current_task_btf(); - - if (!test_pid || task->pid != test_pid) - return 0; - - /* The bpf_task_storage_delete will call - * bpf_local_storage_lookup. The prog->active will - * stop the recursion. 
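struct_ops_multi_pages.c above stamps out 40 identical operators with the TRAMP() macro so that the generated struct_ops trampolines no longer fit in a single page, exercising the kernel's multi-page trampoline allocation. For reference, TRAMP(1) expands to:

SEC("struct_ops/tramp_1")
int BPF_PROG(tramp_1, int a)
{
	return a;
}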
- */ - bpf_task_storage_delete(&map_a, task); - bpf_task_storage_delete(&map_b, task); - return 0; -} - SEC("fentry/bpf_local_storage_update") int BPF_PROG(on_update) { diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c index 66b3049822..683c8aaa63 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c @@ -20,8 +20,11 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_compiler.h" #include "test_cls_redirect.h" +#pragma GCC diagnostic ignored "-Waddress-of-packed-member" + #ifdef SUBPROGS #define INLINING __noinline #else @@ -267,7 +270,7 @@ static INLINING void pkt_ipv4_checksum(struct iphdr *iph) uint32_t acc = 0; uint16_t *ipw = (uint16_t *)iph; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++) { acc += ipw[i]; } @@ -294,7 +297,7 @@ bool pkt_skip_ipv6_extension_headers(buf_t *pkt, }; *is_fragment = false; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (int i = 0; i < 6; i++) { switch (exthdr.next) { case IPPROTO_FRAGMENT: diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c index f41c81212e..da54c09e9a 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c @@ -23,6 +23,8 @@ #include "test_cls_redirect.h" #include "bpf_kfuncs.h" +#pragma GCC diagnostic ignored "-Waddress-of-packed-member" + #define offsetofend(TYPE, MEMBER) \ (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER))) diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c index 22aba3f6e3..6fc8b9d66e 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c @@ -80,7 +80,7 @@ int test_core_type_id(void *ctx) * to detect whether this test has to be executed, however strange * that might look like. 
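The '#pragma GCC diagnostic ignored "-Waddress-of-packed-member"' lines added to the cls_redirect programs above (and to test_tc_tunnel.c below) silence a warning both GCC and clang emit when code takes a pointer into a member of a __packed structure, as the checksum loops here do with their uint16_t walks over headers; the possibly-unaligned accesses are intentional in this packet-parsing context. A hypothetical minimal reproducer of the warning:

struct encap_hdr {
	__u8 flags;
	struct iphdr ip;	/* misaligned by the leading byte */
} __attribute__((packed));

static void sum(struct encap_hdr *h)
{
	/* warns: taking address of packed member 'ip' */
	__u16 *p = (__u16 *)&h->ip;

	(void)p;
}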
* - * [0] https://reviews.llvm.org/D85174 + * [0] https://github.com/llvm/llvm-project/commit/00602ee7ef0bf6c68d690a2bd729c12b95c95c99 */ #if __has_builtin(__builtin_preserve_type_info) struct core_reloc_type_id_output *out = (void *)&data.out; diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c b/tools/testing/selftests/bpf/progs/test_fill_link_info.c index 69509f8bb6..6afa834756 100644 --- a/tools/testing/selftests/bpf/progs/test_fill_link_info.c +++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c @@ -33,6 +33,12 @@ int BPF_PROG(tp_run) return 0; } +SEC("perf_event") +int event_run(void *ctx) +{ + return 0; +} + SEC("kprobe.multi") int BPF_PROG(kmulti_run) { diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c index 17a9f59bf5..fc69ff1888 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func1.c +++ b/tools/testing/selftests/bpf/progs/test_global_func1.c @@ -5,7 +5,7 @@ #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#define MAX_STACK (512 - 3 * 32 + 8) +#define MAX_STACK 260 static __attribute__ ((noinline)) int f0(int var, struct __sk_buff *skb) @@ -30,6 +30,10 @@ int f3(int, struct __sk_buff *skb, int); __attribute__ ((noinline)) int f2(int val, struct __sk_buff *skb) { + volatile char buf[MAX_STACK] = {}; + + __sink(buf[MAX_STACK - 1]); + return f1(skb) + f3(val, skb, 1); } @@ -44,7 +48,7 @@ int f3(int val, struct __sk_buff *skb, int var) } SEC("tc") -__failure __msg("combined stack size of 4 calls is 544") +__failure __msg("combined stack size of 3 calls is") int global_func1(struct __sk_buff *skb) { return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4); diff --git a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c index 9a06e5eb1f..143c8a4852 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c +++ b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c @@ -26,6 +26,23 @@ int kprobe_typedef_ctx(void *ctx) return kprobe_typedef_ctx_subprog(ctx); } +/* s390x defines: + * + * typedef user_pt_regs bpf_user_pt_regs_t; + * typedef struct { ... } user_pt_regs; + * + * And so "canonical" underlying struct type is anonymous. + * So on s390x only valid ways to have PTR_TO_CTX argument in global subprogs + * are: + * - bpf_user_pt_regs_t *ctx (typedef); + * - struct bpf_user_pt_regs_t *ctx (backwards compatible struct hack); + * - void *ctx __arg_ctx (arg:ctx tag) + * + * Other architectures also allow using underlying struct types (e.g., + * `struct pt_regs *ctx` for x86-64) + */ +#ifndef bpf_target_s390 + #define pt_regs_struct_t typeof(*(__PT_REGS_CAST((struct pt_regs *)NULL))) __weak int kprobe_struct_ctx_subprog(pt_regs_struct_t *ctx) @@ -40,6 +57,8 @@ int kprobe_resolved_ctx(void *ctx) return kprobe_struct_ctx_subprog(ctx); } +#endif + /* this is current hack to make this work on old kernels */ struct bpf_user_pt_regs_t {}; diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c index 48ff2b2ad5..fed66f36ad 100644 --- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c +++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c @@ -6,6 +6,8 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_compiler.h" + /* Packet parsing state machine helpers. 
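The test_global_func1.c hunk above shrinks MAX_STACK to 260, gives f2 its own buffer, and relaxes the expected message: the verifier caps the combined stack of any call chain at 512 bytes (MAX_BPF_STACK), so once several frames in the chain each carry a 260-byte buffer the load must fail, now reporting "combined stack size of 3 calls is ...". Illustrative accounting, assuming the verifier's 32-byte frame rounding:

/* round_up(260, 32) = 288 bytes per buffer-carrying frame
 * 2 frames * 288    = 576 > 512 (MAX_BPF_STACK)
 */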
*/ #define cursor_advance(_cursor, _len) \ ({ void *_tmp = _cursor; _cursor += _len; _tmp; }) @@ -131,7 +133,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh, *pad_off = 0; // we can only go as far as ~10 TLVs due to the BPF max stack size - #pragma clang loop unroll(full) + __pragma_loop_unroll_full for (int i = 0; i < 10; i++) { struct sr6_tlv_t tlv; @@ -302,7 +304,7 @@ int __encap_srh(struct __sk_buff *skb) seg = (struct ip6_addr_t *)((char *)srh + sizeof(*srh)); - #pragma clang loop unroll(full) + __pragma_loop_unroll_full for (unsigned long long lo = 0; lo < 4; lo++) { seg->lo = bpf_cpu_to_be64(4 - lo); seg->hi = bpf_cpu_to_be64(hi); diff --git a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c index 4bdd65b5aa..2fdc44e766 100644 --- a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c +++ b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c @@ -6,13 +6,13 @@ char tp_name[128]; -SEC("lsm/bpf") +SEC("lsm.s/bpf") int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size) { switch (cmd) { case BPF_RAW_TRACEPOINT_OPEN: - bpf_probe_read_user_str(tp_name, sizeof(tp_name) - 1, - (void *)attr->raw_tracepoint.name); + bpf_copy_from_user(tp_name, sizeof(tp_name) - 1, + (void *)attr->raw_tracepoint.name); break; default: break; diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c index a7278f0643..5059050f74 100644 --- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c +++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c @@ -6,6 +6,8 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_compiler.h" + /* Packet parsing state machine helpers. */ #define cursor_advance(_cursor, _len) \ ({ void *_tmp = _cursor; _cursor += _len; _tmp; }) @@ -134,7 +136,7 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb, // we can only go as far as ~10 TLVs due to the BPF max stack size // workaround: define induction variable "i" as "long" instead // of "int" to prevent alu32 sub-register spilling. - #pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (long i = 0; i < 100; i++) { struct sr6_tlv_t tlv; diff --git a/tools/testing/selftests/bpf/progs/test_siphash.h b/tools/testing/selftests/bpf/progs/test_siphash.h new file mode 100644 index 0000000000..5d3a7ec367 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_siphash.h @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Amazon.com Inc. or its affiliates. 
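The test_ptr_untrusted.c change above swaps bpf_probe_read_user_str() for bpf_copy_from_user() and, in the same stroke, the section from "lsm/bpf" to "lsm.s/bpf": bpf_copy_from_user() may fault user pages in and hence sleep, so it is only available to sleepable programs. The core of the pattern:

SEC("lsm.s/bpf")	/* the '.s' suffix marks the program sleepable */
int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size)
{
	char name[16];

	/* sleepable-only helper: may fault in the user page */
	bpf_copy_from_user(name, sizeof(name) - 1,
			   (void *)attr->raw_tracepoint.name);
	return 0;
}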
*/ + +#ifndef _TEST_SIPHASH_H +#define _TEST_SIPHASH_H + +/* include/linux/bitops.h */ +static inline u64 rol64(u64 word, unsigned int shift) +{ + return (word << (shift & 63)) | (word >> ((-shift) & 63)); +} + +/* include/linux/siphash.h */ +#define SIPHASH_PERMUTATION(a, b, c, d) ( \ + (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \ + (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \ + (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \ + (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32)) + +#define SIPHASH_CONST_0 0x736f6d6570736575ULL +#define SIPHASH_CONST_1 0x646f72616e646f6dULL +#define SIPHASH_CONST_2 0x6c7967656e657261ULL +#define SIPHASH_CONST_3 0x7465646279746573ULL + +/* lib/siphash.c */ +#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3) + +#define PREAMBLE(len) \ + u64 v0 = SIPHASH_CONST_0; \ + u64 v1 = SIPHASH_CONST_1; \ + u64 v2 = SIPHASH_CONST_2; \ + u64 v3 = SIPHASH_CONST_3; \ + u64 b = ((u64)(len)) << 56; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= key->key[1]; \ + v0 ^= key->key[0]; + +#define POSTAMBLE \ + v3 ^= b; \ + SIPROUND; \ + SIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +static inline u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key) +{ + PREAMBLE(16) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + POSTAMBLE +} +#endif diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c index c482110cfc..a724a70c67 100644 --- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c +++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c @@ -3,12 +3,14 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + char _license[] SEC("license") = "GPL"; SEC("tc") int process(struct __sk_buff *skb) { - #pragma clang loop unroll(full) + __pragma_loop_unroll_full for (int i = 0; i < 5; i++) { if (skb->cb[i] != i + 1) return 1; diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c index b2440a0ff4..d8d77bdffd 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c @@ -101,4 +101,69 @@ int bpf_spin_lock_test(struct __sk_buff *skb) err: return err; } + +struct bpf_spin_lock lockA __hidden SEC(".data.A"); + +__noinline +static int static_subprog(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + if (ctx->protocol) + return ret; + return ret + ctx->len; +} + +__noinline +static int static_subprog_lock(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + ret = static_subprog(ctx); + bpf_spin_lock(&lockA); + return ret + ctx->len; +} + +__noinline +static int static_subprog_unlock(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + ret = static_subprog(ctx); + bpf_spin_unlock(&lockA); + return ret + ctx->len; +} + +SEC("tc") +int lock_static_subprog_call(struct __sk_buff *ctx) +{ + int ret = 0; + + bpf_spin_lock(&lockA); + if (ctx->mark == 42) + ret = static_subprog(ctx); + bpf_spin_unlock(&lockA); + return ret; +} + +SEC("tc") +int lock_static_subprog_lock(struct __sk_buff *ctx) +{ + int ret = 0; + + ret = static_subprog_lock(ctx); + bpf_spin_unlock(&lockA); + return ret; +} + +SEC("tc") +int lock_static_subprog_unlock(struct __sk_buff *ctx) +{ + int ret = 0; + + bpf_spin_lock(&lockA); + ret = static_subprog_unlock(ctx); + return ret; +} + char _license[] 
SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c index 86cd183ef6..43f40c4fe2 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c @@ -201,4 +201,48 @@ CHECK(innermapval_mapval, &iv->lock, &v->lock); #undef CHECK +__noinline +int global_subprog(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + if (ctx->protocol) + ret += ctx->protocol; + return ret + ctx->mark; +} + +__noinline +static int static_subprog_call_global(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + if (ctx->protocol) + return ret; + return ret + ctx->len + global_subprog(ctx); +} + +SEC("?tc") +int lock_global_subprog_call1(struct __sk_buff *ctx) +{ + int ret = 0; + + bpf_spin_lock(&lockA); + if (ctx->mark == 42) + ret = global_subprog(ctx); + bpf_spin_unlock(&lockA); + return ret; +} + +SEC("?tc") +int lock_global_subprog_call2(struct __sk_buff *ctx) +{ + int ret = 0; + + bpf_spin_lock(&lockA); + if (ctx->mark == 42) + ret = static_subprog_call_global(ctx); + bpf_spin_unlock(&lockA); + return ret; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c index 553a282d81..7f74077d66 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c @@ -9,6 +9,8 @@ #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + #ifndef ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #endif @@ -30,7 +32,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret != sizeof(tcp_mem_name) - 1) return 0; -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (i = 0; i < sizeof(tcp_mem_name); ++i) if (name[i] != tcp_mem_name[i]) return 0; @@ -59,7 +61,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret >= MAX_VALUE_STR_LEN) return 0; -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) { ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0, tcp_mem + i); diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c index 2b64bc563a..68a75436e8 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c @@ -9,6 +9,8 @@ #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + #ifndef ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #endif @@ -30,7 +32,7 @@ static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret != sizeof(tcp_mem_name) - 1) return 0; -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (i = 0; i < sizeof(tcp_mem_name); ++i) if (name[i] != tcp_mem_name[i]) return 0; @@ -57,7 +59,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret >= MAX_VALUE_STR_LEN) return 0; -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) { ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0, tcp_mem + i); diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c index 5489823c83..efc3c61f78 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c @@ -9,6 +9,8 @@ #include <bpf/bpf_helpers.h> +#include 
"bpf_compiler.h" + /* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */ #define MAX_ULONG_STR_LEN 0xF @@ -31,7 +33,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret != sizeof(tcp_mem_name) - 1) return 0; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (i = 0; i < sizeof(tcp_mem_name); ++i) if (name[i] != tcp_mem_name[i]) return 0; @@ -57,7 +59,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret >= MAX_VALUE_STR_LEN) return 0; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) { ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0, tcp_mem + i); diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c index e6e678aa98..404124a938 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c +++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c @@ -19,6 +19,9 @@ #include <bpf/bpf_endian.h> #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + +#pragma GCC diagnostic ignored "-Waddress-of-packed-member" static const int cfg_port = 8000; @@ -81,7 +84,7 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph) iph->check = 0; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (i = 0, csum = 0; i < sizeof(*iph) >> 1; i++) csum += *iph16++; diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c new file mode 100644 index 0000000000..c8e4553648 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c @@ -0,0 +1,595 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Amazon.com Inc. or its affiliates. */ + +#include "vmlinux.h" + +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> +#include "bpf_tracing_net.h" +#include "bpf_kfuncs.h" +#include "test_siphash.h" +#include "test_tcp_custom_syncookie.h" + +#define MAX_PACKET_OFF 0xffff + +/* Hash is calculated for each client and split into ISN and TS. + * + * MSB LSB + * ISN: | 31 ... 8 | 7 6 | 5 | 4 | 3 2 1 0 | + * | Hash_1 | MSS | ECN | SACK | WScale | + * + * TS: | 31 ... 8 | 7 ... 0 | + * | Random | Hash_2 | + */ +#define COOKIE_BITS 8 +#define COOKIE_MASK (((__u32)1 << COOKIE_BITS) - 1) + +enum { + /* 0xf is invalid thus means that SYN did not have WScale. 
*/ + BPF_SYNCOOKIE_WSCALE_MASK = (1 << 4) - 1, + BPF_SYNCOOKIE_SACK = (1 << 4), + BPF_SYNCOOKIE_ECN = (1 << 5), +}; + +#define MSS_LOCAL_IPV4 65495 +#define MSS_LOCAL_IPV6 65476 + +const __u16 msstab4[] = { + 536, + 1300, + 1460, + MSS_LOCAL_IPV4, +}; + +const __u16 msstab6[] = { + 1280 - 60, /* IPV6_MIN_MTU - 60 */ + 1480 - 60, + 9000 - 60, + MSS_LOCAL_IPV6, +}; + +static siphash_key_t test_key_siphash = { + { 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL } +}; + +struct tcp_syncookie { + struct __sk_buff *skb; + void *data; + void *data_end; + struct ethhdr *eth; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + struct tcphdr *tcp; + __be32 *ptr32; + struct bpf_tcp_req_attrs attrs; + u32 off; + u32 cookie; + u64 first; +}; + +bool handled_syn, handled_ack; + +static int tcp_load_headers(struct tcp_syncookie *ctx) +{ + ctx->data = (void *)(long)ctx->skb->data; + ctx->data_end = (void *)(long)ctx->skb->data_end; + ctx->eth = (struct ethhdr *)(long)ctx->skb->data; + + if (ctx->eth + 1 > ctx->data_end) + goto err; + + switch (bpf_ntohs(ctx->eth->h_proto)) { + case ETH_P_IP: + ctx->ipv4 = (struct iphdr *)(ctx->eth + 1); + + if (ctx->ipv4 + 1 > ctx->data_end) + goto err; + + if (ctx->ipv4->ihl != sizeof(*ctx->ipv4) / 4) + goto err; + + if (ctx->ipv4->version != 4) + goto err; + + if (ctx->ipv4->protocol != IPPROTO_TCP) + goto err; + + ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1); + break; + case ETH_P_IPV6: + ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1); + + if (ctx->ipv6 + 1 > ctx->data_end) + goto err; + + if (ctx->ipv6->version != 6) + goto err; + + if (ctx->ipv6->nexthdr != NEXTHDR_TCP) + goto err; + + ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1); + break; + default: + goto err; + } + + if (ctx->tcp + 1 > ctx->data_end) + goto err; + + return 0; +err: + return -1; +} + +static int tcp_reload_headers(struct tcp_syncookie *ctx) +{ + /* Without volatile, + * R3 32-bit pointer arithmetic prohibited + */ + volatile u64 data_len = ctx->skb->data_end - ctx->skb->data; + + if (ctx->tcp->doff < sizeof(*ctx->tcp) / 4) + goto err; + + /* Needed to calculate csum and parse TCP options. */ + if (bpf_skb_change_tail(ctx->skb, data_len + 60 - ctx->tcp->doff * 4, 0)) + goto err; + + ctx->data = (void *)(long)ctx->skb->data; + ctx->data_end = (void *)(long)ctx->skb->data_end; + ctx->eth = (struct ethhdr *)(long)ctx->skb->data; + if (ctx->ipv4) { + ctx->ipv4 = (struct iphdr *)(ctx->eth + 1); + ctx->ipv6 = NULL; + ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1); + } else { + ctx->ipv4 = NULL; + ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1); + ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1); + } + + if ((void *)ctx->tcp + 60 > ctx->data_end) + goto err; + + return 0; +err: + return -1; +} + +static __sum16 tcp_v4_csum(struct tcp_syncookie *ctx, __wsum csum) +{ + return csum_tcpudp_magic(ctx->ipv4->saddr, ctx->ipv4->daddr, + ctx->tcp->doff * 4, IPPROTO_TCP, csum); +} + +static __sum16 tcp_v6_csum(struct tcp_syncookie *ctx, __wsum csum) +{ + return csum_ipv6_magic(&ctx->ipv6->saddr, &ctx->ipv6->daddr, + ctx->tcp->doff * 4, IPPROTO_TCP, csum); +} + +static int tcp_validate_header(struct tcp_syncookie *ctx) +{ + s64 csum; + + if (tcp_reload_headers(ctx)) + goto err; + + csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0); + if (csum < 0) + goto err; + + if (ctx->ipv4) { + /* check tcp_v4_csum(csum) is 0 if not on lo. 
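Given the ISN layout documented above and the BPF_SYNCOOKIE_* flags, encoding and decoding the cookie's low byte is plain mask work; tcp_prepare_cookie() and tcp_validate_cookie() below implement exactly this. A compact sketch:

/* encode: hash already computed, low COOKIE_BITS cleared */
cookie  = hash & ~COOKIE_MASK;
cookie |= mssind << 6;					/* bits 7..6 */
cookie |= BPF_SYNCOOKIE_ECN;				/* bit 5 */
cookie |= BPF_SYNCOOKIE_SACK;				/* bit 4 */
cookie |= snd_wscale & BPF_SYNCOOKIE_WSCALE_MASK;	/* bits 3..0 */

/* decode */
mssind     = (cookie & (3 << 6)) >> 6;
ecn_ok     = cookie & BPF_SYNCOOKIE_ECN;
sack_ok    = cookie & BPF_SYNCOOKIE_SACK;
snd_wscale = cookie & BPF_SYNCOOKIE_WSCALE_MASK;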
*/ + + csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, ctx->ipv4->ihl * 4, 0); + if (csum < 0) + goto err; + + if (csum_fold(csum) != 0) + goto err; + } else if (ctx->ipv6) { + /* check tcp_v6_csum(csum) is 0 if not on lo. */ + } + + return 0; +err: + return -1; +} + +static __always_inline void *next(struct tcp_syncookie *ctx, __u32 sz) +{ + __u64 off = ctx->off; + __u8 *data; + + /* Verifier forbids access to packet when offset exceeds MAX_PACKET_OFF */ + if (off > MAX_PACKET_OFF - sz) + return NULL; + + data = ctx->data + off; + barrier_var(data); + if (data + sz >= ctx->data_end) + return NULL; + + ctx->off += sz; + return data; +} + +static int tcp_parse_option(__u32 index, struct tcp_syncookie *ctx) +{ + __u8 *opcode, *opsize, *wscale; + __u32 *tsval, *tsecr; + __u16 *mss; + __u32 off; + + off = ctx->off; + opcode = next(ctx, 1); + if (!opcode) + goto stop; + + if (*opcode == TCPOPT_EOL) + goto stop; + + if (*opcode == TCPOPT_NOP) + goto next; + + opsize = next(ctx, 1); + if (!opsize) + goto stop; + + if (*opsize < 2) + goto stop; + + switch (*opcode) { + case TCPOPT_MSS: + mss = next(ctx, 2); + if (*opsize == TCPOLEN_MSS && ctx->tcp->syn && mss) + ctx->attrs.mss = get_unaligned_be16(mss); + break; + case TCPOPT_WINDOW: + wscale = next(ctx, 1); + if (*opsize == TCPOLEN_WINDOW && ctx->tcp->syn && wscale) { + ctx->attrs.wscale_ok = 1; + ctx->attrs.snd_wscale = *wscale; + } + break; + case TCPOPT_TIMESTAMP: + tsval = next(ctx, 4); + tsecr = next(ctx, 4); + if (*opsize == TCPOLEN_TIMESTAMP && tsval && tsecr) { + ctx->attrs.rcv_tsval = get_unaligned_be32(tsval); + ctx->attrs.rcv_tsecr = get_unaligned_be32(tsecr); + + if (ctx->tcp->syn && ctx->attrs.rcv_tsecr) + ctx->attrs.tstamp_ok = 0; + else + ctx->attrs.tstamp_ok = 1; + } + break; + case TCPOPT_SACK_PERM: + if (*opsize == TCPOLEN_SACK_PERM && ctx->tcp->syn) + ctx->attrs.sack_ok = 1; + break; + } + + ctx->off = off + *opsize; +next: + return 0; +stop: + return 1; +} + +static void tcp_parse_options(struct tcp_syncookie *ctx) +{ + ctx->off = (__u8 *)(ctx->tcp + 1) - (__u8 *)ctx->data, + + bpf_loop(40, tcp_parse_option, ctx, 0); +} + +static int tcp_validate_sysctl(struct tcp_syncookie *ctx) +{ + if ((ctx->ipv4 && ctx->attrs.mss != MSS_LOCAL_IPV4) || + (ctx->ipv6 && ctx->attrs.mss != MSS_LOCAL_IPV6)) + goto err; + + if (!ctx->attrs.wscale_ok || ctx->attrs.snd_wscale != 7) + goto err; + + if (!ctx->attrs.tstamp_ok) + goto err; + + if (!ctx->attrs.sack_ok) + goto err; + + if (!ctx->tcp->ece || !ctx->tcp->cwr) + goto err; + + return 0; +err: + return -1; +} + +static void tcp_prepare_cookie(struct tcp_syncookie *ctx) +{ + u32 seq = bpf_ntohl(ctx->tcp->seq); + u64 first = 0, second; + int mssind = 0; + u32 hash; + + if (ctx->ipv4) { + for (mssind = ARRAY_SIZE(msstab4) - 1; mssind; mssind--) + if (ctx->attrs.mss >= msstab4[mssind]) + break; + + ctx->attrs.mss = msstab4[mssind]; + + first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr; + } else if (ctx->ipv6) { + for (mssind = ARRAY_SIZE(msstab6) - 1; mssind; mssind--) + if (ctx->attrs.mss >= msstab6[mssind]) + break; + + ctx->attrs.mss = msstab6[mssind]; + + first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 | + ctx->ipv6->daddr.in6_u.u6_addr32[0]; + } + + second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest; + hash = siphash_2u64(first, second, &test_key_siphash); + + if (ctx->attrs.tstamp_ok) { + ctx->attrs.rcv_tsecr = bpf_get_prandom_u32(); + ctx->attrs.rcv_tsecr &= ~COOKIE_MASK; + ctx->attrs.rcv_tsecr |= hash & COOKIE_MASK; + } + + hash &= ~COOKIE_MASK; + hash |= mssind << 
6; + + if (ctx->attrs.wscale_ok) + hash |= ctx->attrs.snd_wscale & BPF_SYNCOOKIE_WSCALE_MASK; + + if (ctx->attrs.sack_ok) + hash |= BPF_SYNCOOKIE_SACK; + + if (ctx->attrs.tstamp_ok && ctx->tcp->ece && ctx->tcp->cwr) + hash |= BPF_SYNCOOKIE_ECN; + + ctx->cookie = hash; +} + +static void tcp_write_options(struct tcp_syncookie *ctx) +{ + ctx->ptr32 = (__be32 *)(ctx->tcp + 1); + + *ctx->ptr32++ = bpf_htonl(TCPOPT_MSS << 24 | TCPOLEN_MSS << 16 | + ctx->attrs.mss); + + if (ctx->attrs.wscale_ok) + *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 | + TCPOPT_WINDOW << 16 | + TCPOLEN_WINDOW << 8 | + ctx->attrs.snd_wscale); + + if (ctx->attrs.tstamp_ok) { + if (ctx->attrs.sack_ok) + *ctx->ptr32++ = bpf_htonl(TCPOPT_SACK_PERM << 24 | + TCPOLEN_SACK_PERM << 16 | + TCPOPT_TIMESTAMP << 8 | + TCPOLEN_TIMESTAMP); + else + *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 | + TCPOPT_NOP << 16 | + TCPOPT_TIMESTAMP << 8 | + TCPOLEN_TIMESTAMP); + + *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsecr); + *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsval); + } else if (ctx->attrs.sack_ok) { + *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 | + TCPOPT_NOP << 16 | + TCPOPT_SACK_PERM << 8 | + TCPOLEN_SACK_PERM); + } +} + +static int tcp_handle_syn(struct tcp_syncookie *ctx) +{ + s64 csum; + + if (tcp_validate_header(ctx)) + goto err; + + tcp_parse_options(ctx); + + if (tcp_validate_sysctl(ctx)) + goto err; + + tcp_prepare_cookie(ctx); + tcp_write_options(ctx); + + swap(ctx->tcp->source, ctx->tcp->dest); + ctx->tcp->check = 0; + ctx->tcp->ack_seq = bpf_htonl(bpf_ntohl(ctx->tcp->seq) + 1); + ctx->tcp->seq = bpf_htonl(ctx->cookie); + ctx->tcp->doff = ((long)ctx->ptr32 - (long)ctx->tcp) >> 2; + ctx->tcp->ack = 1; + if (!ctx->attrs.tstamp_ok || !ctx->tcp->ece || !ctx->tcp->cwr) + ctx->tcp->ece = 0; + ctx->tcp->cwr = 0; + + csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0); + if (csum < 0) + goto err; + + if (ctx->ipv4) { + swap(ctx->ipv4->saddr, ctx->ipv4->daddr); + ctx->tcp->check = tcp_v4_csum(ctx, csum); + + ctx->ipv4->check = 0; + ctx->ipv4->tos = 0; + ctx->ipv4->tot_len = bpf_htons((long)ctx->ptr32 - (long)ctx->ipv4); + ctx->ipv4->id = 0; + ctx->ipv4->ttl = 64; + + csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, sizeof(*ctx->ipv4), 0); + if (csum < 0) + goto err; + + ctx->ipv4->check = csum_fold(csum); + } else if (ctx->ipv6) { + swap(ctx->ipv6->saddr, ctx->ipv6->daddr); + ctx->tcp->check = tcp_v6_csum(ctx, csum); + + *(__be32 *)ctx->ipv6 = bpf_htonl(0x60000000); + ctx->ipv6->payload_len = bpf_htons((long)ctx->ptr32 - (long)ctx->tcp); + ctx->ipv6->hop_limit = 64; + } + + swap_array(ctx->eth->h_source, ctx->eth->h_dest); + + if (bpf_skb_change_tail(ctx->skb, (long)ctx->ptr32 - (long)ctx->eth, 0)) + goto err; + + return bpf_redirect(ctx->skb->ifindex, 0); +err: + return TC_ACT_SHOT; +} + +static int tcp_validate_cookie(struct tcp_syncookie *ctx) +{ + u32 cookie = bpf_ntohl(ctx->tcp->ack_seq) - 1; + u32 seq = bpf_ntohl(ctx->tcp->seq) - 1; + u64 first = 0, second; + int mssind; + u32 hash; + + if (ctx->ipv4) + first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr; + else if (ctx->ipv6) + first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 | + ctx->ipv6->daddr.in6_u.u6_addr32[0]; + + second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest; + hash = siphash_2u64(first, second, &test_key_siphash); + + if (ctx->attrs.tstamp_ok) + hash -= ctx->attrs.rcv_tsecr & COOKIE_MASK; + else + hash &= ~COOKIE_MASK; + + hash -= cookie & ~COOKIE_MASK; + if (hash) + goto err; + + mssind = (cookie & (3 << 6)) >> 6; + if (ctx->ipv4) { + 
if (mssind > ARRAY_SIZE(msstab4)) + goto err; + + ctx->attrs.mss = msstab4[mssind]; + } else { + if (mssind > ARRAY_SIZE(msstab6)) + goto err; + + ctx->attrs.mss = msstab6[mssind]; + } + + ctx->attrs.snd_wscale = cookie & BPF_SYNCOOKIE_WSCALE_MASK; + ctx->attrs.rcv_wscale = ctx->attrs.snd_wscale; + ctx->attrs.wscale_ok = ctx->attrs.snd_wscale == BPF_SYNCOOKIE_WSCALE_MASK; + ctx->attrs.sack_ok = cookie & BPF_SYNCOOKIE_SACK; + ctx->attrs.ecn_ok = cookie & BPF_SYNCOOKIE_ECN; + + return 0; +err: + return -1; +} + +static int tcp_handle_ack(struct tcp_syncookie *ctx) +{ + struct bpf_sock_tuple tuple; + struct bpf_sock *skc; + int ret = TC_ACT_OK; + struct sock *sk; + u32 tuple_size; + + if (ctx->ipv4) { + tuple.ipv4.saddr = ctx->ipv4->saddr; + tuple.ipv4.daddr = ctx->ipv4->daddr; + tuple.ipv4.sport = ctx->tcp->source; + tuple.ipv4.dport = ctx->tcp->dest; + tuple_size = sizeof(tuple.ipv4); + } else if (ctx->ipv6) { + __builtin_memcpy(tuple.ipv6.saddr, &ctx->ipv6->saddr, sizeof(tuple.ipv6.saddr)); + __builtin_memcpy(tuple.ipv6.daddr, &ctx->ipv6->daddr, sizeof(tuple.ipv6.daddr)); + tuple.ipv6.sport = ctx->tcp->source; + tuple.ipv6.dport = ctx->tcp->dest; + tuple_size = sizeof(tuple.ipv6); + } else { + goto out; + } + + skc = bpf_skc_lookup_tcp(ctx->skb, &tuple, tuple_size, -1, 0); + if (!skc) + goto out; + + if (skc->state != TCP_LISTEN) + goto release; + + sk = (struct sock *)bpf_skc_to_tcp_sock(skc); + if (!sk) + goto err; + + if (tcp_validate_header(ctx)) + goto err; + + tcp_parse_options(ctx); + + if (tcp_validate_cookie(ctx)) + goto err; + + ret = bpf_sk_assign_tcp_reqsk(ctx->skb, sk, &ctx->attrs, sizeof(ctx->attrs)); + if (ret < 0) + goto err; + +release: + bpf_sk_release(skc); +out: + return ret; + +err: + ret = TC_ACT_SHOT; + goto release; +} + +SEC("tc") +int tcp_custom_syncookie(struct __sk_buff *skb) +{ + struct tcp_syncookie ctx = { + .skb = skb, + }; + + if (tcp_load_headers(&ctx)) + return TC_ACT_OK; + + if (ctx.tcp->rst) + return TC_ACT_OK; + + if (ctx.tcp->syn) { + if (ctx.tcp->ack) + return TC_ACT_OK; + + handled_syn = true; + + return tcp_handle_syn(&ctx); + } + + handled_ack = true; + + return tcp_handle_ack(&ctx); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h new file mode 100644 index 0000000000..29a6a53cf2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Amazon.com Inc. or its affiliates. 
*/ + +#ifndef _TEST_TCP_SYNCOOKIE_H +#define _TEST_TCP_SYNCOOKIE_H + +#define __packed __attribute__((__packed__)) +#define __force + +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +#define swap(a, b) \ + do { \ + typeof(a) __tmp = (a); \ + (a) = (b); \ + (b) = __tmp; \ + } while (0) + +#define swap_array(a, b) \ + do { \ + typeof(a) __tmp[sizeof(a)]; \ + __builtin_memcpy(__tmp, a, sizeof(a)); \ + __builtin_memcpy(a, b, sizeof(a)); \ + __builtin_memcpy(b, __tmp, sizeof(a)); \ + } while (0) + +/* asm-generic/unaligned.h */ +#define __get_unaligned_t(type, ptr) ({ \ + const struct { type x; } __packed * __pptr = (typeof(__pptr))(ptr); \ + __pptr->x; \ +}) + +#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr)) + +static inline u16 get_unaligned_be16(const void *p) +{ + return bpf_ntohs(__get_unaligned_t(__be16, p)); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return bpf_ntohl(__get_unaligned_t(__be32, p)); +} + +/* lib/checksum.c */ +static inline u32 from64to32(u64 x) +{ + /* add up 32-bit and 32-bit for 32+c bit */ + x = (x & 0xffffffff) + (x >> 32); + /* add up carry.. */ + x = (x & 0xffffffff) + (x >> 32); + return (u32)x; +} + +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + unsigned long long s = (__force u32)sum; + + s += (__force u32)saddr; + s += (__force u32)daddr; +#ifdef __BIG_ENDIAN + s += proto + len; +#else + s += (proto + len) << 8; +#endif + return (__force __wsum)from64to32(s); +} + +/* asm-generic/checksum.h */ +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + + sum = (sum & 0xffff) + (sum >> 16); + sum = (sum & 0xffff) + (sum >> 16); + return (__force __sum16)~sum; +} + +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} + +/* net/ipv6/ip6_checksum.c */ +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum csum) +{ + int carry; + __u32 ulen; + __u32 uproto; + __u32 sum = (__force u32)csum; + + sum += (__force u32)saddr->in6_u.u6_addr32[0]; + carry = (sum < (__force u32)saddr->in6_u.u6_addr32[0]); + sum += carry; + + sum += (__force u32)saddr->in6_u.u6_addr32[1]; + carry = (sum < (__force u32)saddr->in6_u.u6_addr32[1]); + sum += carry; + + sum += (__force u32)saddr->in6_u.u6_addr32[2]; + carry = (sum < (__force u32)saddr->in6_u.u6_addr32[2]); + sum += carry; + + sum += (__force u32)saddr->in6_u.u6_addr32[3]; + carry = (sum < (__force u32)saddr->in6_u.u6_addr32[3]); + sum += carry; + + sum += (__force u32)daddr->in6_u.u6_addr32[0]; + carry = (sum < (__force u32)daddr->in6_u.u6_addr32[0]); + sum += carry; + + sum += (__force u32)daddr->in6_u.u6_addr32[1]; + carry = (sum < (__force u32)daddr->in6_u.u6_addr32[1]); + sum += carry; + + sum += (__force u32)daddr->in6_u.u6_addr32[2]; + carry = (sum < (__force u32)daddr->in6_u.u6_addr32[2]); + sum += carry; + + sum += (__force u32)daddr->in6_u.u6_addr32[3]; + carry = (sum < (__force u32)daddr->in6_u.u6_addr32[3]); + sum += carry; + + ulen = (__force u32)bpf_htonl((__u32)len); + sum += ulen; + carry = (sum < ulen); + sum += carry; + + uproto = (__force u32)bpf_htonl(proto); + sum += uproto; + carry = (sum < uproto); + sum += carry; + + return csum_fold((__force __wsum)sum); +} +#endif diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c 
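csum_fold() above is the standard one's-complement fold: add the two 16-bit halves, absorb any carry with a second fold, then invert. A worked example:

/* sum     = 0x12345678
 * fold #1 : 0x1234 + 0x5678 = 0x68ac
 * fold #2 : no carry left, still 0x68ac
 * ~0x68ac = 0x9753, the value that goes on the wire
 */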
b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index cf7ed8cbb1..a3f3f43fc1 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -59,7 +59,7 @@ int bpf_testcb(struct bpf_sock_ops *skops) asm volatile ( "%[op] = *(u32 *)(%[skops] +96)" - : [op] "+r"(op) + : [op] "=r"(op) : [skops] "r"(skops) :); diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c index d7a9a74b72..8caf58be58 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp.c +++ b/tools/testing/selftests/bpf/progs/test_xdp.c @@ -19,6 +19,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> #include "test_iptunnel_common.h" +#include "bpf_compiler.h" struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); @@ -137,7 +138,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp) iph->ttl = 8; next_iph = (__u16 *)iph; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (i = 0; i < sizeof(*iph) >> 1; i++) csum += *next_iph++; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c index 78c368e717..67a77944ef 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c @@ -18,11 +18,11 @@ #include "test_iptunnel_common.h" #include "bpf_kfuncs.h" -const size_t tcphdr_sz = sizeof(struct tcphdr); -const size_t udphdr_sz = sizeof(struct udphdr); -const size_t ethhdr_sz = sizeof(struct ethhdr); -const size_t iphdr_sz = sizeof(struct iphdr); -const size_t ipv6hdr_sz = sizeof(struct ipv6hdr); +#define tcphdr_sz sizeof(struct tcphdr) +#define udphdr_sz sizeof(struct udphdr) +#define ethhdr_sz sizeof(struct ethhdr) +#define iphdr_sz sizeof(struct iphdr) +#define ipv6hdr_sz sizeof(struct ipv6hdr) struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c index c98fb44156..93267a6882 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_loop.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c @@ -15,6 +15,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> #include "test_iptunnel_common.h" +#include "bpf_compiler.h" struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); @@ -133,7 +134,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp) iph->ttl = 8; next_iph = (__u16 *)iph; -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (i = 0; i < sizeof(*iph) >> 1; i++) csum += *next_iph++; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c index 42c8f6ded0..5c7e4758a0 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c @@ -15,6 +15,7 @@ #include <linux/udp.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_compiler.h" static __always_inline __u32 rol32(__u32 word, unsigned int shift) { @@ -362,7 +363,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval, iph->ttl = 4; next_iph_u16 = (__u16 *) iph; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (int i = 0; i < sizeof(struct iphdr) >> 1; i++) csum += *next_iph_u16++; iph->check = ~((csum & 0xffff) + (csum >> 16)); @@ -409,7 +410,7 @@ int send_icmp_reply(void *data, void *data_end) iph->saddr = tmp_addr; iph->check = 0; next_iph_u16 = (__u16 *) iph; -#pragma clang loop unroll(full) + 
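The one-character fix above in test_tcpbpf_kern.c corrects an inline-asm constraint: "+r" declares the operand read-write, which makes the compiler treat the prior (here uninitialized) value of op as an input, whereas "=r" declares a pure output, the right contract for a plain load. In context:

int op;

asm volatile("%[op] = *(u32 *)(%[skops] +96)"
	     : [op] "=r"(op)	/* output only: op is fully overwritten */
	     : [skops] "r"(skops)
	     :);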
__pragma_loop_unroll_full for (int i = 0; i < sizeof(struct iphdr) >> 1; i++) csum += *next_iph_u16++; iph->check = ~((csum & 0xffff) + (csum >> 16)); diff --git a/tools/testing/selftests/bpf/progs/token_lsm.c b/tools/testing/selftests/bpf/progs/token_lsm.c new file mode 100644 index 0000000000..e4d59b6ba7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/token_lsm.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +int my_pid; +bool reject_capable; +bool reject_cmd; + +SEC("lsm/bpf_token_capable") +int BPF_PROG(token_capable, struct bpf_token *token, int cap) +{ + if (my_pid == 0 || my_pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + if (reject_capable) + return -1; + return 0; +} + +SEC("lsm/bpf_token_cmd") +int BPF_PROG(token_cmd, struct bpf_token *token, enum bpf_cmd cmd) +{ + if (my_pid == 0 || my_pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + if (reject_cmd) + return -1; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/tracing_failure.c b/tools/testing/selftests/bpf/progs/tracing_failure.c new file mode 100644 index 0000000000..d41665d2ec --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tracing_failure.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +SEC("?fentry/bpf_spin_lock") +int BPF_PROG(test_spin_lock, struct bpf_spin_lock *lock) +{ + return 0; +} + +SEC("?fentry/bpf_spin_unlock") +int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c index 694e7cec18..5fda439010 100644 --- a/tools/testing/selftests/bpf/progs/trigger_bench.c +++ b/tools/testing/selftests/bpf/progs/trigger_bench.c @@ -33,6 +33,27 @@ int bench_trigger_kprobe(void *ctx) return 0; } +SEC("kretprobe/" SYS_PREFIX "sys_getpgid") +int bench_trigger_kretprobe(void *ctx) +{ + __sync_add_and_fetch(&hits, 1); + return 0; +} + +SEC("kprobe.multi/" SYS_PREFIX "sys_getpgid") +int bench_trigger_kprobe_multi(void *ctx) +{ + __sync_add_and_fetch(&hits, 1); + return 0; +} + +SEC("kretprobe.multi/" SYS_PREFIX "sys_getpgid") +int bench_trigger_kretprobe_multi(void *ctx) +{ + __sync_add_and_fetch(&hits, 1); + return 0; +} + SEC("fentry/" SYS_PREFIX "sys_getpgid") int bench_trigger_fentry(void *ctx) { @@ -40,6 +61,13 @@ int bench_trigger_fentry(void *ctx) return 0; } +SEC("fexit/" SYS_PREFIX "sys_getpgid") +int bench_trigger_fexit(void *ctx) +{ + __sync_add_and_fetch(&hits, 1); + return 0; +} + SEC("fentry.s/" SYS_PREFIX "sys_getpgid") int bench_trigger_fentry_sleep(void *ctx) { diff --git a/tools/testing/selftests/bpf/progs/type_cast.c b/tools/testing/selftests/bpf/progs/type_cast.c index a9629ac230..9d808b8f4a 100644 --- a/tools/testing/selftests/bpf/progs/type_cast.c +++ b/tools/testing/selftests/bpf/progs/type_cast.c @@ -4,6 +4,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> +#include "bpf_kfuncs.h" struct { __uint(type, BPF_MAP_TYPE_TASK_STORAGE); @@ -19,9 +20,6 @@ char name[IFNAMSIZ]; unsigned int inum; unsigned int meta_len, frag0_len, kskb_len, kskb2_len; -void *bpf_cast_to_kern_ctx(void *) __ksym; -void 
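token_lsm.c above is steered entirely through its global variables. A hypothetical skeleton-based driver (names follow the program; global data maps are memory-mapped, so the writes below are visible to the programs; error handling elided):

struct token_lsm *skel = token_lsm__open_and_load();

if (!skel)
	return;

skel->bss->my_pid = getpid();		/* only police this process */
skel->bss->reject_capable = true;	/* make bpf_token_capable deny */

if (!token_lsm__attach(skel)) {
	/* ... perform token operations that should now be denied ... */
}
token_lsm__destroy(skel);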
*bpf_rdonly_cast(void *, __u32) __ksym; - SEC("?xdp") int md_xdp(struct xdp_md *ctx) { @@ -48,13 +46,12 @@ int md_skb(struct __sk_buff *skb) /* Simulate the following kernel macro: * #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) */ - shared_info = bpf_rdonly_cast(kskb->head + kskb->end, - bpf_core_type_id_kernel(struct skb_shared_info)); + shared_info = bpf_core_cast(kskb->head + kskb->end, struct skb_shared_info); meta_len = shared_info->meta_len; frag0_len = shared_info->frag_list->len; /* kskb2 should be equal to kskb */ - kskb2 = bpf_rdonly_cast(kskb, bpf_core_type_id_kernel(struct sk_buff)); + kskb2 = bpf_core_cast(kskb, typeof(*kskb2)); kskb2_len = kskb2->len; return 0; } @@ -65,7 +62,7 @@ int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id) struct task_struct *task, *task_dup; task = bpf_get_current_task_btf(); - task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct)); + task_dup = bpf_core_cast(task, struct task_struct); (void)bpf_task_storage_get(&enter_id, task_dup, 0, 0); return 0; } @@ -73,7 +70,7 @@ int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id) SEC("?tracepoint/syscalls/sys_enter_nanosleep") int kctx_u64(void *ctx) { - u64 *kctx = bpf_rdonly_cast(ctx, bpf_core_type_id_kernel(u64)); + u64 *kctx = bpf_core_cast(ctx, u64); (void)kctx; return 0; diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c new file mode 100644 index 0000000000..93144ae6df --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_arena.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" +#include "bpf_arena_common.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARENA); + __uint(map_flags, BPF_F_MMAPABLE); + __uint(max_entries, 2); /* arena of two pages close to 32-bit boundary*/ +#ifdef __TARGET_ARCH_arm64 + __ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */ +#else + __ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */ +#endif +} arena SEC(".maps"); + +SEC("syscall") +__success __retval(0) +int basic_alloc1(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile int __arena *page1, *page2, *no_page, *page3; + + page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page1) + return 1; + *page1 = 1; + page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page2) + return 2; + *page2 = 2; + no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (no_page) + return 3; + if (*page1 != 1) + return 4; + if (*page2 != 2) + return 5; + bpf_arena_free_pages(&arena, (void __arena *)page2, 1); + if (*page1 != 1) + return 6; + if (*page2 != 0) /* use-after-free should return 0 */ + return 7; + page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page3) + return 8; + *page3 = 3; + if (page2 != page3) + return 9; + if (*page1 != 1) + return 10; +#endif + return 0; +} + +SEC("syscall") +__success __retval(0) +int basic_alloc2(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile char __arena *page1, *page2, *page3, *page4; + + page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0); + if (!page1) + return 1; + page2 = page1 + __PAGE_SIZE; + page3 = page1 + __PAGE_SIZE * 2; + page4 = page1 - __PAGE_SIZE; + 
*page1 = 1; + *page2 = 2; + *page3 = 3; + *page4 = 4; + if (*page1 != 1) + return 1; + if (*page2 != 2) + return 2; + if (*page3 != 0) + return 3; + if (*page4 != 0) + return 4; + bpf_arena_free_pages(&arena, (void __arena *)page1, 2); + if (*page1 != 0) + return 5; + if (*page2 != 0) + return 6; + if (*page3 != 0) + return 7; + if (*page4 != 0) + return 8; +#endif + return 0; +} + +struct bpf_arena___l { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +SEC("syscall") +__success __retval(0) __log_level(2) +int basic_alloc3(void *ctx) +{ + struct bpf_arena___l *ar = (struct bpf_arena___l *)&arena; + volatile char __arena *pages; + + pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0); + if (!pages) + return 1; + return 0; +} + +SEC("iter.s/bpf_map") +__success __log_level(2) +int iter_maps1(struct bpf_iter__bpf_map *ctx) +{ + struct bpf_map *map = ctx->map; + + if (!map) + return 0; + bpf_arena_alloc_pages(map, NULL, map->max_entries, 0, 0); + return 0; +} + +SEC("iter.s/bpf_map") +__failure __msg("expected pointer to STRUCT bpf_map") +int iter_maps2(struct bpf_iter__bpf_map *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + + bpf_arena_alloc_pages((void *)seq, NULL, 1, 0, 0); + return 0; +} + +SEC("iter.s/bpf_map") +__failure __msg("untrusted_ptr_bpf_map") +int iter_maps3(struct bpf_iter__bpf_map *ctx) +{ + struct bpf_map *map = ctx->map; + + if (!map) + return 0; + bpf_arena_alloc_pages(map->inner_map_meta, NULL, map->max_entries, 0, 0); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c new file mode 100644 index 0000000000..ef66ea4602 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
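The map_extra value in the verifier_arena.c map definition above picks where the arena is mapped: the high bits select the user_vm_start hint (1ull << 44, or 1ull << 32 on arm64, presumably to fit its smaller usable VA range), and the low 32 bits park the two pages flush against a 4 GiB boundary. Spelled out, with 4 KiB pages assumed:

/* ~0u - __PAGE_SIZE * 2 + 1
 *   = 0xffffffff - 8192 + 1
 *   = 0xffffe000
 *   = 2^32 - two pages
 * so the arena's pages end exactly at a 32-bit boundary, which is what
 * the "close to 32-bit boundary" comment in the map definition means.
 */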
*/ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" +#include "bpf_arena_common.h" + +#define ARENA_SIZE (1ull << 32) + +struct { + __uint(type, BPF_MAP_TYPE_ARENA); + __uint(map_flags, BPF_F_MMAPABLE); + __uint(max_entries, ARENA_SIZE / PAGE_SIZE); +} arena SEC(".maps"); + +SEC("syscall") +__success __retval(0) +int big_alloc1(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile char __arena *page1, *page2, *no_page, *page3; + void __arena *base; + + page1 = base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page1) + return 1; + *page1 = 1; + page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE, + 1, NUMA_NO_NODE, 0); + if (!page2) + return 2; + *page2 = 2; + no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE, + 1, NUMA_NO_NODE, 0); + if (no_page) + return 3; + if (*page1 != 1) + return 4; + if (*page2 != 2) + return 5; + bpf_arena_free_pages(&arena, (void __arena *)page1, 1); + if (*page2 != 2) + return 6; + if (*page1 != 0) /* use-after-free should return 0 */ + return 7; + page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page3) + return 8; + *page3 = 3; + if (page1 != page3) + return 9; + if (*page2 != 2) + return 10; + if (*(page1 + PAGE_SIZE) != 0) + return 11; + if (*(page1 - PAGE_SIZE) != 0) + return 12; + if (*(page2 + PAGE_SIZE) != 0) + return 13; + if (*(page2 - PAGE_SIZE) != 0) + return 14; +#endif + return 0; +} +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c index be95570ab3..28b602ac9c 100644 --- a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c +++ b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c @@ -568,7 +568,7 @@ l0_%=: r0 = 0; \ SEC("tc") __description("direct packet access: test23 (x += pkt_ptr, 4)") -__failure __msg("invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)") +__failure __msg("invalid access to packet, off=0 size=8, R5(id=3,off=0,r=0)") __flag(BPF_F_ANY_ALIGNMENT) __naked void test23_x_pkt_ptr_4(void) { diff --git a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c new file mode 100644 index 0000000000..4ab0ef18d7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" +#include "xdp_metadata.h" +#include "bpf_kfuncs.h" + +extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak; +extern void bpf_task_release(struct task_struct *p) __ksym __weak; + +__weak int subprog_trusted_task_nullable(struct task_struct *task __arg_trusted __arg_nullable) +{ + if (!task) + return 0; + return task->pid + task->tgid; +} + +__weak int subprog_trusted_task_nullable_extra_layer(struct task_struct *task __arg_trusted __arg_nullable) +{ + return subprog_trusted_task_nullable(task) + subprog_trusted_task_nullable(NULL); +} + +SEC("?tp_btf/task_newtask") +__success __log_level(2) +__msg("Validating subprog_trusted_task_nullable() func#1...") +__msg(": R1=trusted_ptr_or_null_task_struct(") +int trusted_task_arg_nullable(void *ctx) +{ + struct task_struct *t1 = bpf_get_current_task_btf(); + struct task_struct *t2 = bpf_task_acquire(t1); + int res = 0; + + /* known NULL */ + res += subprog_trusted_task_nullable(NULL); + + /* known non-NULL */ + res += subprog_trusted_task_nullable(t1); + res += subprog_trusted_task_nullable_extra_layer(t1); + + /* unknown if NULL or not */ + res += subprog_trusted_task_nullable(t2); + res += subprog_trusted_task_nullable_extra_layer(t2); + + if (t2) { + /* known non-NULL after explicit NULL check, just in case */ + res += subprog_trusted_task_nullable(t2); + res += subprog_trusted_task_nullable_extra_layer(t2); + + bpf_task_release(t2); + } + + return res; +} + +__weak int subprog_trusted_task_nonnull(struct task_struct *task __arg_trusted) +{ + return task->pid + task->tgid; +} + +SEC("?kprobe") +__failure __log_level(2) +__msg("R1 type=scalar expected=ptr_, trusted_ptr_, rcu_ptr_") +__msg("Caller passes invalid args into func#1 ('subprog_trusted_task_nonnull')") +int trusted_task_arg_nonnull_fail1(void *ctx) +{ + return subprog_trusted_task_nonnull(NULL); +} + +SEC("?tp_btf/task_newtask") +__failure __log_level(2) +__msg("R1 type=ptr_or_null_ expected=ptr_, trusted_ptr_, rcu_ptr_") +__msg("Caller passes invalid args into func#1 ('subprog_trusted_task_nonnull')") +int trusted_task_arg_nonnull_fail2(void *ctx) +{ + struct task_struct *t = bpf_get_current_task_btf(); + struct task_struct *nullable; + int res; + + nullable = bpf_task_acquire(t); + + /* should fail, PTR_TO_BTF_ID_OR_NULL */ + res = subprog_trusted_task_nonnull(nullable); + + if (nullable) + bpf_task_release(nullable); + + return res; +} + +SEC("?kprobe") +__success __log_level(2) +__msg("Validating subprog_trusted_task_nonnull() func#1...") +__msg(": R1=trusted_ptr_task_struct(") +int trusted_task_arg_nonnull(void *ctx) +{ + struct task_struct *t = bpf_get_current_task_btf(); + + return subprog_trusted_task_nonnull(t); +} + +struct task_struct___local {} __attribute__((preserve_access_index)); + +__weak int subprog_nullable_task_flavor( + struct task_struct___local *task __arg_trusted __arg_nullable) +{ + char buf[16]; + + if (!task) + return 0; + + return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0); +} + +SEC("?uprobe.s") +__success __log_level(2) +__msg("Validating subprog_nullable_task_flavor() func#1...") +__msg(": R1=trusted_ptr_or_null_task_struct(") +int flavor_ptr_nullable(void *ctx) +{ + struct task_struct___local *t = (void *)bpf_get_current_task_btf(); + + return subprog_nullable_task_flavor(t); +} + +__weak int subprog_nonnull_task_flavor(struct task_struct___local *task 
__arg_trusted) +{ + char buf[16]; + + return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0); +} + +SEC("?uprobe.s") +__success __log_level(2) +__msg("Validating subprog_nonnull_task_flavor() func#1...") +__msg(": R1=trusted_ptr_task_struct(") +int flavor_ptr_nonnull(void *ctx) +{ + struct task_struct *t = bpf_get_current_task_btf(); + + return subprog_nonnull_task_flavor((void *)t); +} + +__weak int subprog_trusted_destroy(struct task_struct *task __arg_trusted) +{ + bpf_task_release(task); /* should be rejected */ + + return 0; +} + +SEC("?tp_btf/task_newtask") +__failure __log_level(2) +__msg("release kernel function bpf_task_release expects refcounted PTR_TO_BTF_ID") +int BPF_PROG(trusted_destroy_fail, struct task_struct *task, u64 clone_flags) +{ + return subprog_trusted_destroy(task); +} + +__weak int subprog_trusted_acq_rel(struct task_struct *task __arg_trusted) +{ + struct task_struct *owned; + + owned = bpf_task_acquire(task); + if (!owned) + return 0; + + bpf_task_release(owned); /* this one is OK, we acquired it locally */ + + return 0; +} + +SEC("?tp_btf/task_newtask") +__success __log_level(2) +int BPF_PROG(trusted_acq_rel, struct task_struct *task, u64 clone_flags) +{ + return subprog_trusted_acq_rel(task); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c index 67dddd9418..baff5ffe94 100644 --- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c +++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c @@ -115,6 +115,35 @@ int arg_tag_nullable_ptr_fail(void *ctx) return subprog_nullable_ptr_bad(&x); } +typedef struct { + int x; +} user_struct_t; + +__noinline __weak int subprog_user_anon_mem(user_struct_t *t) +{ + return t ? 
t->x : 0; +} + +SEC("?tracepoint") +__failure __log_level(2) +__msg("invalid bpf_context access") +__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')") +int anon_user_mem_invalid(void *ctx) +{ + /* can't pass PTR_TO_CTX as user memory */ + return subprog_user_anon_mem(ctx); +} + +SEC("?tracepoint") +__success __log_level(2) +__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype") +int anon_user_mem_valid(void *ctx) +{ + user_struct_t t = { .x = 42 }; + + return subprog_user_anon_mem(&t); +} + __noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull) { return (*p1) * (*p2); /* good, no need for NULL checks */ diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c index a955a63582..99e561f18f 100644 --- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c +++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c @@ -1,8 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 - -#include <linux/bpf.h> -#include <bpf/bpf_helpers.h> #include "bpf_misc.h" +#include "bpf_experimental.h" struct { __uint(type, BPF_MAP_TYPE_ARRAY); @@ -309,4 +307,103 @@ int iter_limit_bug(struct __sk_buff *skb) return 0; } +#define ARR_SZ 1000000 +int zero; +char arr[ARR_SZ]; + +SEC("socket") +__success __retval(0xd495cdc0) +int cond_break1(const void *ctx) +{ + unsigned long i; + unsigned int sum = 0; + + for (i = zero; i < ARR_SZ; cond_break, i++) + sum += i; + for (i = zero; i < ARR_SZ; i++) { + barrier_var(i); + sum += i + arr[i]; + cond_break; + } + + return sum; +} + +SEC("socket") +__success __retval(999000000) +int cond_break2(const void *ctx) +{ + int i, j; + int sum = 0; + + for (i = zero; i < 1000; cond_break, i++) + for (j = zero; j < 1000; j++) { + sum += i + j; + cond_break; + } + + return sum; +} + +static __noinline int loop(void) +{ + int i, sum = 0; + + for (i = zero; i <= 1000000; i++, cond_break) + sum += i; + + return sum; +} + +SEC("socket") +__success __retval(0x6a5a2920) +int cond_break3(const void *ctx) +{ + return loop(); +} + +SEC("socket") +__success __retval(1) +int cond_break4(const void *ctx) +{ + int cnt = zero; + + for (;;) { + /* should eventually break out of the loop */ + cond_break; + cnt++; + } + /* if we looped a bit, it's a success */ + return cnt > 1 ? 1 : 0; +} + +static __noinline int static_subprog(void) +{ + int cnt = zero; + + for (;;) { + cond_break; + cnt++; + } + + return cnt; +} + +SEC("socket") +__success __retval(1) +int cond_break5(const void *ctx) +{ + int cnt1 = zero, cnt2; + + for (;;) { + cond_break; + cnt1++; + } + + cnt2 = static_subprog(); + + /* main and subprog have to loop a bit */ + return cnt1 > 1 && cnt2 > 1 ? 
1 : 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c index 71735dbf33..e07b43b78f 100644 --- a/tools/testing/selftests/bpf/progs/verifier_loops1.c +++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c @@ -259,4 +259,28 @@ l0_%=: r2 += r1; \ " ::: __clobber_all); } +SEC("xdp") +__success +__naked void not_an_inifinite_loop(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r0 &= 0xff; \ + *(u64 *)(r10 - 8) = r0; \ + r0 = 0; \ +loop_%=: \ + r0 = *(u64 *)(r10 - 8); \ + if r0 > 10 goto exit_%=; \ + r0 += 1; \ + *(u64 *)(r10 - 8) = r0; \ + r0 = 0; \ + goto loop_%=; \ +exit_%=: \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c index 39fe3372e0..85e48069c9 100644 --- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c +++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c @@ -217,7 +217,7 @@ __naked void uninit_u32_from_the_stack(void) SEC("tc") __description("Spill a u32 const scalar. Refill as u16. Offset to skb->data") -__failure __msg("invalid access to packet") +__success __retval(0) __naked void u16_offset_to_skb_data(void) { asm volatile (" \ @@ -225,13 +225,19 @@ __naked void u16_offset_to_skb_data(void) r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ w4 = 20; \ *(u32*)(r10 - 8) = r4; \ - r4 = *(u16*)(r10 - 8); \ + " +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r4 = *(u16*)(r10 - 8);" +#else + "r4 = *(u16*)(r10 - 6);" +#endif + " \ r0 = r2; \ - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */\ r0 += r4; \ - /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\ + /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\ if r0 > r3 goto l0_%=; \ - /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\ + /* r0 = *(u32 *)r2 R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\ r0 = *(u32*)(r2 + 0); \ l0_%=: r0 = 0; \ exit; \ @@ -243,7 +249,7 @@ l0_%=: r0 = 0; \ SEC("tc") __description("Spill u32 const scalars. Refill as u64. Offset to skb->data") -__failure __msg("invalid access to packet") +__failure __msg("math between pkt pointer and register with unbounded min value is not allowed") __naked void u64_offset_to_skb_data(void) { asm volatile (" \ @@ -253,13 +259,11 @@ __naked void u64_offset_to_skb_data(void) w7 = 20; \ *(u32*)(r10 - 4) = r6; \ *(u32*)(r10 - 8) = r7; \ - r4 = *(u16*)(r10 - 8); \ + r4 = *(u64*)(r10 - 8); \ r0 = r2; \ - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4= */ \ r0 += r4; \ - /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\ if r0 > r3 goto l0_%=; \ - /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\ r0 = *(u32*)(r2 + 0); \ l0_%=: r0 = 0; \ exit; \ @@ -270,7 +274,7 @@ l0_%=: r0 = 0; \ } SEC("tc") -__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data") +__description("Spill a u32 const scalar. Refill as u16 from MSB. 
Offset to skb->data") __failure __msg("invalid access to packet") __naked void _6_offset_to_skb_data(void) { @@ -279,7 +283,13 @@ __naked void _6_offset_to_skb_data(void) r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ w4 = 20; \ *(u32*)(r10 - 8) = r4; \ - r4 = *(u16*)(r10 - 6); \ + " +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r4 = *(u16*)(r10 - 6);" +#else + "r4 = *(u16*)(r10 - 8);" +#endif + " \ r0 = r2; \ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ r0 += r4; \ @@ -454,9 +464,9 @@ l0_%=: r1 >>= 16; \ SEC("raw_tp") __log_level(2) __success -__msg("fp-8=0m??mmmm") -__msg("fp-16=00mm??mm") -__msg("fp-24=00mm???m") +__msg("fp-8=0m??scalar()") +__msg("fp-16=00mm??scalar()") +__msg("fp-24=00mm???scalar()") __naked void spill_subregs_preserve_stack_zero(void) { asm volatile ( @@ -495,14 +505,14 @@ char single_byte_buf[1] SEC(".data.single_byte_buf"); SEC("raw_tp") __log_level(2) __success -/* make sure fp-8 is all STACK_ZERO */ -__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=00000000") +/* fp-8 is spilled IMPRECISE value zero (represented by a zero value fake reg) */ +__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=0") /* but fp-16 is spilled IMPRECISE zero const reg */ __msg("4: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=0 R10=fp0 fp-16_w=0") -/* validate that assigning R2 from STACK_ZERO doesn't mark register +/* validate that assigning R2 from STACK_SPILL with zero value doesn't mark register * precise immediately; if necessary, it will be marked precise later */ -__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=00000000") +__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=0") /* similarly, when R2 is assigned from spilled register, it is initially * imprecise, but will be marked precise later once it is used in precise context */ @@ -520,14 +530,14 @@ __msg("mark_precise: frame0: regs=r0 stack= before 3: (b7) r0 = 0") __naked void partial_stack_load_preserves_zeros(void) { asm volatile ( - /* fp-8 is all STACK_ZERO */ + /* fp-8 is value zero (represented by a zero value fake reg) */ ".8byte %[fp8_st_zero];" /* LLVM-18+: *(u64 *)(r10 -8) = 0; */ /* fp-16 is const zero register */ "r0 = 0;" "*(u64 *)(r10 -16) = r0;" - /* load single U8 from non-aligned STACK_ZERO slot */ + /* load single U8 from non-aligned spilled value zero slot */ "r1 = %[single_byte_buf];" "r2 = *(u8 *)(r10 -1);" "r1 += r2;" @@ -539,7 +549,7 @@ __naked void partial_stack_load_preserves_zeros(void) "r1 += r2;" "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ - /* load single U16 from non-aligned STACK_ZERO slot */ + /* load single U16 from non-aligned spilled value zero slot */ "r1 = %[single_byte_buf];" "r2 = *(u16 *)(r10 -2);" "r1 += r2;" @@ -551,7 +561,7 @@ __naked void partial_stack_load_preserves_zeros(void) "r1 += r2;" "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ - /* load single U32 from non-aligned STACK_ZERO slot */ + /* load single U32 from non-aligned spilled value zero slot */ "r1 = %[single_byte_buf];" "r2 = *(u32 *)(r10 -4);" "r1 += r2;" @@ -583,6 +593,47 @@ __naked void partial_stack_load_preserves_zeros(void) : __clobber_common); } +SEC("raw_tp") +__log_level(2) +__success +/* fp-4 is STACK_ZERO */ +__msg("2: (62) *(u32 *)(r10 -4) = 0 ; R10=fp0 fp-8=0000????") +__msg("4: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8=0000????") +__msg("5: (0f) r1 += r2") +__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1") +__msg("mark_precise: frame0: regs=r2 stack= before 4: (71) r2 = *(u8 *)(r10 -1)") +__naked void 
partial_stack_load_preserves_partial_zeros(void) +{ + asm volatile ( + /* fp-4 is value zero */ + ".8byte %[fp4_st_zero];" /* LLVM-18+: *(u32 *)(r10 -4) = 0; */ + + /* load single U8 from non-aligned stack zero slot */ + "r1 = %[single_byte_buf];" + "r2 = *(u8 *)(r10 -1);" + "r1 += r2;" + "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ + + /* load single U16 from non-aligned stack zero slot */ + "r1 = %[single_byte_buf];" + "r2 = *(u16 *)(r10 -2);" + "r1 += r2;" + "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ + + /* load single U32 from non-aligned stack zero slot */ + "r1 = %[single_byte_buf];" + "r2 = *(u32 *)(r10 -4);" + "r1 += r2;" + "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ + + "r0 = 0;" + "exit;" + : + : __imm_ptr(single_byte_buf), + __imm_insn(fp4_st_zero, BPF_ST_MEM(BPF_W, BPF_REG_FP, -4, 0)) + : __clobber_common); +} + char two_byte_buf[2] SEC(".data.two_byte_buf"); SEC("raw_tp") @@ -737,4 +788,460 @@ __naked void stack_load_preserves_const_precision_subreg(void) : __clobber_common); } +SEC("xdp") +__description("32-bit spilled reg range should be tracked") +__success __retval(0) +__naked void spill_32bit_range_track(void) +{ + asm volatile(" \ + call %[bpf_ktime_get_ns]; \ + /* Make r0 bounded. */ \ + r0 &= 65535; \ + /* Assign an ID to r0. */ \ + r1 = r0; \ + /* 32-bit spill r0 to stack. */ \ + *(u32*)(r10 - 8) = r0; \ + /* Boundary check on r0. */ \ + if r0 < 1 goto l0_%=; \ + /* 32-bit fill r1 from stack. */ \ + r1 = *(u32*)(r10 - 8); \ + /* r1 == r0 => r1 >= 1 always. */ \ + if r1 >= 1 goto l0_%=; \ + /* Dead branch: the verifier should prune it. \ + * Do an invalid memory access if the verifier \ + * follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("xdp") +__description("64-bit spill of 64-bit reg should assign ID") +__success __retval(0) +__naked void spill_64bit_of_64bit_ok(void) +{ + asm volatile (" \ + /* Roll one bit to make the register inexact. */\ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x80000000; \ + r0 <<= 32; \ + /* 64-bit spill r0 to stack - should assign an ID. */\ + *(u64*)(r10 - 8) = r0; \ + /* 64-bit fill r1 from stack - should preserve the ID. */\ + r1 = *(u64*)(r10 - 8); \ + /* Compare r1 with another register to trigger find_equal_scalars.\ + * Having one random bit is important here, otherwise the verifier cuts\ + * the corners. \ + */ \ + r2 = 0; \ + if r1 != r2 goto l0_%=; \ + /* The result of this comparison is predefined. */\ + if r0 == r2 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("32-bit spill of 32-bit reg should assign ID") +__success __retval(0) +__naked void spill_32bit_of_32bit_ok(void) +{ + asm volatile (" \ + /* Roll one bit to make the register inexact. */\ + call %[bpf_get_prandom_u32]; \ + w0 &= 0x80000000; \ + /* 32-bit spill r0 to stack - should assign an ID. */\ + *(u32*)(r10 - 8) = r0; \ + /* 32-bit fill r1 from stack - should preserve the ID. */\ + r1 = *(u32*)(r10 - 8); \ + /* Compare r1 with another register to trigger find_equal_scalars.\ + * Having one random bit is important here, otherwise the verifier cuts\ + * the corners. \ + */ \ + r2 = 0; \ + if r1 != r2 goto l0_%=; \ + /* The result of this comparison is predefined. 
*/\ + if r0 == r2 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("16-bit spill of 16-bit reg should assign ID") +__success __retval(0) +__naked void spill_16bit_of_16bit_ok(void) +{ + asm volatile (" \ + /* Roll one bit to make the register inexact. */\ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x8000; \ + /* 16-bit spill r0 to stack - should assign an ID. */\ + *(u16*)(r10 - 8) = r0; \ + /* 16-bit fill r1 from stack - should preserve the ID. */\ + r1 = *(u16*)(r10 - 8); \ + /* Compare r1 with another register to trigger find_equal_scalars.\ + * Having one random bit is important here, otherwise the verifier cuts\ + * the corners. \ + */ \ + r2 = 0; \ + if r1 != r2 goto l0_%=; \ + /* The result of this comparison is predefined. */\ + if r0 == r2 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("8-bit spill of 8-bit reg should assign ID") +__success __retval(0) +__naked void spill_8bit_of_8bit_ok(void) +{ + asm volatile (" \ + /* Roll one bit to make the register inexact. */\ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x80; \ + /* 8-bit spill r0 to stack - should assign an ID. */\ + *(u8*)(r10 - 8) = r0; \ + /* 8-bit fill r1 from stack - should preserve the ID. */\ + r1 = *(u8*)(r10 - 8); \ + /* Compare r1 with another register to trigger find_equal_scalars.\ + * Having one random bit is important here, otherwise the verifier cuts\ + * the corners. \ + */ \ + r2 = 0; \ + if r1 != r2 goto l0_%=; \ + /* The result of this comparison is predefined. */\ + if r0 == r2 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("spill unbounded reg, then range check src") +__success __retval(0) +__naked void spill_unbounded(void) +{ + asm volatile (" \ + /* Produce an unbounded scalar. */ \ + call %[bpf_get_prandom_u32]; \ + /* Spill r0 to stack. */ \ + *(u64*)(r10 - 8) = r0; \ + /* Boundary check on r0. */ \ + if r0 > 16 goto l0_%=; \ + /* Fill r0 from stack. */ \ + r0 = *(u64*)(r10 - 8); \ + /* Boundary check on r0 with predetermined result. */\ + if r0 <= 16 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("32-bit fill after 64-bit spill") +__success __retval(0) +__naked void fill_32bit_after_spill_64bit(void) +{ + asm volatile(" \ + /* Randomize the upper 32 bits. */ \ + call %[bpf_get_prandom_u32]; \ + r0 <<= 32; \ + /* 64-bit spill r0 to stack. */ \ + *(u64*)(r10 - 8) = r0; \ + /* 32-bit fill r0 from stack. */ \ + " +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r0 = *(u32*)(r10 - 8);" +#else + "r0 = *(u32*)(r10 - 4);" +#endif + " \ + /* Boundary check on r0 with predetermined result. */\ + if r0 == 0 goto l0_%=; \ + /* Dead branch: the verifier should prune it. 
Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("32-bit fill after 64-bit spill of 32-bit value should preserve ID") +__success __retval(0) +__naked void fill_32bit_after_spill_64bit_preserve_id(void) +{ + asm volatile (" \ + /* Randomize the lower 32 bits. */ \ + call %[bpf_get_prandom_u32]; \ + w0 &= 0xffffffff; \ + /* 64-bit spill r0 to stack - should assign an ID. */\ + *(u64*)(r10 - 8) = r0; \ + /* 32-bit fill r1 from stack - should preserve the ID. */\ + " +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r1 = *(u32*)(r10 - 8);" +#else + "r1 = *(u32*)(r10 - 4);" +#endif + " \ + /* Compare r1 with another register to trigger find_equal_scalars. */\ + r2 = 0; \ + if r1 != r2 goto l0_%=; \ + /* The result of this comparison is predefined. */\ + if r0 == r2 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("32-bit fill after 64-bit spill should clear ID") +__failure __msg("math between ctx pointer and 4294967295 is not allowed") +__naked void fill_32bit_after_spill_64bit_clear_id(void) +{ + asm volatile (" \ + r6 = r1; \ + /* Roll one bit to force the verifier to track both branches. */\ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x8; \ + /* Put a large number into r1. */ \ + r1 = 0xffffffff; \ + r1 <<= 32; \ + r1 += r0; \ + /* 64-bit spill r1 to stack - should assign an ID. */\ + *(u64*)(r10 - 8) = r1; \ + /* 32-bit fill r2 from stack - should clear the ID. */\ + " +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r2 = *(u32*)(r10 - 8);" +#else + "r2 = *(u32*)(r10 - 4);" +#endif + " \ + /* Compare r2 with another register to trigger find_equal_scalars.\ + * Having one random bit is important here, otherwise the verifier cuts\ + * the corners. If the ID was mistakenly preserved on fill, this would\ + * cause the verifier to think that r1 is also equal to zero in one of\ + * the branches, and equal to eight on the other branch.\ + */ \ + r3 = 0; \ + if r2 != r3 goto l0_%=; \ +l0_%=: r1 >>= 32; \ + /* The verifier shouldn't propagate r2's range to r1, so it should\ + * still remember r1 = 0xffffffff and reject the below.\ + */ \ + r6 += r1; \ + r0 = *(u32*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* stacksafe(): check if stack spill of an imprecise scalar in old state + * is considered equivalent to STACK_{MISC,INVALID} in cur state. + */ +SEC("socket") +__success __log_level(2) +__msg("8: (79) r1 = *(u64 *)(r10 -8)") +__msg("8: safe") +__msg("processed 11 insns") +/* STACK_INVALID should prevent verifier in unpriv mode from + * considering states equivalent and force an error on second + * verification path (entry - label 1 - label 2). 
+ */ +__failure_unpriv +__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)") +__msg_unpriv("9: (95) exit") +__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)") +__msg_unpriv("invalid read from stack off -8+2 size 8") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void old_imprecise_scalar_vs_cur_stack_misc(void) +{ + asm volatile( + /* get a random value for branching */ + "call %[bpf_ktime_get_ns];" + "if r0 == 0 goto 1f;" + /* conjure scalar at fp-8 */ + "r0 = 42;" + "*(u64*)(r10 - 8) = r0;" + "goto 2f;" +"1:" + /* conjure STACK_{MISC,INVALID} at fp-8 */ + "call %[bpf_ktime_get_ns];" + "*(u16*)(r10 - 8) = r0;" + "*(u16*)(r10 - 4) = r0;" +"2:" + /* read fp-8, should be considered safe on second visit */ + "r1 = *(u64*)(r10 - 8);" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* stacksafe(): check that stack spill of a precise scalar in old state + * is not considered equivalent to STACK_MISC in cur state. + */ +SEC("socket") +__success __log_level(2) +/* verifier should visit 'if r1 == 0x2a ...' two times: + * - once for path entry - label 2; + * - once for path entry - label 1 - label 2. + */ +__msg("if r1 == 0x2a goto pc+0") +__msg("if r1 == 0x2a goto pc+0") +__msg("processed 15 insns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void old_precise_scalar_vs_cur_stack_misc(void) +{ + asm volatile( + /* get a random value for branching */ + "call %[bpf_ktime_get_ns];" + "if r0 == 0 goto 1f;" + /* conjure scalar at fp-8 */ + "r0 = 42;" + "*(u64*)(r10 - 8) = r0;" + "goto 2f;" +"1:" + /* conjure STACK_MISC at fp-8 */ + "call %[bpf_ktime_get_ns];" + "*(u64*)(r10 - 8) = r0;" + "*(u32*)(r10 - 4) = r0;" +"2:" + /* read fp-8, should not be considered safe on second visit */ + "r1 = *(u64*)(r10 - 8);" + /* use r1 in precise context */ + "if r1 == 42 goto +0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* stacksafe(): check if STACK_MISC in old state is considered + * equivalent to stack spill of a scalar in cur state. + */ +SEC("socket") +__success __log_level(2) +__msg("8: (79) r0 = *(u64 *)(r10 -8)") +__msg("8: safe") +__msg("processed 11 insns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void old_stack_misc_vs_cur_scalar(void) +{ + asm volatile( + /* get a random value for branching */ + "call %[bpf_ktime_get_ns];" + "if r0 == 0 goto 1f;" + /* conjure STACK_{MISC,INVALID} at fp-8 */ + "call %[bpf_ktime_get_ns];" + "*(u16*)(r10 - 8) = r0;" + "*(u16*)(r10 - 4) = r0;" + "goto 2f;" +"1:" + /* conjure scalar at fp-8 */ + "r0 = 42;" + "*(u64*)(r10 - 8) = r0;" +"2:" + /* read fp-8, should be considered safe on second visit */ + "r0 = *(u64*)(r10 - 8);" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* stacksafe(): check that STACK_MISC in old state is not considered + * equivalent to stack spill of a non-scalar in cur state. + */ +SEC("socket") +__success __log_level(2) +/* verifier should process exit instructions twice: + * - once for path entry - label 2; + * - once for path entry - label 1 - label 2. 
+ */ +__msg("r1 = *(u64 *)(r10 -8)") +__msg("exit") +__msg("r1 = *(u64 *)(r10 -8)") +__msg("exit") +__msg("processed 11 insns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void old_stack_misc_vs_cur_ctx_ptr(void) +{ + asm volatile( + /* remember context pointer in r9 */ + "r9 = r1;" + /* get a random value for branching */ + "call %[bpf_ktime_get_ns];" + "if r0 == 0 goto 1f;" + /* conjure STACK_MISC at fp-8 */ + "call %[bpf_ktime_get_ns];" + "*(u64*)(r10 - 8) = r0;" + "*(u32*)(r10 - 4) = r0;" + "goto 2f;" +"1:" + /* conjure context pointer in fp-8 */ + "*(u64*)(r10 - 8) = r9;" +"2:" + /* read fp-8, should not be considered safe on second visit */ + "r1 = *(u64*)(r10 - 8);" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c index 9c1aa69650..fb316c080c 100644 --- a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c @@ -330,7 +330,7 @@ l1_%=: r7 = r0; \ SEC("cgroup/skb") __description("spin_lock: test10 lock in subprog without unlock") -__failure __msg("unlock is missing") +__success __failure_unpriv __msg_unpriv("") __naked void lock_in_subprog_without_unlock(void) { diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c index 518329c666..7ea9785738 100644 --- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c +++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c @@ -7,6 +7,8 @@ #include <bpf/bpf_endian.h> #include <asm/errno.h> +#include "bpf_compiler.h" + #define TC_ACT_OK 0 #define TC_ACT_SHOT 2 @@ -151,11 +153,11 @@ static __always_inline __u16 csum_ipv6_magic(const struct in6_addr *saddr, __u64 sum = csum; int i; -#pragma unroll + __pragma_loop_unroll for (i = 0; i < 4; i++) sum += (__u32)saddr->in6_u.u6_addr32[i]; -#pragma unroll + __pragma_loop_unroll for (i = 0; i < 4; i++) sum += (__u32)daddr->in6_u.u6_addr32[i]; diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c index 54cf176511..44e2b0ef23 100644 --- a/tools/testing/selftests/bpf/progs/xdping_kern.c +++ b/tools/testing/selftests/bpf/progs/xdping_kern.c @@ -15,6 +15,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_compiler.h" #include "xdping.h" struct { @@ -116,7 +117,7 @@ int xdping_client(struct xdp_md *ctx) return XDP_PASS; if (pinginfo->start) { -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (i = 0; i < XDPING_MAX_COUNT; i++) { if (pinginfo->times[i] == 0) break; diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index f013910212..524c38e9cd 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -181,7 +181,7 @@ static int parse_test_spec(struct test_loader *tester, memset(spec, 0, sizeof(*spec)); spec->prog_name = bpf_program__name(prog); - spec->prog_flags = BPF_F_TEST_REG_INVARIANTS; /* by default be strict */ + spec->prog_flags = testing_prog_flags(); btf = bpf_object__btf(obj); if (!btf) { @@ -501,7 +501,7 @@ static bool is_unpriv_capable_map(struct bpf_map *map) } } -static int do_prog_test_run(int fd_prog, int *retval) +static int do_prog_test_run(int fd_prog, int *retval, bool empty_opts) { __u8 tmp_out[TEST_DATA_LEN << 2] = {}; __u8 tmp_in[TEST_DATA_LEN] = {}; @@ -514,6 +514,10 @@ static int 
do_prog_test_run(int fd_prog, int *retval) .repeat = 1, ); + if (empty_opts) { + memset(&topts, 0, sizeof(struct bpf_test_run_opts)); + topts.sz = sizeof(struct bpf_test_run_opts); + } err = bpf_prog_test_run_opts(fd_prog, &topts); saved_errno = errno; @@ -649,7 +653,8 @@ void run_subtest(struct test_loader *tester, } } - do_prog_test_run(bpf_program__fd(tprog), &retval); + do_prog_test_run(bpf_program__fd(tprog), &retval, + bpf_program__type(tprog) == BPF_PROG_TYPE_SYSCALL ? true : false); if (retval != subspec->retval && subspec->retval != POINTER_VALUE) { PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval); goto tobj_cleanup; @@ -688,7 +693,7 @@ static void process_subtest(struct test_loader *tester, ++nr_progs; specs = calloc(nr_progs, sizeof(struct test_spec)); - if (!ASSERT_OK_PTR(specs, "Can't alloc specs array")) + if (!ASSERT_OK_PTR(specs, "specs_alloc")) return; i = 0; diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index c028d621c7..d98c72dc56 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -211,7 +211,7 @@ static void test_lpm_map(int keysize) volatile size_t n_matches, n_matches_after_delete; size_t i, j, n_nodes, n_lookups; struct tlpm_node *t, *list = NULL; - struct bpf_lpm_trie_key *key; + struct bpf_lpm_trie_key_u8 *key; uint8_t *data, *value; int r, map; @@ -331,8 +331,8 @@ static void test_lpm_map(int keysize) static void test_lpm_ipaddr(void) { LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); - struct bpf_lpm_trie_key *key_ipv4; - struct bpf_lpm_trie_key *key_ipv6; + struct bpf_lpm_trie_key_u8 *key_ipv4; + struct bpf_lpm_trie_key_u8 *key_ipv6; size_t key_size_ipv4; size_t key_size_ipv6; int map_fd_ipv4; @@ -423,7 +423,7 @@ static void test_lpm_ipaddr(void) static void test_lpm_delete(void) { LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); - struct bpf_lpm_trie_key *key; + struct bpf_lpm_trie_key_u8 *key; size_t key_size; int map_fd; __u64 value; @@ -532,7 +532,7 @@ static void test_lpm_delete(void) static void test_lpm_get_next_key(void) { LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC); - struct bpf_lpm_trie_key *key_p, *next_key_p; + struct bpf_lpm_trie_key_u8 *key_p, *next_key_p; size_t key_size; __u32 value = 0; int map_fd; @@ -693,9 +693,9 @@ static void *lpm_test_command(void *arg) { int i, j, ret, iter, key_size; struct lpm_mt_test_info *info = arg; - struct bpf_lpm_trie_key *key_p; + struct bpf_lpm_trie_key_u8 *key_p; - key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32); + key_size = sizeof(*key_p) + sizeof(__u32); key_p = alloca(key_size); for (iter = 0; iter < info->iter; iter++) for (i = 0; i < MAX_TEST_KEYS; i++) { @@ -717,7 +717,7 @@ static void *lpm_test_command(void *arg) ret = bpf_map_lookup_elem(info->map_fd, key_p, &value); assert(ret == 0 || errno == ENOENT); } else { - struct bpf_lpm_trie_key *next_key_p = alloca(key_size); + struct bpf_lpm_trie_key_u8 *next_key_p = alloca(key_size); ret = bpf_map_get_next_key(info->map_fd, key_p, next_key_p); assert(ret == 0 || errno == ENOENT || errno == ENOMEM); } @@ -752,7 +752,7 @@ static void test_lpm_multi_thread(void) /* create a trie */ value_size = sizeof(__u32); - key_size = sizeof(struct bpf_lpm_trie_key) + value_size; + key_size = sizeof(struct bpf_lpm_trie_key_hdr) + value_size; map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, NULL, key_size, value_size, 100, &opts); /* create 4 threads to test 
update, delete, lookup and get_next_key */ diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 1b93878901..89ff704e9d 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -547,24 +547,6 @@ int bpf_find_map(const char *test, struct bpf_object *obj, const char *name) return bpf_map__fd(map); } -static bool is_jit_enabled(void) -{ - const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable"; - bool enabled = false; - int sysctl_fd; - - sysctl_fd = open(jit_sysctl, 0, O_RDONLY); - if (sysctl_fd != -1) { - char tmpc; - - if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1) - enabled = (tmpc != '0'); - close(sysctl_fd); - } - - return enabled; -} - int compare_map_keys(int map1_fd, int map2_fd) { __u32 key, next_key; @@ -701,11 +683,69 @@ static const struct argp_option opts[] = { {}, }; +static FILE *libbpf_capture_stream; + +static struct { + char *buf; + size_t buf_sz; +} libbpf_output_capture; + +/* Creates a global memstream capturing INFO and WARN level output + * passed to libbpf_print_fn. + * Returns 0 on success, negative value on failure. + * On failure the description is printed using PRINT_FAIL and + * current test case is marked as fail. + */ +int start_libbpf_log_capture(void) +{ + if (libbpf_capture_stream) { + PRINT_FAIL("%s: libbpf_capture_stream != NULL\n", __func__); + return -EINVAL; + } + + libbpf_capture_stream = open_memstream(&libbpf_output_capture.buf, + &libbpf_output_capture.buf_sz); + if (!libbpf_capture_stream) { + PRINT_FAIL("%s: open_memstream failed errno=%d\n", __func__, errno); + return -EINVAL; + } + + return 0; +} + +/* Destroys global memstream created by start_libbpf_log_capture(). + * Returns a pointer to captured data which has to be freed. + * Returned buffer is null terminated. + */ +char *stop_libbpf_log_capture(void) +{ + char *buf; + + if (!libbpf_capture_stream) + return NULL; + + fputc(0, libbpf_capture_stream); + fclose(libbpf_capture_stream); + libbpf_capture_stream = NULL; + /* get 'buf' after fclose(), see open_memstream() documentation */ + buf = libbpf_output_capture.buf; + memset(&libbpf_output_capture, 0, sizeof(libbpf_output_capture)); + return buf; +} + static int libbpf_print_fn(enum libbpf_print_level level, const char *format, va_list args) { + if (libbpf_capture_stream && level != LIBBPF_DEBUG) { + va_list args2; + + va_copy(args2, args); + vfprintf(libbpf_capture_stream, format, args2); + } + if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG) return 0; + vfprintf(stdout, format, args); return 0; } @@ -1099,6 +1139,7 @@ static void run_one_test(int test_num) cleanup_cgroup_environment(); stdio_restore(); + free(stop_libbpf_log_capture()); dump_test_log(test, state, false, false, NULL); } diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 2f9f6f250f..0ba5a20b19 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -385,13 +385,21 @@ int test__join_cgroup(const char *path); goto goto_label; \ }) +#define ALL_TO_DEV_NULL " >/dev/null 2>&1" + #define SYS_NOFAIL(fmt, ...) 
\ ({ \ char cmd[1024]; \ - snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + int n; \ + n = snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \ + if (n < sizeof(cmd) && sizeof(cmd) - n >= sizeof(ALL_TO_DEV_NULL)) \ + strcat(cmd, ALL_TO_DEV_NULL); \ system(cmd); \ }) +int start_libbpf_log_capture(void); +char *stop_libbpf_log_capture(void); + static inline __u64 ptr_to_u64(const void *ptr) { return (__u64) (unsigned long) ptr; diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index b0068a9d2c..80c42583f5 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -19,6 +19,7 @@ #include <bpf/libbpf.h> #include "cgroup_helpers.h" +#include "testing_helpers.h" #include "bpf_util.h" #ifndef ENOTSUPP @@ -679,7 +680,7 @@ static int load_path(const struct sock_addr_test *test, const char *path) bpf_program__set_type(prog, BPF_PROG_TYPE_CGROUP_SOCK_ADDR); bpf_program__set_expected_attach_type(prog, test->expected_attach_type); - bpf_program__set_flags(prog, BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS); + bpf_program__set_flags(prog, testing_prog_flags()); err = bpf_object__load(obj); if (err) { diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 43612de44f..024a0faafb 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -2104,9 +2104,9 @@ out: free(options.whitelist); if (options.blacklist) free(options.blacklist); - close(cg_fd); if (cg_created) cleanup_cgroup_environment(); + close(cg_fd); return err; } diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index f36e41435b..df04bda1c9 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -67,6 +67,7 @@ #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1) +#define F_NEEDS_JIT_ENABLED (1 << 2) /* need CAP_BPF, CAP_NET_ADMIN, CAP_PERFMON to load progs */ #define ADMIN_CAPS (1ULL << CAP_NET_ADMIN | \ @@ -74,6 +75,7 @@ 1ULL << CAP_BPF) #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled" static bool unpriv_disabled = false; +static bool jit_disabled; static int skips; static bool verbose = false; static int verif_log_level = 0; @@ -1341,48 +1343,6 @@ static bool cmp_str_seq(const char *log, const char *exp) return true; } -static struct bpf_insn *get_xlated_program(int fd_prog, int *cnt) -{ - __u32 buf_element_size = sizeof(struct bpf_insn); - struct bpf_prog_info info = {}; - __u32 info_len = sizeof(info); - __u32 xlated_prog_len; - struct bpf_insn *buf; - - if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { - perror("bpf_prog_get_info_by_fd failed"); - return NULL; - } - - xlated_prog_len = info.xlated_prog_len; - if (xlated_prog_len % buf_element_size) { - printf("Program length %d is not multiple of %d\n", - xlated_prog_len, buf_element_size); - return NULL; - } - - *cnt = xlated_prog_len / buf_element_size; - buf = calloc(*cnt, buf_element_size); - if (!buf) { - perror("can't allocate xlated program buffer"); - return NULL; - } - - bzero(&info, sizeof(info)); - info.xlated_prog_len = xlated_prog_len; - info.xlated_prog_insns = (__u64)(unsigned long)buf; - if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { - perror("second bpf_prog_get_info_by_fd failed"); - goto out_free_buf; - } - - return buf; - -out_free_buf: - free(buf); - return NULL; -} - static bool 
is_null_insn(struct bpf_insn *insn) { struct bpf_insn null_insn = {}; @@ -1505,7 +1465,7 @@ static void print_insn(struct bpf_insn *buf, int cnt) static bool check_xlated_program(struct bpf_test *test, int fd_prog) { struct bpf_insn *buf; - int cnt; + unsigned int cnt; bool result = true; bool check_expected = !is_null_insn(test->expected_insns); bool check_unexpected = !is_null_insn(test->unexpected_insns); @@ -1513,8 +1473,7 @@ static bool check_xlated_program(struct bpf_test *test, int fd_prog) if (!check_expected && !check_unexpected) goto out; - buf = get_xlated_program(fd_prog, &cnt); - if (!buf) { + if (get_xlated_program(fd_prog, &buf, &cnt)) { printf("FAIL: can't get xlated program\n"); result = false; goto out; @@ -1567,6 +1526,13 @@ static void do_test_single(struct bpf_test *test, bool unpriv, __u32 pflags; int i, err; + if ((test->flags & F_NEEDS_JIT_ENABLED) && jit_disabled) { + printf("SKIP (requires BPF JIT)\n"); + skips++; + sched_yield(); + return; + } + fd_prog = -1; for (i = 0; i < MAX_NR_MAPS; i++) map_fds[i] = -1; @@ -1588,7 +1554,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, if (fixup_skips != skips) return; - pflags = BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS; + pflags = testing_prog_flags(); if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT) pflags |= BPF_F_STRICT_ALIGNMENT; if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) @@ -1887,6 +1853,8 @@ int main(int argc, char **argv) return EXIT_FAILURE; } + jit_disabled = !is_jit_enabled(); + /* Use libbpf 1.0 API mode */ libbpf_set_strict_mode(LIBBPF_STRICT_ALL); diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index d2458c1b16..28b6646662 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -252,6 +252,34 @@ __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info) int extra_prog_load_log_flags = 0; +int testing_prog_flags(void) +{ + static int cached_flags = -1; + static int prog_flags[] = { BPF_F_TEST_RND_HI32, BPF_F_TEST_REG_INVARIANTS }; + static struct bpf_insn insns[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + int insn_cnt = ARRAY_SIZE(insns), i, fd, flags = 0; + LIBBPF_OPTS(bpf_prog_load_opts, opts); + + if (cached_flags >= 0) + return cached_flags; + + for (i = 0; i < ARRAY_SIZE(prog_flags); i++) { + opts.prog_flags = prog_flags[i]; + fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "flag-test", "GPL", + insns, insn_cnt, &opts); + if (fd >= 0) { + flags |= prog_flags[i]; + close(fd); + } + } + + cached_flags = flags; + return cached_flags; +} + int bpf_prog_test_load(const char *file, enum bpf_prog_type type, struct bpf_object **pobj, int *prog_fd) { @@ -276,7 +304,7 @@ int bpf_prog_test_load(const char *file, enum bpf_prog_type type, if (type != BPF_PROG_TYPE_UNSPEC && bpf_program__type(prog) != type) bpf_program__set_type(prog, type); - flags = bpf_program__flags(prog) | BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS; + flags = bpf_program__flags(prog) | testing_prog_flags(); bpf_program__set_flags(prog, flags); err = bpf_object__load(obj); @@ -299,7 +327,7 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, { LIBBPF_OPTS(bpf_prog_load_opts, opts, .kern_version = kern_version, - .prog_flags = BPF_F_TEST_RND_HI32 | BPF_F_TEST_REG_INVARIANTS, + .prog_flags = testing_prog_flags(), .log_level = extra_prog_load_log_flags, .log_buf = log_buf, .log_size = log_buf_sz, @@ -328,12 +356,12 @@ __u64 
read_perf_max_sample_freq(void) return sample_freq; } -static int finit_module(int fd, const char *param_values, int flags) +int finit_module(int fd, const char *param_values, int flags) { return syscall(__NR_finit_module, fd, param_values, flags); } -static int delete_module(const char *name, int flags) +int delete_module(const char *name, int flags) { return syscall(__NR_delete_module, name, flags); } @@ -387,3 +415,63 @@ int kern_sync_rcu(void) { return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0); } + +int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt) +{ + __u32 buf_element_size = sizeof(struct bpf_insn); + struct bpf_prog_info info = {}; + __u32 info_len = sizeof(info); + __u32 xlated_prog_len; + + if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { + perror("bpf_prog_get_info_by_fd failed"); + return -1; + } + + xlated_prog_len = info.xlated_prog_len; + if (xlated_prog_len % buf_element_size) { + printf("Program length %u is not a multiple of %u\n", + xlated_prog_len, buf_element_size); + return -1; + } + + *cnt = xlated_prog_len / buf_element_size; + *buf = calloc(*cnt, buf_element_size); + if (!*buf) { + perror("can't allocate xlated program buffer"); + return -ENOMEM; + } + + bzero(&info, sizeof(info)); + info.xlated_prog_len = xlated_prog_len; + info.xlated_prog_insns = (__u64)(unsigned long)*buf; + if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { + perror("second bpf_prog_get_info_by_fd failed"); + goto out_free_buf; + } + + return 0; + +out_free_buf: + free(*buf); + *buf = NULL; + return -1; +} + +bool is_jit_enabled(void) +{ + const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable"; + bool enabled = false; + int sysctl_fd; + + sysctl_fd = open(jit_sysctl, O_RDONLY); + if (sysctl_fd != -1) { + char tmpc; + + if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1) + enabled = (tmpc != '0'); + close(sysctl_fd); + } + + return enabled; +} diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h index 35284faff4..d55f6ab124 100644 --- a/tools/testing/selftests/bpf/testing_helpers.h +++ b/tools/testing/selftests/bpf/testing_helpers.h @@ -36,6 +36,8 @@ __u64 read_perf_max_sample_freq(void); int load_bpf_testmod(bool verbose); int unload_bpf_testmod(bool verbose); int kern_sync_rcu(void); +int finit_module(int fd, const char *param_values, int flags); +int delete_module(const char *name, int flags); static inline __u64 get_time_ns(void) { @@ -46,4 +48,12 @@ static inline __u64 get_time_ns(void) return (u64)t.tv_sec * 1000000000 + t.tv_nsec; } +struct bpf_insn; +/* Request BPF program instructions after all rewrites are applied, + * e.g. verifier.c:convert_ctx_access() is done.
+ */ +int get_xlated_program(int fd_prog, struct bpf_insn **buf, __u32 *cnt); +int testing_prog_flags(void); +bool is_jit_enabled(void); + #endif /* __TESTING_HELPERS_H */ diff --git a/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c b/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c index a535d41dc2..59125b22ae 100644 --- a/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c +++ b/tools/testing/selftests/bpf/verifier/bpf_loop_inline.c @@ -57,6 +57,7 @@ .expected_insns = { PSEUDO_CALL_INSN() }, .unexpected_insns = { HELPER_CALL_INSN() }, .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .flags = F_NEEDS_JIT_ENABLED, .result = ACCEPT, .runs = 0, .func_info = { { 0, MAIN_TYPE }, { 12, CALLBACK_TYPE } }, @@ -90,6 +91,7 @@ .expected_insns = { HELPER_CALL_INSN() }, .unexpected_insns = { PSEUDO_CALL_INSN() }, .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .flags = F_NEEDS_JIT_ENABLED, .result = ACCEPT, .runs = 0, .func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } }, @@ -127,6 +129,7 @@ .expected_insns = { HELPER_CALL_INSN() }, .unexpected_insns = { PSEUDO_CALL_INSN() }, .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .flags = F_NEEDS_JIT_ENABLED, .result = ACCEPT, .runs = 0, .func_info = { @@ -165,6 +168,7 @@ .expected_insns = { PSEUDO_CALL_INSN() }, .unexpected_insns = { HELPER_CALL_INSN() }, .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .flags = F_NEEDS_JIT_ENABLED, .result = ACCEPT, .runs = 0, .func_info = { @@ -235,6 +239,7 @@ }, .unexpected_insns = { HELPER_CALL_INSN() }, .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .flags = F_NEEDS_JIT_ENABLED, .result = ACCEPT, .func_info = { { 0, MAIN_TYPE }, @@ -252,6 +257,7 @@ .unexpected_insns = { HELPER_CALL_INSN() }, .result = ACCEPT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, + .flags = F_NEEDS_JIT_ENABLED, .func_info = { { 0, MAIN_TYPE }, { 16, CALLBACK_TYPE } }, .func_info_cnt = 2, BTF_TYPES diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c index 8a2ff81d83..0a9293a572 100644 --- a/tools/testing/selftests/bpf/verifier/precise.c +++ b/tools/testing/selftests/bpf/verifier/precise.c @@ -183,10 +183,10 @@ .prog_type = BPF_PROG_TYPE_XDP, .flags = BPF_F_TEST_STATE_FREQ, .errstr = "mark_precise: frame0: last_idx 7 first_idx 7\ - mark_precise: frame0: parent state regs=r4 stack=:\ + mark_precise: frame0: parent state regs=r4 stack=-8:\ mark_precise: frame0: last_idx 6 first_idx 4\ - mark_precise: frame0: regs=r4 stack= before 6: (b7) r0 = -1\ - mark_precise: frame0: regs=r4 stack= before 5: (79) r4 = *(u64 *)(r10 -8)\ + mark_precise: frame0: regs=r4 stack=-8 before 6: (b7) r0 = -1\ + mark_precise: frame0: regs=r4 stack=-8 before 5: (79) r4 = *(u64 *)(r10 -8)\ mark_precise: frame0: regs= stack=-8 before 4: (7b) *(u64 *)(r3 -8) = r0\ mark_precise: frame0: parent state regs=r0 stack=:\ mark_precise: frame0: last_idx 3 first_idx 3\ diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c index 878d68db03..bdf5d81800 100644 --- a/tools/testing/selftests/bpf/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c @@ -480,7 +480,7 @@ peek: for (int j = 0; j < 500; j++) { if (complete_tx(xsk, clock_id)) break; - usleep(10*1000); + usleep(10); } } }
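The __BYTE_ORDER__ conditionals in the verifier_spill_fill.c hunks above all encode the same layout fact: after a 32-bit spill to fp-8, the least significant 16 bits of the value sit at fp-8 on little-endian but at fp-6 on big-endian, so a narrow fill has to pick its stack offset per byte order. A standalone userspace sketch of that layout (illustration only, not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t spill = 20;	/* the constant the tests store via w4 = 20 */
	unsigned char slot[4];	/* stands in for the four stack bytes at fp-8 */
	uint16_t fill;

	memcpy(slot, &spill, sizeof(spill));		/* the "spill" */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	memcpy(&fill, slot, sizeof(fill));		/* LSBs at the slot start, i.e. fp-8 */
#else
	memcpy(&fill, slot + 2, sizeof(fill));		/* LSBs at the slot end, i.e. fp-6 */
#endif
	printf("refilled u16: %u\n", fill);		/* prints 20 on either byte order */
	return 0;
}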
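Likewise, the start_libbpf_log_capture()/stop_libbpf_log_capture() pair added to test_progs.c above is meant to bracket libbpf calls whose INFO/WARN output a test wants to inspect. A minimal usage sketch (hypothetical test helper and match string, not taken from this patch):

#include <stdlib.h>
#include <string.h>
#include "test_progs.h"

static void check_libbpf_warnings(void)	/* hypothetical test helper */
{
	char *log;

	if (start_libbpf_log_capture())		/* PRINT_FAILs on error itself */
		return;

	/* ... libbpf operations expected to emit INFO/WARN messages ... */

	log = stop_libbpf_log_capture();	/* NULL if capture was never started */
	if (log && strstr(log, "failed to"))	/* made-up substring to match */
		PRINT_FAIL("unexpected libbpf warning captured\n");
	free(log);				/* caller owns the returned buffer */
}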