Diffstat (limited to 'tools/testing/selftests/bpf/progs')
74 files changed, 2961 insertions, 182 deletions
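Two conversions recur throughout the diff below: compiler-specific `#pragma clang loop ...` / `#pragma nounroll` directives are replaced by the portable `__pragma_loop_*` macros from the new bpf_compiler.h (so the same sources build under both clang and GCC-BPF), and open-coded `bpf_rdonly_cast(p, bpf_core_type_id_kernel(struct t))` calls are replaced by libbpf's `bpf_core_cast(p, struct t)` shorthand. Here is a minimal sketch of both idioms, assuming the headers added by this series; the iterator program, its loop body, and the `hits` counter are hypothetical and exist only for illustration:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only, not part of the patch: demonstrates the two
 * idioms this series converts the selftests to. The iterator program and
 * the "hits" counter are hypothetical.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>	/* bpf_core_cast() */
#include "bpf_compiler.h"	/* __pragma_loop_* macros */

char _license[] SEC("license") = "GPL";

int hits;

SEC("iter/tcp")
int sketch_iter_tcp(struct bpf_iter__tcp *ctx)
{
	struct sock_common *sk_common = ctx->sk_common;
	struct sock *sk;
	int i;

	if (!sk_common)
		return 0;

	/* one-argument CO-RE cast; replaces the old two-argument
	 * bpf_rdonly_cast(sk_common, bpf_core_type_id_kernel(struct sock))
	 */
	sk = bpf_core_cast(sk_common, struct sock);

	/* portable replacement for "#pragma nounroll" /
	 * "#pragma clang loop unroll(disable)"
	 */
	__pragma_loop_no_unroll
	for (i = 0; i < 4; i++)
		if (sk->sk_state == i + 1)
			hits++;

	return 0;
}

Per the bpf_compiler.h hunk in the diff, `__pragma_loop_no_unroll` expands to `_Pragma("clang loop unroll(disable)")` under clang and to `_Pragma("GCC unroll 1")` under GCC, which is what makes the converted loops compiler-neutral.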
diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c new file mode 100644 index 0000000000..1e6ac187a6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/arena_htab.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARENA); + __uint(map_flags, BPF_F_MMAPABLE); + __uint(max_entries, 100); /* number of pages */ +} arena SEC(".maps"); + +#include "bpf_arena_htab.h" + +void __arena *htab_for_user; +bool skip = false; + +int zero = 0; + +SEC("syscall") +int arena_htab_llvm(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM) + struct htab __arena *htab; + __u64 i; + + htab = bpf_alloc(sizeof(*htab)); + cast_kern(htab); + htab_init(htab); + + /* first run. No old elems in the table */ + for (i = zero; i < 1000; i++) + htab_update_elem(htab, i, i); + + /* should replace all elems with new ones */ + for (i = zero; i < 1000; i++) + htab_update_elem(htab, i, i); + cast_user(htab); + htab_for_user = htab; +#else + skip = true; +#endif + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/arena_htab_asm.c b/tools/testing/selftests/bpf/progs/arena_htab_asm.c new file mode 100644 index 0000000000..6cd70ea12f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/arena_htab_asm.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#define BPF_ARENA_FORCE_ASM +#define arena_htab_llvm arena_htab_asm +#include "arena_htab.c" diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c new file mode 100644 index 0000000000..c0422c58ce --- /dev/null +++ b/tools/testing/selftests/bpf/progs/arena_list.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> +#include "bpf_experimental.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARENA); + __uint(map_flags, BPF_F_MMAPABLE); + __uint(max_entries, 100); /* number of pages */ +#ifdef __TARGET_ARCH_arm64 + __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */ +#else + __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */ +#endif +} arena SEC(".maps"); + +#include "bpf_arena_alloc.h" +#include "bpf_arena_list.h" + +struct elem { + struct arena_list_node node; + __u64 value; +}; + +struct arena_list_head __arena *list_head; +int list_sum; +int cnt; +bool skip = false; + +#ifdef __BPF_FEATURE_ADDR_SPACE_CAST +long __arena arena_sum; +int __arena test_val = 1; +struct arena_list_head __arena global_head; +#else +long arena_sum SEC(".addr_space.1"); +int test_val SEC(".addr_space.1"); +#endif + +int zero; + +SEC("syscall") +int arena_list_add(void *ctx) +{ +#ifdef __BPF_FEATURE_ADDR_SPACE_CAST + __u64 i; + + list_head = &global_head; + + for (i = zero; i < cnt; cond_break, i++) { + struct elem __arena *n = bpf_alloc(sizeof(*n)); + + test_val++; + n->value = i; + arena_sum += i; + list_add_head(&n->node, list_head); + } +#else + skip = true; +#endif + return 0; +} + +SEC("syscall") +int arena_list_del(void *ctx) +{ +#ifdef __BPF_FEATURE_ADDR_SPACE_CAST + struct elem __arena *n; + int sum = 0; + + arena_sum = 0; + list_for_each_entry(n, list_head, node) { + sum += n->value; + arena_sum += n->value; + list_del(&n->node); + bpf_free(n); + } + list_sum = sum; +#else + skip = true; +#endif + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/async_stack_depth.c b/tools/testing/selftests/bpf/progs/async_stack_depth.c index 3517c0e012..36734683ac 100644 --- a/tools/testing/selftests/bpf/progs/async_stack_depth.c +++ b/tools/testing/selftests/bpf/progs/async_stack_depth.c @@ -30,7 +30,7 @@ static int bad_timer_cb(void *map, int *key, struct bpf_timer *timer) } SEC("tc") -__failure __msg("combined stack size of 2 calls is 576. Too large") +__failure __msg("combined stack size of 2 calls is") int pseudo_call_check(struct __sk_buff *ctx) { struct hmap_elem *elem; @@ -45,7 +45,7 @@ int pseudo_call_check(struct __sk_buff *ctx) } SEC("tc") -__failure __msg("combined stack size of 2 calls is 608. 
Too large") +__failure __msg("combined stack size of 2 calls is") int async_call_root_check(struct __sk_buff *ctx) { struct hmap_elem *elem; diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops.c b/tools/testing/selftests/bpf/progs/bad_struct_ops.c new file mode 100644 index 0000000000..b7e175cd0a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bad_struct_ops.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +SEC("struct_ops/test_1") +int BPF_PROG(test_1) { return 0; } + +SEC("struct_ops/test_2") +int BPF_PROG(test_2) { return 0; } + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_1 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2 +}; + +SEC(".struct_ops.link") +struct bpf_testmod_ops2 testmod_2 = { + .test_1 = (void *)test_1 +}; diff --git a/tools/testing/selftests/bpf/progs/bad_struct_ops2.c b/tools/testing/selftests/bpf/progs/bad_struct_ops2.c new file mode 100644 index 0000000000..64a95f6be8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bad_struct_ops2.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +/* This is an unused struct_ops program, it lacks corresponding + * struct_ops map, which provides attachment information. + * W/o additional configuration attempt to load such + * BPF object file would fail. + */ +SEC("struct_ops/foo") +void foo(void) {} diff --git a/tools/testing/selftests/bpf/progs/bpf_compiler.h b/tools/testing/selftests/bpf/progs/bpf_compiler.h new file mode 100644 index 0000000000..a7c343dc82 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/bpf_compiler.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BPF_COMPILER_H__ +#define __BPF_COMPILER_H__ + +#define DO_PRAGMA_(X) _Pragma(#X) + +#if __clang__ +#define __pragma_loop_unroll DO_PRAGMA_(clang loop unroll(enable)) +#else +/* In GCC -funroll-loops, which is enabled with -O2, should have the + same impact than the loop-unroll-enable pragma above. 
*/ +#define __pragma_loop_unroll +#endif + +#if __clang__ +#define __pragma_loop_unroll_count(N) DO_PRAGMA_(clang loop unroll_count(N)) +#else +#define __pragma_loop_unroll_count(N) DO_PRAGMA_(GCC unroll N) +#endif + +#if __clang__ +#define __pragma_loop_unroll_full DO_PRAGMA_(clang loop unroll(full)) +#else +#define __pragma_loop_unroll_full DO_PRAGMA_(GCC unroll 65534) +#endif + +#if __clang__ +#define __pragma_loop_no_unroll DO_PRAGMA_(clang loop unroll(disable)) +#else +#define __pragma_loop_no_unroll DO_PRAGMA_(GCC unroll 1) +#endif + +#endif diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index 2fd59970c4..fb2f5513e2 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -80,7 +80,7 @@ #define __imm(name) [name]"i"(name) #define __imm_const(name, expr) [name]"i"(expr) #define __imm_addr(name) [name]"i"(&name) -#define __imm_ptr(name) [name]"p"(&name) +#define __imm_ptr(name) [name]"r"(&name) #define __imm_insn(name, expr) [name]"i"(*(long *)&(expr)) /* Magic constants used with __retval() */ diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h index e8bd4b7b5e..7001965d1c 100644 --- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h +++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h @@ -51,9 +51,25 @@ #define ICSK_TIME_LOSS_PROBE 5 #define ICSK_TIME_REO_TIMEOUT 6 +#define ETH_ALEN 6 #define ETH_HLEN 14 +#define ETH_P_IP 0x0800 #define ETH_P_IPV6 0x86DD +#define NEXTHDR_TCP 6 + +#define TCPOPT_NOP 1 +#define TCPOPT_EOL 0 +#define TCPOPT_MSS 2 +#define TCPOPT_WINDOW 3 +#define TCPOPT_TIMESTAMP 8 +#define TCPOPT_SACK_PERM 4 + +#define TCPOLEN_MSS 4 +#define TCPOLEN_WINDOW 3 +#define TCPOLEN_TIMESTAMP 10 +#define TCPOLEN_SACK_PERM 2 + #define CHECKSUM_NONE 0 #define CHECKSUM_PARTIAL 3 diff --git a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c index 610c2427fd..3500e4b69e 100644 --- a/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c +++ b/tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c @@ -27,32 +27,6 @@ bool is_cgroup1 = 0; struct cgroup *bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) __ksym; void bpf_cgroup_release(struct cgroup *cgrp) __ksym; -static void __on_lookup(struct cgroup *cgrp) -{ - bpf_cgrp_storage_delete(&map_a, cgrp); - bpf_cgrp_storage_delete(&map_b, cgrp); -} - -SEC("fentry/bpf_local_storage_lookup") -int BPF_PROG(on_lookup) -{ - struct task_struct *task = bpf_get_current_task_btf(); - struct cgroup *cgrp; - - if (is_cgroup1) { - cgrp = bpf_task_get_cgroup1(task, target_hid); - if (!cgrp) - return 0; - - __on_lookup(cgrp); - bpf_cgroup_release(cgrp); - return 0; - } - - __on_lookup(task->cgroups->dfl_cgrp); - return 0; -} - static void __on_update(struct cgroup *cgrp) { long *ptr; diff --git a/tools/testing/selftests/bpf/progs/connect_unix_prog.c b/tools/testing/selftests/bpf/progs/connect_unix_prog.c index ca8aa2f116..2ef0e0c46d 100644 --- a/tools/testing/selftests/bpf/progs/connect_unix_prog.c +++ b/tools/testing/selftests/bpf/progs/connect_unix_prog.c @@ -28,8 +28,7 @@ int connect_unix_prog(struct bpf_sock_addr *ctx) if (sa_kern->uaddrlen != unaddrlen) return 0; - sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, - bpf_core_type_id_kernel(struct sockaddr_un)); + sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un); if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, 
sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) return 0; diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h index 0cd4aebb97..c705d8112a 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_common.h +++ b/tools/testing/selftests/bpf/progs/cpumask_common.h @@ -23,41 +23,42 @@ struct array_map { __uint(max_entries, 1); } __cpumask_map SEC(".maps"); -struct bpf_cpumask *bpf_cpumask_create(void) __ksym; -void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym; -struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym; -u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym; -u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; +struct bpf_cpumask *bpf_cpumask_create(void) __ksym __weak; +void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym __weak; +struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym __weak; +u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym __weak; +u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym __weak; u32 bpf_cpumask_first_and(const struct cpumask *src1, - const struct cpumask *src2) __ksym; -void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; -void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; -bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym; -bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; -bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; -void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym; -void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym; + const struct cpumask *src2) __ksym __weak; +void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak; +void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak; +bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym __weak; +bool bpf_cpumask_test_and_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak; +bool bpf_cpumask_test_and_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym __weak; +void bpf_cpumask_setall(struct bpf_cpumask *cpumask) __ksym __weak; +void bpf_cpumask_clear(struct bpf_cpumask *cpumask) __ksym __weak; bool bpf_cpumask_and(struct bpf_cpumask *cpumask, const struct cpumask *src1, - const struct cpumask *src2) __ksym; + const struct cpumask *src2) __ksym __weak; void bpf_cpumask_or(struct bpf_cpumask *cpumask, const struct cpumask *src1, - const struct cpumask *src2) __ksym; + const struct cpumask *src2) __ksym __weak; void bpf_cpumask_xor(struct bpf_cpumask *cpumask, const struct cpumask *src1, - const struct cpumask *src2) __ksym; -bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym; -bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym; -bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym; -bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym; -bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym; -void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym; -u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym; -u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym; -u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym; - -void bpf_rcu_read_lock(void) __ksym; -void bpf_rcu_read_unlock(void) __ksym; + const struct cpumask *src2) 
__ksym __weak; +bool bpf_cpumask_equal(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak; +bool bpf_cpumask_intersects(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak; +bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) __ksym __weak; +bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym __weak; +bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym __weak; +void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym __weak; +u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym __weak; +u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, + const struct cpumask *src2) __ksym __weak; +u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym __weak; + +void bpf_rcu_read_lock(void) __ksym __weak; +void bpf_rcu_read_unlock(void) __ksym __weak; static inline const struct cpumask *cast(struct bpf_cpumask *cpumask) { diff --git a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c index 9c078f34bb..5a76754f84 100644 --- a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c +++ b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c @@ -27,8 +27,7 @@ int getpeername_unix_prog(struct bpf_sock_addr *ctx) if (sa_kern->uaddrlen != unaddrlen) return 1; - sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, - bpf_core_type_id_kernel(struct sockaddr_un)); + sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un); if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) return 1; diff --git a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c index ac71451114..7867113c69 100644 --- a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c +++ b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c @@ -27,8 +27,7 @@ int getsockname_unix_prog(struct bpf_sock_addr *ctx) if (sa_kern->uaddrlen != unaddrlen) return 1; - sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, - bpf_core_type_id_kernel(struct sockaddr_un)); + sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un); if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) return 1; diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index fe971992e6..3db416606f 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -5,6 +5,7 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" +#include "bpf_compiler.h" #define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0])) @@ -78,8 +79,8 @@ int iter_err_unsafe_asm_loop(const void *ctx) "*(u32 *)(r1 + 0) = r6;" /* invalid */ : : [it]"r"(&it), - [small_arr]"p"(small_arr), - [zero]"p"(zero), + [small_arr]"r"(small_arr), + [zero]"r"(zero), __imm(bpf_iter_num_new), __imm(bpf_iter_num_next), __imm(bpf_iter_num_destroy) @@ -183,7 +184,7 @@ int iter_pragma_unroll_loop(const void *ctx) MY_PID_GUARD(); bpf_iter_num_new(&it, 0, 2); -#pragma nounroll + __pragma_loop_no_unroll for (i = 0; i < 3; i++) { v = bpf_iter_num_next(&it); bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? 
*v : -1); @@ -238,7 +239,7 @@ int iter_multiple_sequential_loops(const void *ctx) bpf_iter_num_destroy(&it); bpf_iter_num_new(&it, 0, 2); -#pragma nounroll + __pragma_loop_no_unroll for (i = 0; i < 3; i++) { v = bpf_iter_num_next(&it); bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1); diff --git a/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c new file mode 100644 index 0000000000..2414ac20b6 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/kptr_xchg_inline.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023. Huawei Technologies Co., Ltd */ +#include <linux/types.h> +#include <bpf/bpf_helpers.h> + +#include "bpf_experimental.h" +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +struct bin_data { + char blob[32]; +}; + +#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8))) +private(kptr) struct bin_data __kptr * ptr; + +SEC("tc") +__naked int kptr_xchg_inline(void) +{ + asm volatile ( + "r1 = %[ptr] ll;" + "r2 = 0;" + "call %[bpf_kptr_xchg];" + "if r0 == 0 goto 1f;" + "r1 = r0;" + "r2 = 0;" + "call %[bpf_obj_drop_impl];" + "1:" + "r0 = 0;" + "exit;" + : + : __imm_addr(ptr), + __imm(bpf_kptr_xchg), + __imm(bpf_obj_drop_impl) + : __clobber_all + ); +} + +/* BTF FUNC records are not generated for kfuncs referenced + * from inline assembly. These records are necessary for + * libbpf to link the program. The function below is a hack + * to ensure that BTF FUNC records are generated. + */ +void __btf_root(void) +{ + bpf_obj_drop(NULL); +} diff --git a/tools/testing/selftests/bpf/progs/loop4.c b/tools/testing/selftests/bpf/progs/loop4.c index b35337926d..0de0357f57 100644 --- a/tools/testing/selftests/bpf/progs/loop4.c +++ b/tools/testing/selftests/bpf/progs/loop4.c @@ -3,6 +3,8 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + char _license[] SEC("license") = "GPL"; SEC("socket") @@ -10,7 +12,7 @@ int combinations(volatile struct __sk_buff* skb) { int ret = 0, i; -#pragma nounroll + __pragma_loop_no_unroll for (i = 0; i < 20; i++) if (skb->len) ret |= 1 << i; diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c index 3325da17ec..efaf622c28 100644 --- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c +++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c @@ -316,7 +316,7 @@ struct lpm_trie { } __attribute__((preserve_access_index)); struct lpm_key { - struct bpf_lpm_trie_key trie_key; + struct bpf_lpm_trie_key_hdr trie_key; __u32 data; }; diff --git a/tools/testing/selftests/bpf/progs/priv_map.c b/tools/testing/selftests/bpf/progs/priv_map.c new file mode 100644 index 0000000000..9085be50f0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/priv_map.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_QUEUE); + __uint(max_entries, 1); + __type(value, __u32); +} priv_map SEC(".maps"); diff --git a/tools/testing/selftests/bpf/progs/priv_prog.c b/tools/testing/selftests/bpf/progs/priv_prog.c new file mode 100644 index 0000000000..3c7b2b618c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/priv_prog.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +SEC("kprobe") +int kprobe_prog(void *ctx) +{ + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h index de3b6e4e4d..6957d9f280 100644 --- a/tools/testing/selftests/bpf/progs/profiler.inc.h +++ b/tools/testing/selftests/bpf/progs/profiler.inc.h @@ -8,6 +8,7 @@ #include "profiler.h" #include "err.h" #include "bpf_experimental.h" +#include "bpf_compiler.h" #ifndef NULL #define NULL 0 @@ -169,7 +170,7 @@ static INLINE int get_var_spid_index(struct var_kill_data_arr_t* arr_struct, int spid) { #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) if (arr_struct->array[i].meta.pid == spid) @@ -185,7 +186,7 @@ static INLINE void populate_ancestors(struct task_struct* task, ancestors_data->num_ancestors = 0; #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (num_ancestors = 0; num_ancestors < MAX_ANCESTORS; num_ancestors++) { parent = BPF_CORE_READ(parent, real_parent); @@ -212,7 +213,7 @@ static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node, size_t filepart_length; #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) { filepart_length = @@ -261,7 +262,7 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data, int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local, pids_cgrp_id___local); #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < CGROUP_SUBSYS_COUNT; i++) { struct cgroup_subsys_state* subsys = @@ -402,7 +403,7 @@ static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig) if (kill_data == NULL) return 0; #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) if (arr_struct->array[i].meta.pid == 0) { @@ -482,7 +483,7 @@ read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload) struct dentry* parent_dentry; #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < MAX_PATH_DEPTH; i++) { filepart_length = @@ -508,7 +509,7 @@ is_ancestor_in_allowed_inodes(struct dentry* filp_dentry) { struct dentry* parent_dentry; #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < MAX_PATH_DEPTH; i++) { u64 dir_ino = BPF_CORE_READ(filp_dentry, d_inode, i_ino); @@ -629,7 +630,7 @@ int raw_tracepoint__sched_process_exit(void* ctx) struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn); #ifdef UNROLL -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) { struct var_kill_data_t* past_kill_data = &arr_struct->array[i]; diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h index 026d573ce1..86484f07e1 100644 --- a/tools/testing/selftests/bpf/progs/pyperf.h +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -8,6 +8,7 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" +#include "bpf_compiler.h" #define FUNCTION_NAME_LEN 64 #define FILE_NAME_LEN 128 @@ -298,11 +299,11 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx) #if defined(USE_ITER) /* no for loop, no unrolling */ #elif defined(NO_UNROLL) -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll #elif defined(UNROLL_COUNT) -#pragma clang loop unroll_count(UNROLL_COUNT) + 
__pragma_loop_unroll_count(UNROLL_COUNT) #else -#pragma clang loop unroll(full) + __pragma_loop_unroll_full #endif /* NO_UNROLL */ /* Unwind python stack */ #ifdef USE_ITER diff --git a/tools/testing/selftests/bpf/progs/rcu_read_lock.c b/tools/testing/selftests/bpf/progs/rcu_read_lock.c index 14fb01437f..ab3a532b7d 100644 --- a/tools/testing/selftests/bpf/progs/rcu_read_lock.c +++ b/tools/testing/selftests/bpf/progs/rcu_read_lock.c @@ -319,3 +319,123 @@ int cross_rcu_region(void *ctx) bpf_rcu_read_unlock(); return 0; } + +__noinline +static int static_subprog(void *ctx) +{ + volatile int ret = 0; + + if (bpf_get_prandom_u32()) + return ret + 42; + return ret + bpf_get_prandom_u32(); +} + +__noinline +int global_subprog(u64 a) +{ + volatile int ret = a; + + return ret + static_subprog(NULL); +} + +__noinline +static int static_subprog_lock(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + if (bpf_get_prandom_u32()) + return ret + 42; + return ret + bpf_get_prandom_u32(); +} + +__noinline +int global_subprog_lock(u64 a) +{ + volatile int ret = a; + + return ret + static_subprog_lock(NULL); +} + +__noinline +static int static_subprog_unlock(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_unlock(); + if (bpf_get_prandom_u32()) + return ret + 42; + return ret + bpf_get_prandom_u32(); +} + +__noinline +int global_subprog_unlock(u64 a) +{ + volatile int ret = a; + + return ret + static_subprog_unlock(NULL); +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_subprog(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + if (bpf_get_prandom_u32()) + ret += static_subprog(ctx); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_global_subprog(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + if (bpf_get_prandom_u32()) + ret += global_subprog(ret); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_subprog_lock(void *ctx) +{ + volatile int ret = 0; + + ret += static_subprog_lock(ctx); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_global_subprog_lock(void *ctx) +{ + volatile int ret = 0; + + ret += global_subprog_lock(ret); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_subprog_unlock(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + ret += static_subprog_unlock(ctx); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int rcu_read_lock_global_subprog_unlock(void *ctx) +{ + volatile int ret = 0; + + bpf_rcu_read_lock(); + ret += global_subprog_unlock(ret); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c index 4dfbc85525..1c7ab44bcc 100644 --- a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c +++ b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c @@ -27,8 +27,7 @@ int recvmsg_unix_prog(struct bpf_sock_addr *ctx) if (sa_kern->uaddrlen != unaddrlen) return 1; - sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, - bpf_core_type_id_kernel(struct sockaddr_un)); + sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un); if (memcmp(sa_kern_unaddr->sun_path, SERVUN_ADDRESS, sizeof(SERVUN_ADDRESS) - 1) != 0) return 1; diff --git a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c index 1f67e83266..d8869b03dd 100644 --- 
a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c +++ b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c @@ -28,8 +28,7 @@ int sendmsg_unix_prog(struct bpf_sock_addr *ctx) if (sa_kern->uaddrlen != unaddrlen) return 0; - sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, - bpf_core_type_id_kernel(struct sockaddr_un)); + sa_kern_unaddr = bpf_core_cast(sa_kern->uaddr, struct sockaddr_un); if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) return 0; diff --git a/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c index 3e745793b2..46d6eb2a3b 100644 --- a/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c +++ b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c @@ -12,8 +12,6 @@ int cookie_found = 0; __u64 cookie = 0; __u32 omem = 0; -void *bpf_rdonly_cast(void *, __u32) __ksym; - struct { __uint(type, BPF_MAP_TYPE_SK_STORAGE); __uint(map_flags, BPF_F_NO_PREALLOC); @@ -29,7 +27,7 @@ int BPF_PROG(bpf_local_storage_destroy, struct bpf_local_storage *local_storage) if (local_storage_ptr != local_storage) return 0; - sk = bpf_rdonly_cast(sk_ptr, bpf_core_type_id_kernel(struct sock)); + sk = bpf_core_cast(sk_ptr, struct sock); if (sk->sk_cookie.counter != cookie) return 0; diff --git a/tools/testing/selftests/bpf/progs/sock_iter_batch.c b/tools/testing/selftests/bpf/progs/sock_iter_batch.c index ffbbfe1fa1..96531b0d9d 100644 --- a/tools/testing/selftests/bpf/progs/sock_iter_batch.c +++ b/tools/testing/selftests/bpf/progs/sock_iter_batch.c @@ -32,7 +32,7 @@ int iter_tcp_soreuse(struct bpf_iter__tcp *ctx) if (!sk) return 0; - sk = bpf_rdonly_cast(sk, bpf_core_type_id_kernel(struct sock)); + sk = bpf_core_cast(sk, struct sock); if (sk->sk_family != AF_INET6 || sk->sk_state != TCP_LISTEN || !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr)) @@ -68,7 +68,7 @@ int iter_udp_soreuse(struct bpf_iter__udp *ctx) if (!sk) return 0; - sk = bpf_rdonly_cast(sk, bpf_core_type_id_kernel(struct sock)); + sk = bpf_core_cast(sk, struct sock); if (sk->sk_family != AF_INET6 || !ipv6_addr_loopback(&sk->sk_v6_rcv_saddr)) return 0; diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index 40df2cc26e..f74459eead 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -10,6 +10,8 @@ #include <linux/types.h> #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + typedef uint32_t pid_t; struct task_struct {}; @@ -419,9 +421,9 @@ static __always_inline uint64_t read_map_var(struct strobemeta_cfg *cfg, } #ifdef NO_UNROLL -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll #else -#pragma unroll + __pragma_loop_unroll #endif for (int i = 0; i < STROBE_MAX_MAP_ENTRIES; ++i) { if (i >= map.cnt) @@ -560,25 +562,25 @@ static void *read_strobe_meta(struct task_struct *task, payload_off = sizeof(data->payload); #else #ifdef NO_UNROLL -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll #else -#pragma unroll + __pragma_loop_unroll #endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_INTS; ++i) { read_int_var(cfg, i, tls_base, &value, data); } #ifdef NO_UNROLL -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll #else -#pragma unroll + __pragma_loop_unroll #endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_STRS; ++i) { payload_off = read_str_var(cfg, i, tls_base, &value, data, payload_off); } #ifdef NO_UNROLL -#pragma clang loop unroll(disable) + 
__pragma_loop_no_unroll #else -#pragma unroll + __pragma_loop_unroll #endif /* NO_UNROLL */ for (int i = 0; i < STROBE_MAX_MAPS; ++i) { payload_off = read_map_var(cfg, i, tls_base, &value, data, payload_off); diff --git a/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c b/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c new file mode 100644 index 0000000000..ba10c38962 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_autocreate.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +int test_1_result = 0; + +SEC("struct_ops/test_1") +int BPF_PROG(test_1) +{ + test_1_result = 42; + return 0; +} + +SEC("struct_ops/test_1") +int BPF_PROG(test_2) +{ + return 0; +} + +struct bpf_testmod_ops___v1 { + int (*test_1)(void); +}; + +struct bpf_testmod_ops___v2 { + int (*test_1)(void); + int (*does_not_exist)(void); +}; + +SEC(".struct_ops.link") +struct bpf_testmod_ops___v1 testmod_1 = { + .test_1 = (void *)test_1 +}; + +SEC(".struct_ops.link") +struct bpf_testmod_ops___v2 testmod_2 = { + .test_1 = (void *)test_1, + .does_not_exist = (void *)test_2 +}; + +SEC("?.struct_ops") +struct bpf_testmod_ops___v1 optional_map = { + .test_1 = (void *)test_1, +}; + +SEC("?.struct_ops.link") +struct bpf_testmod_ops___v1 optional_map2 = { + .test_1 = (void *)test_1, +}; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c b/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c new file mode 100644 index 0000000000..6049d9c902 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_autocreate2.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +int test_1_result = 0; + +SEC("?struct_ops/test_1") +int BPF_PROG(foo) +{ + test_1_result = 42; + return 0; +} + +SEC("?struct_ops/test_1") +int BPF_PROG(bar) +{ + test_1_result = 24; + return 0; +} + +struct bpf_testmod_ops { + int (*test_1)(void); +}; + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_1 = { + .test_1 = (void *)bar +}; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c new file mode 100644 index 0000000000..b450f72e74 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +pid_t tgid = 0; + +/* This is a test BPF program that uses struct_ops to access an argument + * that may be NULL. This is a test for the verifier to ensure that it can + * rip PTR_MAYBE_NULL correctly. 
+ */ +SEC("struct_ops/test_maybe_null") +int BPF_PROG(test_maybe_null, int dummy, + struct task_struct *task) +{ + if (task) + tgid = task->tgid; + + return 0; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_1 = { + .test_maybe_null = (void *)test_maybe_null, +}; + diff --git a/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c new file mode 100644 index 0000000000..6283099ec3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_maybe_null_fail.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +pid_t tgid = 0; + +SEC("struct_ops/test_maybe_null_struct_ptr") +int BPF_PROG(test_maybe_null_struct_ptr, int dummy, + struct task_struct *task) +{ + tgid = task->tgid; + + return 0; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_struct_ptr = { + .test_maybe_null = (void *)test_maybe_null_struct_ptr, +}; + diff --git a/tools/testing/selftests/bpf/progs/struct_ops_module.c b/tools/testing/selftests/bpf/progs/struct_ops_module.c new file mode 100644 index 0000000000..026cabfa7f --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_module.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +int test_1_result = 0; +int test_2_result = 0; + +SEC("struct_ops/test_1") +int BPF_PROG(test_1) +{ + test_1_result = 0xdeadbeef; + return 0; +} + +SEC("struct_ops/test_2") +void BPF_PROG(test_2, int a, int b) +{ + test_2_result = a + b; +} + +SEC("struct_ops/test_3") +int BPF_PROG(test_3, int a, int b) +{ + test_2_result = a + b + 3; + return a + b + 3; +} + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_1 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2, + .data = 0x1, +}; + +SEC("struct_ops/test_2") +void BPF_PROG(test_2_v2, int a, int b) +{ + test_2_result = a * b; +} + +struct bpf_testmod_ops___v2 { + int (*test_1)(void); + void (*test_2)(int a, int b); + int (*test_maybe_null)(int dummy, struct task_struct *task); +}; + +SEC(".struct_ops.link") +struct bpf_testmod_ops___v2 testmod_2 = { + .test_1 = (void *)test_1, + .test_2 = (void *)test_2_v2, +}; diff --git a/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c new file mode 100644 index 0000000000..9efcc6e4d3 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_multi_pages.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +#define TRAMP(x) \ + SEC("struct_ops/tramp_" #x) \ + int BPF_PROG(tramp_ ## x, int a) \ + { \ + return a; \ + } + +TRAMP(1) +TRAMP(2) +TRAMP(3) +TRAMP(4) +TRAMP(5) +TRAMP(6) +TRAMP(7) +TRAMP(8) +TRAMP(9) +TRAMP(10) +TRAMP(11) +TRAMP(12) +TRAMP(13) +TRAMP(14) +TRAMP(15) +TRAMP(16) +TRAMP(17) +TRAMP(18) +TRAMP(19) +TRAMP(20) +TRAMP(21) +TRAMP(22) +TRAMP(23) +TRAMP(24) +TRAMP(25) +TRAMP(26) +TRAMP(27) +TRAMP(28) +TRAMP(29) +TRAMP(30) +TRAMP(31) +TRAMP(32) +TRAMP(33) +TRAMP(34) +TRAMP(35) +TRAMP(36) +TRAMP(37) +TRAMP(38) +TRAMP(39) +TRAMP(40) + +#define F_TRAMP(x) .tramp_ ## x = (void *)tramp_ ## x + +SEC(".struct_ops.link") +struct bpf_testmod_ops multi_pages = { + F_TRAMP(1), + F_TRAMP(2), + F_TRAMP(3), + F_TRAMP(4), + F_TRAMP(5), + F_TRAMP(6), + F_TRAMP(7), + F_TRAMP(8), + F_TRAMP(9), + F_TRAMP(10), + F_TRAMP(11), + F_TRAMP(12), + F_TRAMP(13), + F_TRAMP(14), + F_TRAMP(15), + F_TRAMP(16), + F_TRAMP(17), + F_TRAMP(18), + F_TRAMP(19), + F_TRAMP(20), + F_TRAMP(21), + F_TRAMP(22), + F_TRAMP(23), + F_TRAMP(24), + F_TRAMP(25), + F_TRAMP(26), + F_TRAMP(27), + F_TRAMP(28), + F_TRAMP(29), + F_TRAMP(30), + F_TRAMP(31), + F_TRAMP(32), + F_TRAMP(33), + F_TRAMP(34), + F_TRAMP(35), + F_TRAMP(36), + F_TRAMP(37), + F_TRAMP(38), + F_TRAMP(39), + F_TRAMP(40), +}; diff --git a/tools/testing/selftests/bpf/progs/task_ls_recursion.c b/tools/testing/selftests/bpf/progs/task_ls_recursion.c index 4542dc683b..f1853c38aa 100644 --- a/tools/testing/selftests/bpf/progs/task_ls_recursion.c +++ b/tools/testing/selftests/bpf/progs/task_ls_recursion.c @@ -27,23 +27,6 @@ struct { __type(value, long); } map_b SEC(".maps"); -SEC("fentry/bpf_local_storage_lookup") -int BPF_PROG(on_lookup) -{ - struct task_struct *task = bpf_get_current_task_btf(); - - if (!test_pid || task->pid != test_pid) - return 0; - - /* The bpf_task_storage_delete will call - * bpf_local_storage_lookup. The prog->active will - * stop the recursion. 
- */ - bpf_task_storage_delete(&map_a, task); - bpf_task_storage_delete(&map_b, task); - return 0; -} - SEC("fentry/bpf_local_storage_update") int BPF_PROG(on_update) { diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c index 66b3049822..683c8aaa63 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c @@ -20,8 +20,11 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_compiler.h" #include "test_cls_redirect.h" +#pragma GCC diagnostic ignored "-Waddress-of-packed-member" + #ifdef SUBPROGS #define INLINING __noinline #else @@ -267,7 +270,7 @@ static INLINING void pkt_ipv4_checksum(struct iphdr *iph) uint32_t acc = 0; uint16_t *ipw = (uint16_t *)iph; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (size_t i = 0; i < sizeof(struct iphdr) / 2; i++) { acc += ipw[i]; } @@ -294,7 +297,7 @@ bool pkt_skip_ipv6_extension_headers(buf_t *pkt, }; *is_fragment = false; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (int i = 0; i < 6; i++) { switch (exthdr.next) { case IPPROTO_FRAGMENT: diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c index f41c81212e..da54c09e9a 100644 --- a/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c @@ -23,6 +23,8 @@ #include "test_cls_redirect.h" #include "bpf_kfuncs.h" +#pragma GCC diagnostic ignored "-Waddress-of-packed-member" + #define offsetofend(TYPE, MEMBER) \ (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER))) diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c index 22aba3f6e3..6fc8b9d66e 100644 --- a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c +++ b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c @@ -80,7 +80,7 @@ int test_core_type_id(void *ctx) * to detect whether this test has to be executed, however strange * that might look like. 
* - * [0] https://reviews.llvm.org/D85174 + * [0] https://github.com/llvm/llvm-project/commit/00602ee7ef0bf6c68d690a2bd729c12b95c95c99 */ #if __has_builtin(__builtin_preserve_type_info) struct core_reloc_type_id_output *out = (void *)&data.out; diff --git a/tools/testing/selftests/bpf/progs/test_fill_link_info.c b/tools/testing/selftests/bpf/progs/test_fill_link_info.c index 69509f8bb6..6afa834756 100644 --- a/tools/testing/selftests/bpf/progs/test_fill_link_info.c +++ b/tools/testing/selftests/bpf/progs/test_fill_link_info.c @@ -33,6 +33,12 @@ int BPF_PROG(tp_run) return 0; } +SEC("perf_event") +int event_run(void *ctx) +{ + return 0; +} + SEC("kprobe.multi") int BPF_PROG(kmulti_run) { diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c index 17a9f59bf5..fc69ff1888 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func1.c +++ b/tools/testing/selftests/bpf/progs/test_global_func1.c @@ -5,7 +5,7 @@ #include <bpf/bpf_helpers.h> #include "bpf_misc.h" -#define MAX_STACK (512 - 3 * 32 + 8) +#define MAX_STACK 260 static __attribute__ ((noinline)) int f0(int var, struct __sk_buff *skb) @@ -30,6 +30,10 @@ int f3(int, struct __sk_buff *skb, int); __attribute__ ((noinline)) int f2(int val, struct __sk_buff *skb) { + volatile char buf[MAX_STACK] = {}; + + __sink(buf[MAX_STACK - 1]); + return f1(skb) + f3(val, skb, 1); } @@ -44,7 +48,7 @@ int f3(int val, struct __sk_buff *skb, int var) } SEC("tc") -__failure __msg("combined stack size of 4 calls is 544") +__failure __msg("combined stack size of 3 calls is") int global_func1(struct __sk_buff *skb) { return f0(1, skb) + f1(skb) + f2(2, skb) + f3(3, skb, 4); diff --git a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c index 9a06e5eb1f..143c8a4852 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c +++ b/tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c @@ -26,6 +26,23 @@ int kprobe_typedef_ctx(void *ctx) return kprobe_typedef_ctx_subprog(ctx); } +/* s390x defines: + * + * typedef user_pt_regs bpf_user_pt_regs_t; + * typedef struct { ... } user_pt_regs; + * + * And so "canonical" underlying struct type is anonymous. + * So on s390x only valid ways to have PTR_TO_CTX argument in global subprogs + * are: + * - bpf_user_pt_regs_t *ctx (typedef); + * - struct bpf_user_pt_regs_t *ctx (backwards compatible struct hack); + * - void *ctx __arg_ctx (arg:ctx tag) + * + * Other architectures also allow using underlying struct types (e.g., + * `struct pt_regs *ctx` for x86-64) + */ +#ifndef bpf_target_s390 + #define pt_regs_struct_t typeof(*(__PT_REGS_CAST((struct pt_regs *)NULL))) __weak int kprobe_struct_ctx_subprog(pt_regs_struct_t *ctx) @@ -40,6 +57,8 @@ int kprobe_resolved_ctx(void *ctx) return kprobe_struct_ctx_subprog(ctx); } +#endif + /* this is current hack to make this work on old kernels */ struct bpf_user_pt_regs_t {}; diff --git a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c index 48ff2b2ad5..fed66f36ad 100644 --- a/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c +++ b/tools/testing/selftests/bpf/progs/test_lwt_seg6local.c @@ -6,6 +6,8 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_compiler.h" + /* Packet parsing state machine helpers. 
*/ #define cursor_advance(_cursor, _len) \ ({ void *_tmp = _cursor; _cursor += _len; _tmp; }) @@ -131,7 +133,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh, *pad_off = 0; // we can only go as far as ~10 TLVs due to the BPF max stack size - #pragma clang loop unroll(full) + __pragma_loop_unroll_full for (int i = 0; i < 10; i++) { struct sr6_tlv_t tlv; @@ -302,7 +304,7 @@ int __encap_srh(struct __sk_buff *skb) seg = (struct ip6_addr_t *)((char *)srh + sizeof(*srh)); - #pragma clang loop unroll(full) + __pragma_loop_unroll_full for (unsigned long long lo = 0; lo < 4; lo++) { seg->lo = bpf_cpu_to_be64(4 - lo); seg->hi = bpf_cpu_to_be64(hi); diff --git a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c index 4bdd65b5aa..2fdc44e766 100644 --- a/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c +++ b/tools/testing/selftests/bpf/progs/test_ptr_untrusted.c @@ -6,13 +6,13 @@ char tp_name[128]; -SEC("lsm/bpf") +SEC("lsm.s/bpf") int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size) { switch (cmd) { case BPF_RAW_TRACEPOINT_OPEN: - bpf_probe_read_user_str(tp_name, sizeof(tp_name) - 1, - (void *)attr->raw_tracepoint.name); + bpf_copy_from_user(tp_name, sizeof(tp_name) - 1, + (void *)attr->raw_tracepoint.name); break; default: break; diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c index a7278f0643..5059050f74 100644 --- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c +++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c @@ -6,6 +6,8 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_compiler.h" + /* Packet parsing state machine helpers. */ #define cursor_advance(_cursor, _len) \ ({ void *_tmp = _cursor; _cursor += _len; _tmp; }) @@ -134,7 +136,7 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb, // we can only go as far as ~10 TLVs due to the BPF max stack size // workaround: define induction variable "i" as "long" instead // of "int" to prevent alu32 sub-register spilling. - #pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (long i = 0; i < 100; i++) { struct sr6_tlv_t tlv; diff --git a/tools/testing/selftests/bpf/progs/test_siphash.h b/tools/testing/selftests/bpf/progs/test_siphash.h new file mode 100644 index 0000000000..5d3a7ec367 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_siphash.h @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Amazon.com Inc. or its affiliates. 
*/ + +#ifndef _TEST_SIPHASH_H +#define _TEST_SIPHASH_H + +/* include/linux/bitops.h */ +static inline u64 rol64(u64 word, unsigned int shift) +{ + return (word << (shift & 63)) | (word >> ((-shift) & 63)); +} + +/* include/linux/siphash.h */ +#define SIPHASH_PERMUTATION(a, b, c, d) ( \ + (a) += (b), (b) = rol64((b), 13), (b) ^= (a), (a) = rol64((a), 32), \ + (c) += (d), (d) = rol64((d), 16), (d) ^= (c), \ + (a) += (d), (d) = rol64((d), 21), (d) ^= (a), \ + (c) += (b), (b) = rol64((b), 17), (b) ^= (c), (c) = rol64((c), 32)) + +#define SIPHASH_CONST_0 0x736f6d6570736575ULL +#define SIPHASH_CONST_1 0x646f72616e646f6dULL +#define SIPHASH_CONST_2 0x6c7967656e657261ULL +#define SIPHASH_CONST_3 0x7465646279746573ULL + +/* lib/siphash.c */ +#define SIPROUND SIPHASH_PERMUTATION(v0, v1, v2, v3) + +#define PREAMBLE(len) \ + u64 v0 = SIPHASH_CONST_0; \ + u64 v1 = SIPHASH_CONST_1; \ + u64 v2 = SIPHASH_CONST_2; \ + u64 v3 = SIPHASH_CONST_3; \ + u64 b = ((u64)(len)) << 56; \ + v3 ^= key->key[1]; \ + v2 ^= key->key[0]; \ + v1 ^= key->key[1]; \ + v0 ^= key->key[0]; + +#define POSTAMBLE \ + v3 ^= b; \ + SIPROUND; \ + SIPROUND; \ + v0 ^= b; \ + v2 ^= 0xff; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + SIPROUND; \ + return (v0 ^ v1) ^ (v2 ^ v3); + +static inline u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key) +{ + PREAMBLE(16) + v3 ^= first; + SIPROUND; + SIPROUND; + v0 ^= first; + v3 ^= second; + SIPROUND; + SIPROUND; + v0 ^= second; + POSTAMBLE +} +#endif diff --git a/tools/testing/selftests/bpf/progs/test_skb_ctx.c b/tools/testing/selftests/bpf/progs/test_skb_ctx.c index c482110cfc..a724a70c67 100644 --- a/tools/testing/selftests/bpf/progs/test_skb_ctx.c +++ b/tools/testing/selftests/bpf/progs/test_skb_ctx.c @@ -3,12 +3,14 @@ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + char _license[] SEC("license") = "GPL"; SEC("tc") int process(struct __sk_buff *skb) { - #pragma clang loop unroll(full) + __pragma_loop_unroll_full for (int i = 0; i < 5; i++) { if (skb->cb[i] != i + 1) return 1; diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c index b2440a0ff4..d8d77bdffd 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c @@ -101,4 +101,69 @@ int bpf_spin_lock_test(struct __sk_buff *skb) err: return err; } + +struct bpf_spin_lock lockA __hidden SEC(".data.A"); + +__noinline +static int static_subprog(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + if (ctx->protocol) + return ret; + return ret + ctx->len; +} + +__noinline +static int static_subprog_lock(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + ret = static_subprog(ctx); + bpf_spin_lock(&lockA); + return ret + ctx->len; +} + +__noinline +static int static_subprog_unlock(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + ret = static_subprog(ctx); + bpf_spin_unlock(&lockA); + return ret + ctx->len; +} + +SEC("tc") +int lock_static_subprog_call(struct __sk_buff *ctx) +{ + int ret = 0; + + bpf_spin_lock(&lockA); + if (ctx->mark == 42) + ret = static_subprog(ctx); + bpf_spin_unlock(&lockA); + return ret; +} + +SEC("tc") +int lock_static_subprog_lock(struct __sk_buff *ctx) +{ + int ret = 0; + + ret = static_subprog_lock(ctx); + bpf_spin_unlock(&lockA); + return ret; +} + +SEC("tc") +int lock_static_subprog_unlock(struct __sk_buff *ctx) +{ + int ret = 0; + + bpf_spin_lock(&lockA); + ret = static_subprog_unlock(ctx); + return ret; +} + char _license[] 
SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c index 86cd183ef6..43f40c4fe2 100644 --- a/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c +++ b/tools/testing/selftests/bpf/progs/test_spin_lock_fail.c @@ -201,4 +201,48 @@ CHECK(innermapval_mapval, &iv->lock, &v->lock); #undef CHECK +__noinline +int global_subprog(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + if (ctx->protocol) + ret += ctx->protocol; + return ret + ctx->mark; +} + +__noinline +static int static_subprog_call_global(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + if (ctx->protocol) + return ret; + return ret + ctx->len + global_subprog(ctx); +} + +SEC("?tc") +int lock_global_subprog_call1(struct __sk_buff *ctx) +{ + int ret = 0; + + bpf_spin_lock(&lockA); + if (ctx->mark == 42) + ret = global_subprog(ctx); + bpf_spin_unlock(&lockA); + return ret; +} + +SEC("?tc") +int lock_global_subprog_call2(struct __sk_buff *ctx) +{ + int ret = 0; + + bpf_spin_lock(&lockA); + if (ctx->mark == 42) + ret = static_subprog_call_global(ctx); + bpf_spin_unlock(&lockA); + return ret; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c index 553a282d81..7f74077d66 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c @@ -9,6 +9,8 @@ #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + #ifndef ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #endif @@ -30,7 +32,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret != sizeof(tcp_mem_name) - 1) return 0; -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (i = 0; i < sizeof(tcp_mem_name); ++i) if (name[i] != tcp_mem_name[i]) return 0; @@ -59,7 +61,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret >= MAX_VALUE_STR_LEN) return 0; -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) { ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0, tcp_mem + i); diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c index 2b64bc563a..68a75436e8 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c @@ -9,6 +9,8 @@ #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + #ifndef ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #endif @@ -30,7 +32,7 @@ static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret != sizeof(tcp_mem_name) - 1) return 0; -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (i = 0; i < sizeof(tcp_mem_name); ++i) if (name[i] != tcp_mem_name[i]) return 0; @@ -57,7 +59,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret >= MAX_VALUE_STR_LEN) return 0; -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) { ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0, tcp_mem + i); diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c index 5489823c83..efc3c61f78 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c @@ -9,6 +9,8 @@ #include <bpf/bpf_helpers.h> +#include 
"bpf_compiler.h" + /* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */ #define MAX_ULONG_STR_LEN 0xF @@ -31,7 +33,7 @@ static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret != sizeof(tcp_mem_name) - 1) return 0; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (i = 0; i < sizeof(tcp_mem_name); ++i) if (name[i] != tcp_mem_name[i]) return 0; @@ -57,7 +59,7 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx) if (ret < 0 || ret >= MAX_VALUE_STR_LEN) return 0; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) { ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0, tcp_mem + i); diff --git a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c index e6e678aa98..404124a938 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_tunnel.c +++ b/tools/testing/selftests/bpf/progs/test_tc_tunnel.c @@ -19,6 +19,9 @@ #include <bpf/bpf_endian.h> #include <bpf/bpf_helpers.h> +#include "bpf_compiler.h" + +#pragma GCC diagnostic ignored "-Waddress-of-packed-member" static const int cfg_port = 8000; @@ -81,7 +84,7 @@ static __always_inline void set_ipv4_csum(struct iphdr *iph) iph->check = 0; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (i = 0, csum = 0; i < sizeof(*iph) >> 1; i++) csum += *iph16++; diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c new file mode 100644 index 0000000000..c8e4553648 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c @@ -0,0 +1,595 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Amazon.com Inc. or its affiliates. */ + +#include "vmlinux.h" + +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> +#include "bpf_tracing_net.h" +#include "bpf_kfuncs.h" +#include "test_siphash.h" +#include "test_tcp_custom_syncookie.h" + +#define MAX_PACKET_OFF 0xffff + +/* Hash is calculated for each client and split into ISN and TS. + * + * MSB LSB + * ISN: | 31 ... 8 | 7 6 | 5 | 4 | 3 2 1 0 | + * | Hash_1 | MSS | ECN | SACK | WScale | + * + * TS: | 31 ... 8 | 7 ... 0 | + * | Random | Hash_2 | + */ +#define COOKIE_BITS 8 +#define COOKIE_MASK (((__u32)1 << COOKIE_BITS) - 1) + +enum { + /* 0xf is invalid thus means that SYN did not have WScale. 
*/ + BPF_SYNCOOKIE_WSCALE_MASK = (1 << 4) - 1, + BPF_SYNCOOKIE_SACK = (1 << 4), + BPF_SYNCOOKIE_ECN = (1 << 5), +}; + +#define MSS_LOCAL_IPV4 65495 +#define MSS_LOCAL_IPV6 65476 + +const __u16 msstab4[] = { + 536, + 1300, + 1460, + MSS_LOCAL_IPV4, +}; + +const __u16 msstab6[] = { + 1280 - 60, /* IPV6_MIN_MTU - 60 */ + 1480 - 60, + 9000 - 60, + MSS_LOCAL_IPV6, +}; + +static siphash_key_t test_key_siphash = { + { 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL } +}; + +struct tcp_syncookie { + struct __sk_buff *skb; + void *data; + void *data_end; + struct ethhdr *eth; + struct iphdr *ipv4; + struct ipv6hdr *ipv6; + struct tcphdr *tcp; + __be32 *ptr32; + struct bpf_tcp_req_attrs attrs; + u32 off; + u32 cookie; + u64 first; +}; + +bool handled_syn, handled_ack; + +static int tcp_load_headers(struct tcp_syncookie *ctx) +{ + ctx->data = (void *)(long)ctx->skb->data; + ctx->data_end = (void *)(long)ctx->skb->data_end; + ctx->eth = (struct ethhdr *)(long)ctx->skb->data; + + if (ctx->eth + 1 > ctx->data_end) + goto err; + + switch (bpf_ntohs(ctx->eth->h_proto)) { + case ETH_P_IP: + ctx->ipv4 = (struct iphdr *)(ctx->eth + 1); + + if (ctx->ipv4 + 1 > ctx->data_end) + goto err; + + if (ctx->ipv4->ihl != sizeof(*ctx->ipv4) / 4) + goto err; + + if (ctx->ipv4->version != 4) + goto err; + + if (ctx->ipv4->protocol != IPPROTO_TCP) + goto err; + + ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1); + break; + case ETH_P_IPV6: + ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1); + + if (ctx->ipv6 + 1 > ctx->data_end) + goto err; + + if (ctx->ipv6->version != 6) + goto err; + + if (ctx->ipv6->nexthdr != NEXTHDR_TCP) + goto err; + + ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1); + break; + default: + goto err; + } + + if (ctx->tcp + 1 > ctx->data_end) + goto err; + + return 0; +err: + return -1; +} + +static int tcp_reload_headers(struct tcp_syncookie *ctx) +{ + /* Without volatile, + * R3 32-bit pointer arithmetic prohibited + */ + volatile u64 data_len = ctx->skb->data_end - ctx->skb->data; + + if (ctx->tcp->doff < sizeof(*ctx->tcp) / 4) + goto err; + + /* Needed to calculate csum and parse TCP options. */ + if (bpf_skb_change_tail(ctx->skb, data_len + 60 - ctx->tcp->doff * 4, 0)) + goto err; + + ctx->data = (void *)(long)ctx->skb->data; + ctx->data_end = (void *)(long)ctx->skb->data_end; + ctx->eth = (struct ethhdr *)(long)ctx->skb->data; + if (ctx->ipv4) { + ctx->ipv4 = (struct iphdr *)(ctx->eth + 1); + ctx->ipv6 = NULL; + ctx->tcp = (struct tcphdr *)(ctx->ipv4 + 1); + } else { + ctx->ipv4 = NULL; + ctx->ipv6 = (struct ipv6hdr *)(ctx->eth + 1); + ctx->tcp = (struct tcphdr *)(ctx->ipv6 + 1); + } + + if ((void *)ctx->tcp + 60 > ctx->data_end) + goto err; + + return 0; +err: + return -1; +} + +static __sum16 tcp_v4_csum(struct tcp_syncookie *ctx, __wsum csum) +{ + return csum_tcpudp_magic(ctx->ipv4->saddr, ctx->ipv4->daddr, + ctx->tcp->doff * 4, IPPROTO_TCP, csum); +} + +static __sum16 tcp_v6_csum(struct tcp_syncookie *ctx, __wsum csum) +{ + return csum_ipv6_magic(&ctx->ipv6->saddr, &ctx->ipv6->daddr, + ctx->tcp->doff * 4, IPPROTO_TCP, csum); +} + +static int tcp_validate_header(struct tcp_syncookie *ctx) +{ + s64 csum; + + if (tcp_reload_headers(ctx)) + goto err; + + csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0); + if (csum < 0) + goto err; + + if (ctx->ipv4) { + /* check tcp_v4_csum(csum) is 0 if not on lo. 
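 * (On a real device a packet that arrived with a correct TCP checksum
 * folds to 0 here; over loopback the checksum is left unfilled
 * (CHECKSUM_PARTIAL), so the result is not meaningful and only the
 * IPv4 header checksum is hard-verified below.)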
*/ + + csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, ctx->ipv4->ihl * 4, 0); + if (csum < 0) + goto err; + + if (csum_fold(csum) != 0) + goto err; + } else if (ctx->ipv6) { + /* check tcp_v6_csum(csum) is 0 if not on lo. */ + } + + return 0; +err: + return -1; +} + +static __always_inline void *next(struct tcp_syncookie *ctx, __u32 sz) +{ + __u64 off = ctx->off; + __u8 *data; + + /* Verifier forbids access to packet when offset exceeds MAX_PACKET_OFF */ + if (off > MAX_PACKET_OFF - sz) + return NULL; + + data = ctx->data + off; + barrier_var(data); + if (data + sz >= ctx->data_end) + return NULL; + + ctx->off += sz; + return data; +} + +static int tcp_parse_option(__u32 index, struct tcp_syncookie *ctx) +{ + __u8 *opcode, *opsize, *wscale; + __u32 *tsval, *tsecr; + __u16 *mss; + __u32 off; + + off = ctx->off; + opcode = next(ctx, 1); + if (!opcode) + goto stop; + + if (*opcode == TCPOPT_EOL) + goto stop; + + if (*opcode == TCPOPT_NOP) + goto next; + + opsize = next(ctx, 1); + if (!opsize) + goto stop; + + if (*opsize < 2) + goto stop; + + switch (*opcode) { + case TCPOPT_MSS: + mss = next(ctx, 2); + if (*opsize == TCPOLEN_MSS && ctx->tcp->syn && mss) + ctx->attrs.mss = get_unaligned_be16(mss); + break; + case TCPOPT_WINDOW: + wscale = next(ctx, 1); + if (*opsize == TCPOLEN_WINDOW && ctx->tcp->syn && wscale) { + ctx->attrs.wscale_ok = 1; + ctx->attrs.snd_wscale = *wscale; + } + break; + case TCPOPT_TIMESTAMP: + tsval = next(ctx, 4); + tsecr = next(ctx, 4); + if (*opsize == TCPOLEN_TIMESTAMP && tsval && tsecr) { + ctx->attrs.rcv_tsval = get_unaligned_be32(tsval); + ctx->attrs.rcv_tsecr = get_unaligned_be32(tsecr); + + if (ctx->tcp->syn && ctx->attrs.rcv_tsecr) + ctx->attrs.tstamp_ok = 0; + else + ctx->attrs.tstamp_ok = 1; + } + break; + case TCPOPT_SACK_PERM: + if (*opsize == TCPOLEN_SACK_PERM && ctx->tcp->syn) + ctx->attrs.sack_ok = 1; + break; + } + + ctx->off = off + *opsize; +next: + return 0; +stop: + return 1; +} + +static void tcp_parse_options(struct tcp_syncookie *ctx) +{ + ctx->off = (__u8 *)(ctx->tcp + 1) - (__u8 *)ctx->data, + + bpf_loop(40, tcp_parse_option, ctx, 0); +} + +static int tcp_validate_sysctl(struct tcp_syncookie *ctx) +{ + if ((ctx->ipv4 && ctx->attrs.mss != MSS_LOCAL_IPV4) || + (ctx->ipv6 && ctx->attrs.mss != MSS_LOCAL_IPV6)) + goto err; + + if (!ctx->attrs.wscale_ok || ctx->attrs.snd_wscale != 7) + goto err; + + if (!ctx->attrs.tstamp_ok) + goto err; + + if (!ctx->attrs.sack_ok) + goto err; + + if (!ctx->tcp->ece || !ctx->tcp->cwr) + goto err; + + return 0; +err: + return -1; +} + +static void tcp_prepare_cookie(struct tcp_syncookie *ctx) +{ + u32 seq = bpf_ntohl(ctx->tcp->seq); + u64 first = 0, second; + int mssind = 0; + u32 hash; + + if (ctx->ipv4) { + for (mssind = ARRAY_SIZE(msstab4) - 1; mssind; mssind--) + if (ctx->attrs.mss >= msstab4[mssind]) + break; + + ctx->attrs.mss = msstab4[mssind]; + + first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr; + } else if (ctx->ipv6) { + for (mssind = ARRAY_SIZE(msstab6) - 1; mssind; mssind--) + if (ctx->attrs.mss >= msstab6[mssind]) + break; + + ctx->attrs.mss = msstab6[mssind]; + + first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 | + ctx->ipv6->daddr.in6_u.u6_addr32[0]; + } + + second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest; + hash = siphash_2u64(first, second, &test_key_siphash); + + if (ctx->attrs.tstamp_ok) { + ctx->attrs.rcv_tsecr = bpf_get_prandom_u32(); + ctx->attrs.rcv_tsecr &= ~COOKIE_MASK; + ctx->attrs.rcv_tsecr |= hash & COOKIE_MASK; + } + + hash &= ~COOKIE_MASK; + hash |= mssind << 
6; + + if (ctx->attrs.wscale_ok) + hash |= ctx->attrs.snd_wscale & BPF_SYNCOOKIE_WSCALE_MASK; + + if (ctx->attrs.sack_ok) + hash |= BPF_SYNCOOKIE_SACK; + + if (ctx->attrs.tstamp_ok && ctx->tcp->ece && ctx->tcp->cwr) + hash |= BPF_SYNCOOKIE_ECN; + + ctx->cookie = hash; +} + +static void tcp_write_options(struct tcp_syncookie *ctx) +{ + ctx->ptr32 = (__be32 *)(ctx->tcp + 1); + + *ctx->ptr32++ = bpf_htonl(TCPOPT_MSS << 24 | TCPOLEN_MSS << 16 | + ctx->attrs.mss); + + if (ctx->attrs.wscale_ok) + *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 | + TCPOPT_WINDOW << 16 | + TCPOLEN_WINDOW << 8 | + ctx->attrs.snd_wscale); + + if (ctx->attrs.tstamp_ok) { + if (ctx->attrs.sack_ok) + *ctx->ptr32++ = bpf_htonl(TCPOPT_SACK_PERM << 24 | + TCPOLEN_SACK_PERM << 16 | + TCPOPT_TIMESTAMP << 8 | + TCPOLEN_TIMESTAMP); + else + *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 | + TCPOPT_NOP << 16 | + TCPOPT_TIMESTAMP << 8 | + TCPOLEN_TIMESTAMP); + + *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsecr); + *ctx->ptr32++ = bpf_htonl(ctx->attrs.rcv_tsval); + } else if (ctx->attrs.sack_ok) { + *ctx->ptr32++ = bpf_htonl(TCPOPT_NOP << 24 | + TCPOPT_NOP << 16 | + TCPOPT_SACK_PERM << 8 | + TCPOLEN_SACK_PERM); + } +} + +static int tcp_handle_syn(struct tcp_syncookie *ctx) +{ + s64 csum; + + if (tcp_validate_header(ctx)) + goto err; + + tcp_parse_options(ctx); + + if (tcp_validate_sysctl(ctx)) + goto err; + + tcp_prepare_cookie(ctx); + tcp_write_options(ctx); + + swap(ctx->tcp->source, ctx->tcp->dest); + ctx->tcp->check = 0; + ctx->tcp->ack_seq = bpf_htonl(bpf_ntohl(ctx->tcp->seq) + 1); + ctx->tcp->seq = bpf_htonl(ctx->cookie); + ctx->tcp->doff = ((long)ctx->ptr32 - (long)ctx->tcp) >> 2; + ctx->tcp->ack = 1; + if (!ctx->attrs.tstamp_ok || !ctx->tcp->ece || !ctx->tcp->cwr) + ctx->tcp->ece = 0; + ctx->tcp->cwr = 0; + + csum = bpf_csum_diff(0, 0, (void *)ctx->tcp, ctx->tcp->doff * 4, 0); + if (csum < 0) + goto err; + + if (ctx->ipv4) { + swap(ctx->ipv4->saddr, ctx->ipv4->daddr); + ctx->tcp->check = tcp_v4_csum(ctx, csum); + + ctx->ipv4->check = 0; + ctx->ipv4->tos = 0; + ctx->ipv4->tot_len = bpf_htons((long)ctx->ptr32 - (long)ctx->ipv4); + ctx->ipv4->id = 0; + ctx->ipv4->ttl = 64; + + csum = bpf_csum_diff(0, 0, (void *)ctx->ipv4, sizeof(*ctx->ipv4), 0); + if (csum < 0) + goto err; + + ctx->ipv4->check = csum_fold(csum); + } else if (ctx->ipv6) { + swap(ctx->ipv6->saddr, ctx->ipv6->daddr); + ctx->tcp->check = tcp_v6_csum(ctx, csum); + + *(__be32 *)ctx->ipv6 = bpf_htonl(0x60000000); + ctx->ipv6->payload_len = bpf_htons((long)ctx->ptr32 - (long)ctx->tcp); + ctx->ipv6->hop_limit = 64; + } + + swap_array(ctx->eth->h_source, ctx->eth->h_dest); + + if (bpf_skb_change_tail(ctx->skb, (long)ctx->ptr32 - (long)ctx->eth, 0)) + goto err; + + return bpf_redirect(ctx->skb->ifindex, 0); +err: + return TC_ACT_SHOT; +} + +static int tcp_validate_cookie(struct tcp_syncookie *ctx) +{ + u32 cookie = bpf_ntohl(ctx->tcp->ack_seq) - 1; + u32 seq = bpf_ntohl(ctx->tcp->seq) - 1; + u64 first = 0, second; + int mssind; + u32 hash; + + if (ctx->ipv4) + first = (u64)ctx->ipv4->saddr << 32 | ctx->ipv4->daddr; + else if (ctx->ipv6) + first = (u64)ctx->ipv6->saddr.in6_u.u6_addr8[0] << 32 | + ctx->ipv6->daddr.in6_u.u6_addr32[0]; + + second = (u64)seq << 32 | ctx->tcp->source << 16 | ctx->tcp->dest; + hash = siphash_2u64(first, second, &test_key_siphash); + + if (ctx->attrs.tstamp_ok) + hash -= ctx->attrs.rcv_tsecr & COOKIE_MASK; + else + hash &= ~COOKIE_MASK; + + hash -= cookie & ~COOKIE_MASK; + if (hash) + goto err; + + mssind = (cookie & (3 << 6)) >> 6; + if (ctx->ipv4) { + 
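	/* Worked example (illustrative numbers): if tcp_prepare_cookie()
	 * computed hash 0xd00dc0de for mssind 2, wscale 7, SACK and ECN
	 * set, the ISN it emitted was
	 *
	 *   (0xd00dc0de & ~0xff) | (2 << 6) | BPF_SYNCOOKIE_ECN |
	 *       BPF_SYNCOOKIE_SACK | 7 = 0xd00dc0b7
	 *
	 * On the ACK leg above, the recomputed hash minus the low bits
	 * echoed back in tsecr must cancel out exactly; any non-zero
	 * remainder means a forged or corrupted cookie. The low byte that
	 * remains is picked apart below: two MSS-index bits, the SACK and
	 * ECN flags, and the 4-bit window scale.
	 */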
if (mssind > ARRAY_SIZE(msstab4)) + goto err; + + ctx->attrs.mss = msstab4[mssind]; + } else { + if (mssind > ARRAY_SIZE(msstab6)) + goto err; + + ctx->attrs.mss = msstab6[mssind]; + } + + ctx->attrs.snd_wscale = cookie & BPF_SYNCOOKIE_WSCALE_MASK; + ctx->attrs.rcv_wscale = ctx->attrs.snd_wscale; + ctx->attrs.wscale_ok = ctx->attrs.snd_wscale == BPF_SYNCOOKIE_WSCALE_MASK; + ctx->attrs.sack_ok = cookie & BPF_SYNCOOKIE_SACK; + ctx->attrs.ecn_ok = cookie & BPF_SYNCOOKIE_ECN; + + return 0; +err: + return -1; +} + +static int tcp_handle_ack(struct tcp_syncookie *ctx) +{ + struct bpf_sock_tuple tuple; + struct bpf_sock *skc; + int ret = TC_ACT_OK; + struct sock *sk; + u32 tuple_size; + + if (ctx->ipv4) { + tuple.ipv4.saddr = ctx->ipv4->saddr; + tuple.ipv4.daddr = ctx->ipv4->daddr; + tuple.ipv4.sport = ctx->tcp->source; + tuple.ipv4.dport = ctx->tcp->dest; + tuple_size = sizeof(tuple.ipv4); + } else if (ctx->ipv6) { + __builtin_memcpy(tuple.ipv6.saddr, &ctx->ipv6->saddr, sizeof(tuple.ipv6.saddr)); + __builtin_memcpy(tuple.ipv6.daddr, &ctx->ipv6->daddr, sizeof(tuple.ipv6.daddr)); + tuple.ipv6.sport = ctx->tcp->source; + tuple.ipv6.dport = ctx->tcp->dest; + tuple_size = sizeof(tuple.ipv6); + } else { + goto out; + } + + skc = bpf_skc_lookup_tcp(ctx->skb, &tuple, tuple_size, -1, 0); + if (!skc) + goto out; + + if (skc->state != TCP_LISTEN) + goto release; + + sk = (struct sock *)bpf_skc_to_tcp_sock(skc); + if (!sk) + goto err; + + if (tcp_validate_header(ctx)) + goto err; + + tcp_parse_options(ctx); + + if (tcp_validate_cookie(ctx)) + goto err; + + ret = bpf_sk_assign_tcp_reqsk(ctx->skb, sk, &ctx->attrs, sizeof(ctx->attrs)); + if (ret < 0) + goto err; + +release: + bpf_sk_release(skc); +out: + return ret; + +err: + ret = TC_ACT_SHOT; + goto release; +} + +SEC("tc") +int tcp_custom_syncookie(struct __sk_buff *skb) +{ + struct tcp_syncookie ctx = { + .skb = skb, + }; + + if (tcp_load_headers(&ctx)) + return TC_ACT_OK; + + if (ctx.tcp->rst) + return TC_ACT_OK; + + if (ctx.tcp->syn) { + if (ctx.tcp->ack) + return TC_ACT_OK; + + handled_syn = true; + + return tcp_handle_syn(&ctx); + } + + handled_ack = true; + + return tcp_handle_ack(&ctx); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h new file mode 100644 index 0000000000..29a6a53cf2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Amazon.com Inc. or its affiliates. 
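 *
 * This header open-codes the kernel's unaligned-access and checksum
 * helpers so the BPF program above can rebuild packets entirely by
 * itself. As a quick sanity check of the folding logic:
 * csum_fold(0x00012003) adds 0x2003 + 0x0001 = 0x2004 and returns
 * ~0x2004 = 0xdffb.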
*/ + +#ifndef _TEST_TCP_SYNCOOKIE_H +#define _TEST_TCP_SYNCOOKIE_H + +#define __packed __attribute__((__packed__)) +#define __force + +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +#define swap(a, b) \ + do { \ + typeof(a) __tmp = (a); \ + (a) = (b); \ + (b) = __tmp; \ + } while (0) + +#define swap_array(a, b) \ + do { \ + typeof(a) __tmp[sizeof(a)]; \ + __builtin_memcpy(__tmp, a, sizeof(a)); \ + __builtin_memcpy(a, b, sizeof(a)); \ + __builtin_memcpy(b, __tmp, sizeof(a)); \ + } while (0) + +/* asm-generic/unaligned.h */ +#define __get_unaligned_t(type, ptr) ({ \ + const struct { type x; } __packed * __pptr = (typeof(__pptr))(ptr); \ + __pptr->x; \ +}) + +#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr)) + +static inline u16 get_unaligned_be16(const void *p) +{ + return bpf_ntohs(__get_unaligned_t(__be16, p)); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return bpf_ntohl(__get_unaligned_t(__be32, p)); +} + +/* lib/checksum.c */ +static inline u32 from64to32(u64 x) +{ + /* add up 32-bit and 32-bit for 32+c bit */ + x = (x & 0xffffffff) + (x >> 32); + /* add up carry.. */ + x = (x & 0xffffffff) + (x >> 32); + return (u32)x; +} + +static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + unsigned long long s = (__force u32)sum; + + s += (__force u32)saddr; + s += (__force u32)daddr; +#ifdef __BIG_ENDIAN + s += proto + len; +#else + s += (proto + len) << 8; +#endif + return (__force __wsum)from64to32(s); +} + +/* asm-generic/checksum.h */ +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + + sum = (sum & 0xffff) + (sum >> 16); + sum = (sum & 0xffff) + (sum >> 16); + return (__force __sum16)~sum; +} + +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, + __u8 proto, __wsum sum) +{ + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); +} + +/* net/ipv6/ip6_checksum.c */ +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum csum) +{ + int carry; + __u32 ulen; + __u32 uproto; + __u32 sum = (__force u32)csum; + + sum += (__force u32)saddr->in6_u.u6_addr32[0]; + carry = (sum < (__force u32)saddr->in6_u.u6_addr32[0]); + sum += carry; + + sum += (__force u32)saddr->in6_u.u6_addr32[1]; + carry = (sum < (__force u32)saddr->in6_u.u6_addr32[1]); + sum += carry; + + sum += (__force u32)saddr->in6_u.u6_addr32[2]; + carry = (sum < (__force u32)saddr->in6_u.u6_addr32[2]); + sum += carry; + + sum += (__force u32)saddr->in6_u.u6_addr32[3]; + carry = (sum < (__force u32)saddr->in6_u.u6_addr32[3]); + sum += carry; + + sum += (__force u32)daddr->in6_u.u6_addr32[0]; + carry = (sum < (__force u32)daddr->in6_u.u6_addr32[0]); + sum += carry; + + sum += (__force u32)daddr->in6_u.u6_addr32[1]; + carry = (sum < (__force u32)daddr->in6_u.u6_addr32[1]); + sum += carry; + + sum += (__force u32)daddr->in6_u.u6_addr32[2]; + carry = (sum < (__force u32)daddr->in6_u.u6_addr32[2]); + sum += carry; + + sum += (__force u32)daddr->in6_u.u6_addr32[3]; + carry = (sum < (__force u32)daddr->in6_u.u6_addr32[3]); + sum += carry; + + ulen = (__force u32)bpf_htonl((__u32)len); + sum += ulen; + carry = (sum < ulen); + sum += carry; + + uproto = (__force u32)bpf_htonl(proto); + sum += uproto; + carry = (sum < uproto); + sum += carry; + + return csum_fold((__force __wsum)sum); +} +#endif diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c 
b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index cf7ed8cbb1..a3f3f43fc1 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -59,7 +59,7 @@ int bpf_testcb(struct bpf_sock_ops *skops) asm volatile ( "%[op] = *(u32 *)(%[skops] +96)" - : [op] "+r"(op) + : [op] "=r"(op) : [skops] "r"(skops) :); diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c index d7a9a74b72..8caf58be58 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp.c +++ b/tools/testing/selftests/bpf/progs/test_xdp.c @@ -19,6 +19,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> #include "test_iptunnel_common.h" +#include "bpf_compiler.h" struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); @@ -137,7 +138,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp) iph->ttl = 8; next_iph = (__u16 *)iph; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (i = 0; i < sizeof(*iph) >> 1; i++) csum += *next_iph++; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c index 78c368e717..67a77944ef 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c @@ -18,11 +18,11 @@ #include "test_iptunnel_common.h" #include "bpf_kfuncs.h" -const size_t tcphdr_sz = sizeof(struct tcphdr); -const size_t udphdr_sz = sizeof(struct udphdr); -const size_t ethhdr_sz = sizeof(struct ethhdr); -const size_t iphdr_sz = sizeof(struct iphdr); -const size_t ipv6hdr_sz = sizeof(struct ipv6hdr); +#define tcphdr_sz sizeof(struct tcphdr) +#define udphdr_sz sizeof(struct udphdr) +#define ethhdr_sz sizeof(struct ethhdr) +#define iphdr_sz sizeof(struct iphdr) +#define ipv6hdr_sz sizeof(struct ipv6hdr) struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c index c98fb44156..93267a6882 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_loop.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c @@ -15,6 +15,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> #include "test_iptunnel_common.h" +#include "bpf_compiler.h" struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); @@ -133,7 +134,7 @@ static __always_inline int handle_ipv4(struct xdp_md *xdp) iph->ttl = 8; next_iph = (__u16 *)iph; -#pragma clang loop unroll(disable) + __pragma_loop_no_unroll for (i = 0; i < sizeof(*iph) >> 1; i++) csum += *next_iph++; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c index 42c8f6ded0..5c7e4758a0 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c @@ -15,6 +15,7 @@ #include <linux/udp.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_endian.h> +#include "bpf_compiler.h" static __always_inline __u32 rol32(__u32 word, unsigned int shift) { @@ -362,7 +363,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval, iph->ttl = 4; next_iph_u16 = (__u16 *) iph; -#pragma clang loop unroll(full) + __pragma_loop_unroll_full for (int i = 0; i < sizeof(struct iphdr) >> 1; i++) csum += *next_iph_u16++; iph->check = ~((csum & 0xffff) + (csum >> 16)); @@ -409,7 +410,7 @@ int send_icmp_reply(void *data, void *data_end) iph->saddr = tmp_addr; iph->check = 0; next_iph_u16 = (__u16 *) iph; -#pragma clang loop unroll(full) + 
__pragma_loop_unroll_full for (int i = 0; i < sizeof(struct iphdr) >> 1; i++) csum += *next_iph_u16++; iph->check = ~((csum & 0xffff) + (csum >> 16)); diff --git a/tools/testing/selftests/bpf/progs/token_lsm.c b/tools/testing/selftests/bpf/progs/token_lsm.c new file mode 100644 index 0000000000..e4d59b6ba7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/token_lsm.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +int my_pid; +bool reject_capable; +bool reject_cmd; + +SEC("lsm/bpf_token_capable") +int BPF_PROG(token_capable, struct bpf_token *token, int cap) +{ + if (my_pid == 0 || my_pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + if (reject_capable) + return -1; + return 0; +} + +SEC("lsm/bpf_token_cmd") +int BPF_PROG(token_cmd, struct bpf_token *token, enum bpf_cmd cmd) +{ + if (my_pid == 0 || my_pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + if (reject_cmd) + return -1; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/tracing_failure.c b/tools/testing/selftests/bpf/progs/tracing_failure.c new file mode 100644 index 0000000000..d41665d2ec --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tracing_failure.c @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +SEC("?fentry/bpf_spin_lock") +int BPF_PROG(test_spin_lock, struct bpf_spin_lock *lock) +{ + return 0; +} + +SEC("?fentry/bpf_spin_unlock") +int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c index 694e7cec18..5fda439010 100644 --- a/tools/testing/selftests/bpf/progs/trigger_bench.c +++ b/tools/testing/selftests/bpf/progs/trigger_bench.c @@ -33,6 +33,27 @@ int bench_trigger_kprobe(void *ctx) return 0; } +SEC("kretprobe/" SYS_PREFIX "sys_getpgid") +int bench_trigger_kretprobe(void *ctx) +{ + __sync_add_and_fetch(&hits, 1); + return 0; +} + +SEC("kprobe.multi/" SYS_PREFIX "sys_getpgid") +int bench_trigger_kprobe_multi(void *ctx) +{ + __sync_add_and_fetch(&hits, 1); + return 0; +} + +SEC("kretprobe.multi/" SYS_PREFIX "sys_getpgid") +int bench_trigger_kretprobe_multi(void *ctx) +{ + __sync_add_and_fetch(&hits, 1); + return 0; +} + SEC("fentry/" SYS_PREFIX "sys_getpgid") int bench_trigger_fentry(void *ctx) { @@ -40,6 +61,13 @@ int bench_trigger_fentry(void *ctx) return 0; } +SEC("fexit/" SYS_PREFIX "sys_getpgid") +int bench_trigger_fexit(void *ctx) +{ + __sync_add_and_fetch(&hits, 1); + return 0; +} + SEC("fentry.s/" SYS_PREFIX "sys_getpgid") int bench_trigger_fentry_sleep(void *ctx) { diff --git a/tools/testing/selftests/bpf/progs/type_cast.c b/tools/testing/selftests/bpf/progs/type_cast.c index a9629ac230..9d808b8f4a 100644 --- a/tools/testing/selftests/bpf/progs/type_cast.c +++ b/tools/testing/selftests/bpf/progs/type_cast.c @@ -4,6 +4,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_core_read.h> +#include "bpf_kfuncs.h" struct { __uint(type, BPF_MAP_TYPE_TASK_STORAGE); @@ -19,9 +20,6 @@ char name[IFNAMSIZ]; unsigned int inum; unsigned int meta_len, frag0_len, kskb_len, kskb2_len; -void *bpf_cast_to_kern_ctx(void *) __ksym; -void 
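/* bpf_core_cast(), pulled in via the included helpers, is a convenience
 * macro that is roughly equivalent to (a sketch, not the verbatim
 * definition):
 *
 *   #define bpf_core_cast(ptr, type) \
 *       ((typeof(type) *)bpf_rdonly_cast((ptr), \
 *                                        bpf_core_type_id_kernel(type)))
 *
 * so every open-coded bpf_rdonly_cast() + bpf_core_type_id_kernel()
 * pair below collapses into one call, and the local __ksym declarations
 * above become redundant.
 */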
*bpf_rdonly_cast(void *, __u32) __ksym; - SEC("?xdp") int md_xdp(struct xdp_md *ctx) { @@ -48,13 +46,12 @@ int md_skb(struct __sk_buff *skb) /* Simulate the following kernel macro: * #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB))) */ - shared_info = bpf_rdonly_cast(kskb->head + kskb->end, - bpf_core_type_id_kernel(struct skb_shared_info)); + shared_info = bpf_core_cast(kskb->head + kskb->end, struct skb_shared_info); meta_len = shared_info->meta_len; frag0_len = shared_info->frag_list->len; /* kskb2 should be equal to kskb */ - kskb2 = bpf_rdonly_cast(kskb, bpf_core_type_id_kernel(struct sk_buff)); + kskb2 = bpf_core_cast(kskb, typeof(*kskb2)); kskb2_len = kskb2->len; return 0; } @@ -65,7 +62,7 @@ int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id) struct task_struct *task, *task_dup; task = bpf_get_current_task_btf(); - task_dup = bpf_rdonly_cast(task, bpf_core_type_id_kernel(struct task_struct)); + task_dup = bpf_core_cast(task, struct task_struct); (void)bpf_task_storage_get(&enter_id, task_dup, 0, 0); return 0; } @@ -73,7 +70,7 @@ int BPF_PROG(untrusted_ptr, struct pt_regs *regs, long id) SEC("?tracepoint/syscalls/sys_enter_nanosleep") int kctx_u64(void *ctx) { - u64 *kctx = bpf_rdonly_cast(ctx, bpf_core_type_id_kernel(u64)); + u64 *kctx = bpf_core_cast(ctx, u64); (void)kctx; return 0; diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c new file mode 100644 index 0000000000..93144ae6df --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_arena.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" +#include "bpf_arena_common.h" + +struct { + __uint(type, BPF_MAP_TYPE_ARENA); + __uint(map_flags, BPF_F_MMAPABLE); + __uint(max_entries, 2); /* arena of two pages close to 32-bit boundary*/ +#ifdef __TARGET_ARCH_arm64 + __ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */ +#else + __ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */ +#endif +} arena SEC(".maps"); + +SEC("syscall") +__success __retval(0) +int basic_alloc1(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile int __arena *page1, *page2, *no_page, *page3; + + page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page1) + return 1; + *page1 = 1; + page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page2) + return 2; + *page2 = 2; + no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (no_page) + return 3; + if (*page1 != 1) + return 4; + if (*page2 != 2) + return 5; + bpf_arena_free_pages(&arena, (void __arena *)page2, 1); + if (*page1 != 1) + return 6; + if (*page2 != 0) /* use-after-free should return 0 */ + return 7; + page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page3) + return 8; + *page3 = 3; + if (page2 != page3) + return 9; + if (*page1 != 1) + return 10; +#endif + return 0; +} + +SEC("syscall") +__success __retval(0) +int basic_alloc2(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile char __arena *page1, *page2, *page3, *page4; + + page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0); + if (!page1) + return 1; + page2 = page1 + __PAGE_SIZE; + page3 = page1 + __PAGE_SIZE * 2; + page4 = page1 - __PAGE_SIZE; + 
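	/* Only two pages were actually allocated: page3 points one page
	 * past that range and page4 one page before it. The checks below
	 * rely on arena semantics: a store to an arena address with no
	 * backing page is silently dropped and a load from it returns 0,
	 * so the writes to page3/page4 must not land, and after the free
	 * all four reads must see zeroes again.
	 */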
*page1 = 1; + *page2 = 2; + *page3 = 3; + *page4 = 4; + if (*page1 != 1) + return 1; + if (*page2 != 2) + return 2; + if (*page3 != 0) + return 3; + if (*page4 != 0) + return 4; + bpf_arena_free_pages(&arena, (void __arena *)page1, 2); + if (*page1 != 0) + return 5; + if (*page2 != 0) + return 6; + if (*page3 != 0) + return 7; + if (*page4 != 0) + return 8; +#endif + return 0; +} + +struct bpf_arena___l { + struct bpf_map map; +} __attribute__((preserve_access_index)); + +SEC("syscall") +__success __retval(0) __log_level(2) +int basic_alloc3(void *ctx) +{ + struct bpf_arena___l *ar = (struct bpf_arena___l *)&arena; + volatile char __arena *pages; + + pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0); + if (!pages) + return 1; + return 0; +} + +SEC("iter.s/bpf_map") +__success __log_level(2) +int iter_maps1(struct bpf_iter__bpf_map *ctx) +{ + struct bpf_map *map = ctx->map; + + if (!map) + return 0; + bpf_arena_alloc_pages(map, NULL, map->max_entries, 0, 0); + return 0; +} + +SEC("iter.s/bpf_map") +__failure __msg("expected pointer to STRUCT bpf_map") +int iter_maps2(struct bpf_iter__bpf_map *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + + bpf_arena_alloc_pages((void *)seq, NULL, 1, 0, 0); + return 0; +} + +SEC("iter.s/bpf_map") +__failure __msg("untrusted_ptr_bpf_map") +int iter_maps3(struct bpf_iter__bpf_map *ctx) +{ + struct bpf_map *map = ctx->map; + + if (!map) + return 0; + bpf_arena_alloc_pages(map->inner_map_meta, NULL, map->max_entries, 0, 0); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c new file mode 100644 index 0000000000..ef66ea4602 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
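 *
 * Unlike verifier_arena.c above, this test sizes the arena at a full
 * 4GB (ARENA_SIZE / PAGE_SIZE pages) and allocates one page at each
 * end of the range, so it exercises arena pointer arithmetic right at
 * the boundaries of the mapped region.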
*/ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" +#include "bpf_arena_common.h" + +#define ARENA_SIZE (1ull << 32) + +struct { + __uint(type, BPF_MAP_TYPE_ARENA); + __uint(map_flags, BPF_F_MMAPABLE); + __uint(max_entries, ARENA_SIZE / PAGE_SIZE); +} arena SEC(".maps"); + +SEC("syscall") +__success __retval(0) +int big_alloc1(void *ctx) +{ +#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) + volatile char __arena *page1, *page2, *no_page, *page3; + void __arena *base; + + page1 = base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page1) + return 1; + *page1 = 1; + page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE, + 1, NUMA_NO_NODE, 0); + if (!page2) + return 2; + *page2 = 2; + no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE, + 1, NUMA_NO_NODE, 0); + if (no_page) + return 3; + if (*page1 != 1) + return 4; + if (*page2 != 2) + return 5; + bpf_arena_free_pages(&arena, (void __arena *)page1, 1); + if (*page2 != 2) + return 6; + if (*page1 != 0) /* use-after-free should return 0 */ + return 7; + page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + if (!page3) + return 8; + *page3 = 3; + if (page1 != page3) + return 9; + if (*page2 != 2) + return 10; + if (*(page1 + PAGE_SIZE) != 0) + return 11; + if (*(page1 - PAGE_SIZE) != 0) + return 12; + if (*(page2 + PAGE_SIZE) != 0) + return 13; + if (*(page2 - PAGE_SIZE) != 0) + return 14; +#endif + return 0; +} +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c index be95570ab3..28b602ac9c 100644 --- a/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c +++ b/tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c @@ -568,7 +568,7 @@ l0_%=: r0 = 0; \ SEC("tc") __description("direct packet access: test23 (x += pkt_ptr, 4)") -__failure __msg("invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)") +__failure __msg("invalid access to packet, off=0 size=8, R5(id=3,off=0,r=0)") __flag(BPF_F_ANY_ALIGNMENT) __naked void test23_x_pkt_ptr_4(void) { diff --git a/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c new file mode 100644 index 0000000000..4ab0ef18d7 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_global_ptr_args.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
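 *
 * These tests exercise the __arg_trusted and __arg_nullable decl tags
 * on global subprog arguments: "trusted" demands that callers pass a
 * trusted (or RCU-protected) PTR_TO_BTF_ID, while "nullable" widens the
 * argument to trusted_ptr_or_null_ and obliges the subprog itself to
 * NULL-check before dereferencing.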
*/ + +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_core_read.h> +#include "bpf_misc.h" +#include "xdp_metadata.h" +#include "bpf_kfuncs.h" + +extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak; +extern void bpf_task_release(struct task_struct *p) __ksym __weak; + +__weak int subprog_trusted_task_nullable(struct task_struct *task __arg_trusted __arg_nullable) +{ + if (!task) + return 0; + return task->pid + task->tgid; +} + +__weak int subprog_trusted_task_nullable_extra_layer(struct task_struct *task __arg_trusted __arg_nullable) +{ + return subprog_trusted_task_nullable(task) + subprog_trusted_task_nullable(NULL); +} + +SEC("?tp_btf/task_newtask") +__success __log_level(2) +__msg("Validating subprog_trusted_task_nullable() func#1...") +__msg(": R1=trusted_ptr_or_null_task_struct(") +int trusted_task_arg_nullable(void *ctx) +{ + struct task_struct *t1 = bpf_get_current_task_btf(); + struct task_struct *t2 = bpf_task_acquire(t1); + int res = 0; + + /* known NULL */ + res += subprog_trusted_task_nullable(NULL); + + /* known non-NULL */ + res += subprog_trusted_task_nullable(t1); + res += subprog_trusted_task_nullable_extra_layer(t1); + + /* unknown if NULL or not */ + res += subprog_trusted_task_nullable(t2); + res += subprog_trusted_task_nullable_extra_layer(t2); + + if (t2) { + /* known non-NULL after explicit NULL check, just in case */ + res += subprog_trusted_task_nullable(t2); + res += subprog_trusted_task_nullable_extra_layer(t2); + + bpf_task_release(t2); + } + + return res; +} + +__weak int subprog_trusted_task_nonnull(struct task_struct *task __arg_trusted) +{ + return task->pid + task->tgid; +} + +SEC("?kprobe") +__failure __log_level(2) +__msg("R1 type=scalar expected=ptr_, trusted_ptr_, rcu_ptr_") +__msg("Caller passes invalid args into func#1 ('subprog_trusted_task_nonnull')") +int trusted_task_arg_nonnull_fail1(void *ctx) +{ + return subprog_trusted_task_nonnull(NULL); +} + +SEC("?tp_btf/task_newtask") +__failure __log_level(2) +__msg("R1 type=ptr_or_null_ expected=ptr_, trusted_ptr_, rcu_ptr_") +__msg("Caller passes invalid args into func#1 ('subprog_trusted_task_nonnull')") +int trusted_task_arg_nonnull_fail2(void *ctx) +{ + struct task_struct *t = bpf_get_current_task_btf(); + struct task_struct *nullable; + int res; + + nullable = bpf_task_acquire(t); + + /* should fail, PTR_TO_BTF_ID_OR_NULL */ + res = subprog_trusted_task_nonnull(nullable); + + if (nullable) + bpf_task_release(nullable); + + return res; +} + +SEC("?kprobe") +__success __log_level(2) +__msg("Validating subprog_trusted_task_nonnull() func#1...") +__msg(": R1=trusted_ptr_task_struct(") +int trusted_task_arg_nonnull(void *ctx) +{ + struct task_struct *t = bpf_get_current_task_btf(); + + return subprog_trusted_task_nonnull(t); +} + +struct task_struct___local {} __attribute__((preserve_access_index)); + +__weak int subprog_nullable_task_flavor( + struct task_struct___local *task __arg_trusted __arg_nullable) +{ + char buf[16]; + + if (!task) + return 0; + + return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0); +} + +SEC("?uprobe.s") +__success __log_level(2) +__msg("Validating subprog_nullable_task_flavor() func#1...") +__msg(": R1=trusted_ptr_or_null_task_struct(") +int flavor_ptr_nullable(void *ctx) +{ + struct task_struct___local *t = (void *)bpf_get_current_task_btf(); + + return subprog_nullable_task_flavor(t); +} + +__weak int subprog_nonnull_task_flavor(struct task_struct___local *task 
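/* struct task_struct___local above is a CO-RE "flavor": libbpf strips
 * everything from the triple underscore onward when matching against
 * kernel BTF, so this empty local struct stands in for the real
 * task_struct while letting the test control what the verifier sees.
 */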
__arg_trusted) +{ + char buf[16]; + + return bpf_copy_from_user_task(&buf, sizeof(buf), NULL, (void *)task, 0); +} + +SEC("?uprobe.s") +__success __log_level(2) +__msg("Validating subprog_nonnull_task_flavor() func#1...") +__msg(": R1=trusted_ptr_task_struct(") +int flavor_ptr_nonnull(void *ctx) +{ + struct task_struct *t = bpf_get_current_task_btf(); + + return subprog_nonnull_task_flavor((void *)t); +} + +__weak int subprog_trusted_destroy(struct task_struct *task __arg_trusted) +{ + bpf_task_release(task); /* should be rejected */ + + return 0; +} + +SEC("?tp_btf/task_newtask") +__failure __log_level(2) +__msg("release kernel function bpf_task_release expects refcounted PTR_TO_BTF_ID") +int BPF_PROG(trusted_destroy_fail, struct task_struct *task, u64 clone_flags) +{ + return subprog_trusted_destroy(task); +} + +__weak int subprog_trusted_acq_rel(struct task_struct *task __arg_trusted) +{ + struct task_struct *owned; + + owned = bpf_task_acquire(task); + if (!owned) + return 0; + + bpf_task_release(owned); /* this one is OK, we acquired it locally */ + + return 0; +} + +SEC("?tp_btf/task_newtask") +__success __log_level(2) +int BPF_PROG(trusted_acq_rel, struct task_struct *task, u64 clone_flags) +{ + return subprog_trusted_acq_rel(task); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c index 67dddd9418..a9fc30ed4d 100644 --- a/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c +++ b/tools/testing/selftests/bpf/progs/verifier_global_subprogs.c @@ -8,6 +8,13 @@ #include "xdp_metadata.h" #include "bpf_kfuncs.h" +/* The compiler may be able to detect the access to uninitialized + memory in the routines performing out of bound memory accesses and + emit warnings about it. This is the case of GCC. */ +#if !defined(__clang__) +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif + int arr[1]; int unkn_idx; const volatile bool call_dead_subprog = false; @@ -115,6 +122,35 @@ int arg_tag_nullable_ptr_fail(void *ctx) return subprog_nullable_ptr_bad(&x); } +typedef struct { + int x; +} user_struct_t; + +__noinline __weak int subprog_user_anon_mem(user_struct_t *t) +{ + return t ? 
t->x : 0; +} + +SEC("?tracepoint") +__failure __log_level(2) +__msg("invalid bpf_context access") +__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')") +int anon_user_mem_invalid(void *ctx) +{ + /* can't pass PTR_TO_CTX as user memory */ + return subprog_user_anon_mem(ctx); +} + +SEC("?tracepoint") +__success __log_level(2) +__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype") +int anon_user_mem_valid(void *ctx) +{ + user_struct_t t = { .x = 42 }; + + return subprog_user_anon_mem(&t); +} + __noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull) { return (*p1) * (*p2); /* good, no need for NULL checks */ diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c index a955a63582..99e561f18f 100644 --- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c +++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c @@ -1,8 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 - -#include <linux/bpf.h> -#include <bpf/bpf_helpers.h> #include "bpf_misc.h" +#include "bpf_experimental.h" struct { __uint(type, BPF_MAP_TYPE_ARRAY); @@ -309,4 +307,103 @@ int iter_limit_bug(struct __sk_buff *skb) return 0; } +#define ARR_SZ 1000000 +int zero; +char arr[ARR_SZ]; + +SEC("socket") +__success __retval(0xd495cdc0) +int cond_break1(const void *ctx) +{ + unsigned long i; + unsigned int sum = 0; + + for (i = zero; i < ARR_SZ; cond_break, i++) + sum += i; + for (i = zero; i < ARR_SZ; i++) { + barrier_var(i); + sum += i + arr[i]; + cond_break; + } + + return sum; +} + +SEC("socket") +__success __retval(999000000) +int cond_break2(const void *ctx) +{ + int i, j; + int sum = 0; + + for (i = zero; i < 1000; cond_break, i++) + for (j = zero; j < 1000; j++) { + sum += i + j; + cond_break; + } + + return sum; +} + +static __noinline int loop(void) +{ + int i, sum = 0; + + for (i = zero; i <= 1000000; i++, cond_break) + sum += i; + + return sum; +} + +SEC("socket") +__success __retval(0x6a5a2920) +int cond_break3(const void *ctx) +{ + return loop(); +} + +SEC("socket") +__success __retval(1) +int cond_break4(const void *ctx) +{ + int cnt = zero; + + for (;;) { + /* should eventually break out of the loop */ + cond_break; + cnt++; + } + /* if we looped a bit, it's a success */ + return cnt > 1 ? 1 : 0; +} + +static __noinline int static_subprog(void) +{ + int cnt = zero; + + for (;;) { + cond_break; + cnt++; + } + + return cnt; +} + +SEC("socket") +__success __retval(1) +int cond_break5(const void *ctx) +{ + int cnt1 = zero, cnt2; + + for (;;) { + cond_break; + cnt1++; + } + + cnt2 = static_subprog(); + + /* main and subprog have to loop a bit */ + return cnt1 > 1 && cnt2 > 1 ? 
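	/* cond_break (from bpf_experimental.h) lowers to the may_goto
	 * pseudo instruction: the verifier grants the surrounding loop a
	 * bounded iteration budget and, once it is exhausted at runtime,
	 * execution falls through past the loop instead of spinning
	 * forever. The tests therefore only assert that each loop ran
	 * "more than once" — the exact count at which the break fires is
	 * an implementation detail.
	 */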
1 : 0; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_loops1.c b/tools/testing/selftests/bpf/progs/verifier_loops1.c index 71735dbf33..e07b43b78f 100644 --- a/tools/testing/selftests/bpf/progs/verifier_loops1.c +++ b/tools/testing/selftests/bpf/progs/verifier_loops1.c @@ -259,4 +259,28 @@ l0_%=: r2 += r1; \ " ::: __clobber_all); } +SEC("xdp") +__success +__naked void not_an_inifinite_loop(void) +{ + asm volatile (" \ + call %[bpf_get_prandom_u32]; \ + r0 &= 0xff; \ + *(u64 *)(r10 - 8) = r0; \ + r0 = 0; \ +loop_%=: \ + r0 = *(u64 *)(r10 - 8); \ + if r0 > 10 goto exit_%=; \ + r0 += 1; \ + *(u64 *)(r10 - 8) = r0; \ + r0 = 0; \ + goto loop_%=; \ +exit_%=: \ + r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c index 39fe3372e0..85e48069c9 100644 --- a/tools/testing/selftests/bpf/progs/verifier_spill_fill.c +++ b/tools/testing/selftests/bpf/progs/verifier_spill_fill.c @@ -217,7 +217,7 @@ __naked void uninit_u32_from_the_stack(void) SEC("tc") __description("Spill a u32 const scalar. Refill as u16. Offset to skb->data") -__failure __msg("invalid access to packet") +__success __retval(0) __naked void u16_offset_to_skb_data(void) { asm volatile (" \ @@ -225,13 +225,19 @@ __naked void u16_offset_to_skb_data(void) r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ w4 = 20; \ *(u32*)(r10 - 8) = r4; \ - r4 = *(u16*)(r10 - 8); \ + " +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r4 = *(u16*)(r10 - 8);" +#else + "r4 = *(u16*)(r10 - 6);" +#endif + " \ r0 = r2; \ - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */\ r0 += r4; \ - /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\ + /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\ if r0 > r3 goto l0_%=; \ - /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\ + /* r0 = *(u32 *)r2 R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\ r0 = *(u32*)(r2 + 0); \ l0_%=: r0 = 0; \ exit; \ @@ -243,7 +249,7 @@ l0_%=: r0 = 0; \ SEC("tc") __description("Spill u32 const scalars. Refill as u64. Offset to skb->data") -__failure __msg("invalid access to packet") +__failure __msg("math between pkt pointer and register with unbounded min value is not allowed") __naked void u64_offset_to_skb_data(void) { asm volatile (" \ @@ -253,13 +259,11 @@ __naked void u64_offset_to_skb_data(void) w7 = 20; \ *(u32*)(r10 - 4) = r6; \ *(u32*)(r10 - 8) = r7; \ - r4 = *(u16*)(r10 - 8); \ + r4 = *(u64*)(r10 - 8); \ r0 = r2; \ - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4= */ \ r0 += r4; \ - /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\ if r0 > r3 goto l0_%=; \ - /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */\ r0 = *(u32*)(r2 + 0); \ l0_%=: r0 = 0; \ exit; \ @@ -270,7 +274,7 @@ l0_%=: r0 = 0; \ } SEC("tc") -__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data") +__description("Spill a u32 const scalar. Refill as u16 from MSB. 
Offset to skb->data") __failure __msg("invalid access to packet") __naked void _6_offset_to_skb_data(void) { @@ -279,7 +283,13 @@ __naked void _6_offset_to_skb_data(void) r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \ w4 = 20; \ *(u32*)(r10 - 8) = r4; \ - r4 = *(u16*)(r10 - 6); \ + " +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r4 = *(u16*)(r10 - 6);" +#else + "r4 = *(u16*)(r10 - 8);" +#endif + " \ r0 = r2; \ /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\ r0 += r4; \ @@ -454,9 +464,9 @@ l0_%=: r1 >>= 16; \ SEC("raw_tp") __log_level(2) __success -__msg("fp-8=0m??mmmm") -__msg("fp-16=00mm??mm") -__msg("fp-24=00mm???m") +__msg("fp-8=0m??scalar()") +__msg("fp-16=00mm??scalar()") +__msg("fp-24=00mm???scalar()") __naked void spill_subregs_preserve_stack_zero(void) { asm volatile ( @@ -495,14 +505,14 @@ char single_byte_buf[1] SEC(".data.single_byte_buf"); SEC("raw_tp") __log_level(2) __success -/* make sure fp-8 is all STACK_ZERO */ -__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=00000000") +/* fp-8 is spilled IMPRECISE value zero (represented by a zero value fake reg) */ +__msg("2: (7a) *(u64 *)(r10 -8) = 0 ; R10=fp0 fp-8_w=0") /* but fp-16 is spilled IMPRECISE zero const reg */ __msg("4: (7b) *(u64 *)(r10 -16) = r0 ; R0_w=0 R10=fp0 fp-16_w=0") -/* validate that assigning R2 from STACK_ZERO doesn't mark register +/* validate that assigning R2 from STACK_SPILL with zero value doesn't mark register * precise immediately; if necessary, it will be marked precise later */ -__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=00000000") +__msg("6: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8_w=0") /* similarly, when R2 is assigned from spilled register, it is initially * imprecise, but will be marked precise later once it is used in precise context */ @@ -520,14 +530,14 @@ __msg("mark_precise: frame0: regs=r0 stack= before 3: (b7) r0 = 0") __naked void partial_stack_load_preserves_zeros(void) { asm volatile ( - /* fp-8 is all STACK_ZERO */ + /* fp-8 is value zero (represented by a zero value fake reg) */ ".8byte %[fp8_st_zero];" /* LLVM-18+: *(u64 *)(r10 -8) = 0; */ /* fp-16 is const zero register */ "r0 = 0;" "*(u64 *)(r10 -16) = r0;" - /* load single U8 from non-aligned STACK_ZERO slot */ + /* load single U8 from non-aligned spilled value zero slot */ "r1 = %[single_byte_buf];" "r2 = *(u8 *)(r10 -1);" "r1 += r2;" @@ -539,7 +549,7 @@ __naked void partial_stack_load_preserves_zeros(void) "r1 += r2;" "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ - /* load single U16 from non-aligned STACK_ZERO slot */ + /* load single U16 from non-aligned spilled value zero slot */ "r1 = %[single_byte_buf];" "r2 = *(u16 *)(r10 -2);" "r1 += r2;" @@ -551,7 +561,7 @@ __naked void partial_stack_load_preserves_zeros(void) "r1 += r2;" "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ - /* load single U32 from non-aligned STACK_ZERO slot */ + /* load single U32 from non-aligned spilled value zero slot */ "r1 = %[single_byte_buf];" "r2 = *(u32 *)(r10 -4);" "r1 += r2;" @@ -583,6 +593,47 @@ __naked void partial_stack_load_preserves_zeros(void) : __clobber_common); } +SEC("raw_tp") +__log_level(2) +__success +/* fp-4 is STACK_ZERO */ +__msg("2: (62) *(u32 *)(r10 -4) = 0 ; R10=fp0 fp-8=0000????") +__msg("4: (71) r2 = *(u8 *)(r10 -1) ; R2_w=0 R10=fp0 fp-8=0000????") +__msg("5: (0f) r1 += r2") +__msg("mark_precise: frame0: last_idx 5 first_idx 0 subseq_idx -1") +__msg("mark_precise: frame0: regs=r2 stack= before 4: (71) r2 = *(u8 *)(r10 -1)") +__naked void 
partial_stack_load_preserves_partial_zeros(void) +{ + asm volatile ( + /* fp-4 is value zero */ + ".8byte %[fp4_st_zero];" /* LLVM-18+: *(u32 *)(r10 -4) = 0; */ + + /* load single U8 from non-aligned stack zero slot */ + "r1 = %[single_byte_buf];" + "r2 = *(u8 *)(r10 -1);" + "r1 += r2;" + "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ + + /* load single U16 from non-aligned stack zero slot */ + "r1 = %[single_byte_buf];" + "r2 = *(u16 *)(r10 -2);" + "r1 += r2;" + "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ + + /* load single U32 from non-aligned stack zero slot */ + "r1 = %[single_byte_buf];" + "r2 = *(u32 *)(r10 -4);" + "r1 += r2;" + "*(u8 *)(r1 + 0) = r2;" /* this should be fine */ + + "r0 = 0;" + "exit;" + : + : __imm_ptr(single_byte_buf), + __imm_insn(fp4_st_zero, BPF_ST_MEM(BPF_W, BPF_REG_FP, -4, 0)) + : __clobber_common); +} + char two_byte_buf[2] SEC(".data.two_byte_buf"); SEC("raw_tp") @@ -737,4 +788,460 @@ __naked void stack_load_preserves_const_precision_subreg(void) : __clobber_common); } +SEC("xdp") +__description("32-bit spilled reg range should be tracked") +__success __retval(0) +__naked void spill_32bit_range_track(void) +{ + asm volatile(" \ + call %[bpf_ktime_get_ns]; \ + /* Make r0 bounded. */ \ + r0 &= 65535; \ + /* Assign an ID to r0. */ \ + r1 = r0; \ + /* 32-bit spill r0 to stack. */ \ + *(u32*)(r10 - 8) = r0; \ + /* Boundary check on r0. */ \ + if r0 < 1 goto l0_%=; \ + /* 32-bit fill r1 from stack. */ \ + r1 = *(u32*)(r10 - 8); \ + /* r1 == r0 => r1 >= 1 always. */ \ + if r1 >= 1 goto l0_%=; \ + /* Dead branch: the verifier should prune it. \ + * Do an invalid memory access if the verifier \ + * follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +SEC("xdp") +__description("64-bit spill of 64-bit reg should assign ID") +__success __retval(0) +__naked void spill_64bit_of_64bit_ok(void) +{ + asm volatile (" \ + /* Roll one bit to make the register inexact. */\ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x80000000; \ + r0 <<= 32; \ + /* 64-bit spill r0 to stack - should assign an ID. */\ + *(u64*)(r10 - 8) = r0; \ + /* 64-bit fill r1 from stack - should preserve the ID. */\ + r1 = *(u64*)(r10 - 8); \ + /* Compare r1 with another register to trigger find_equal_scalars.\ + * Having one random bit is important here, otherwise the verifier cuts\ + * the corners. \ + */ \ + r2 = 0; \ + if r1 != r2 goto l0_%=; \ + /* The result of this comparison is predefined. */\ + if r0 == r2 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("32-bit spill of 32-bit reg should assign ID") +__success __retval(0) +__naked void spill_32bit_of_32bit_ok(void) +{ + asm volatile (" \ + /* Roll one bit to make the register inexact. */\ + call %[bpf_get_prandom_u32]; \ + w0 &= 0x80000000; \ + /* 32-bit spill r0 to stack - should assign an ID. */\ + *(u32*)(r10 - 8) = r0; \ + /* 32-bit fill r1 from stack - should preserve the ID. */\ + r1 = *(u32*)(r10 - 8); \ + /* Compare r1 with another register to trigger find_equal_scalars.\ + * Having one random bit is important here, otherwise the verifier cuts\ + * the corners. \ + */ \ + r2 = 0; \ + if r1 != r2 goto l0_%=; \ + /* The result of this comparison is predefined. 
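	 * (r1 was filled from the very slot r0 was spilled to, so the\
	 * two registers share one ID; once the "r1 != r2" branch pins\
	 * r1 to 0, find_equal_scalars() propagates r0 = 0 as well,\
	 * making the next branch always-taken.)\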
*/\ + if r0 == r2 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("16-bit spill of 16-bit reg should assign ID") +__success __retval(0) +__naked void spill_16bit_of_16bit_ok(void) +{ + asm volatile (" \ + /* Roll one bit to make the register inexact. */\ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x8000; \ + /* 16-bit spill r0 to stack - should assign an ID. */\ + *(u16*)(r10 - 8) = r0; \ + /* 16-bit fill r1 from stack - should preserve the ID. */\ + r1 = *(u16*)(r10 - 8); \ + /* Compare r1 with another register to trigger find_equal_scalars.\ + * Having one random bit is important here, otherwise the verifier cuts\ + * the corners. \ + */ \ + r2 = 0; \ + if r1 != r2 goto l0_%=; \ + /* The result of this comparison is predefined. */\ + if r0 == r2 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("8-bit spill of 8-bit reg should assign ID") +__success __retval(0) +__naked void spill_8bit_of_8bit_ok(void) +{ + asm volatile (" \ + /* Roll one bit to make the register inexact. */\ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x80; \ + /* 8-bit spill r0 to stack - should assign an ID. */\ + *(u8*)(r10 - 8) = r0; \ + /* 8-bit fill r1 from stack - should preserve the ID. */\ + r1 = *(u8*)(r10 - 8); \ + /* Compare r1 with another register to trigger find_equal_scalars.\ + * Having one random bit is important here, otherwise the verifier cuts\ + * the corners. \ + */ \ + r2 = 0; \ + if r1 != r2 goto l0_%=; \ + /* The result of this comparison is predefined. */\ + if r0 == r2 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("spill unbounded reg, then range check src") +__success __retval(0) +__naked void spill_unbounded(void) +{ + asm volatile (" \ + /* Produce an unbounded scalar. */ \ + call %[bpf_get_prandom_u32]; \ + /* Spill r0 to stack. */ \ + *(u64*)(r10 - 8) = r0; \ + /* Boundary check on r0. */ \ + if r0 > 16 goto l0_%=; \ + /* Fill r0 from stack. */ \ + r0 = *(u64*)(r10 - 8); \ + /* Boundary check on r0 with predetermined result. */\ + if r0 <= 16 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("32-bit fill after 64-bit spill") +__success __retval(0) +__naked void fill_32bit_after_spill_64bit(void) +{ + asm volatile(" \ + /* Randomize the upper 32 bits. */ \ + call %[bpf_get_prandom_u32]; \ + r0 <<= 32; \ + /* 64-bit spill r0 to stack. */ \ + *(u64*)(r10 - 8) = r0; \ + /* 32-bit fill r0 from stack. */ \ + " +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r0 = *(u32*)(r10 - 8);" +#else + "r0 = *(u32*)(r10 - 4);" +#endif + " \ + /* Boundary check on r0 with predetermined result. */\ + if r0 == 0 goto l0_%=; \ + /* Dead branch: the verifier should prune it. 
Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ +l0_%=: exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("32-bit fill after 64-bit spill of 32-bit value should preserve ID") +__success __retval(0) +__naked void fill_32bit_after_spill_64bit_preserve_id(void) +{ + asm volatile (" \ + /* Randomize the lower 32 bits. */ \ + call %[bpf_get_prandom_u32]; \ + w0 &= 0xffffffff; \ + /* 64-bit spill r0 to stack - should assign an ID. */\ + *(u64*)(r10 - 8) = r0; \ + /* 32-bit fill r1 from stack - should preserve the ID. */\ + " +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r1 = *(u32*)(r10 - 8);" +#else + "r1 = *(u32*)(r10 - 4);" +#endif + " \ + /* Compare r1 with another register to trigger find_equal_scalars. */\ + r2 = 0; \ + if r1 != r2 goto l0_%=; \ + /* The result of this comparison is predefined. */\ + if r0 == r2 goto l0_%=; \ + /* Dead branch: the verifier should prune it. Do an invalid memory\ + * access if the verifier follows it. \ + */ \ + r0 = *(u64*)(r9 + 0); \ + exit; \ +l0_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +SEC("xdp") +__description("32-bit fill after 64-bit spill should clear ID") +__failure __msg("math between ctx pointer and 4294967295 is not allowed") +__naked void fill_32bit_after_spill_64bit_clear_id(void) +{ + asm volatile (" \ + r6 = r1; \ + /* Roll one bit to force the verifier to track both branches. */\ + call %[bpf_get_prandom_u32]; \ + r0 &= 0x8; \ + /* Put a large number into r1. */ \ + r1 = 0xffffffff; \ + r1 <<= 32; \ + r1 += r0; \ + /* 64-bit spill r1 to stack - should assign an ID. */\ + *(u64*)(r10 - 8) = r1; \ + /* 32-bit fill r2 from stack - should clear the ID. */\ + " +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r2 = *(u32*)(r10 - 8);" +#else + "r2 = *(u32*)(r10 - 4);" +#endif + " \ + /* Compare r2 with another register to trigger find_equal_scalars.\ + * Having one random bit is important here, otherwise the verifier cuts\ + * the corners. If the ID was mistakenly preserved on fill, this would\ + * cause the verifier to think that r1 is also equal to zero in one of\ + * the branches, and equal to eight on the other branch.\ + */ \ + r3 = 0; \ + if r2 != r3 goto l0_%=; \ +l0_%=: r1 >>= 32; \ + /* The verifier shouldn't propagate r2's range to r1, so it should\ + * still remember r1 = 0xffffffff and reject the below.\ + */ \ + r6 += r1; \ + r0 = *(u32*)(r6 + 0); \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + +/* stacksafe(): check if stack spill of an imprecise scalar in old state + * is considered equivalent to STACK_{MISC,INVALID} in cur state. + */ +SEC("socket") +__success __log_level(2) +__msg("8: (79) r1 = *(u64 *)(r10 -8)") +__msg("8: safe") +__msg("processed 11 insns") +/* STACK_INVALID should prevent verifier in unpriv mode from + * considering states equivalent and force an error on second + * verification path (entry - label 1 - label 2). 
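 * Unprivileged programs may not read uninitialized stack (that could
 * leak kernel data), so the same state that is "safe" when privileged
 * is rejected with "invalid read from stack" here.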
+
+/* stacksafe(): check if stack spill of an imprecise scalar in old state
+ * is considered equivalent to STACK_{MISC,INVALID} in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg("8: safe")
+__msg("processed 11 insns")
+/* STACK_INVALID should prevent verifier in unpriv mode from
+ * considering states equivalent and force an error on second
+ * verification path (entry - label 1 - label 2).
+ */
+__failure_unpriv
+__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg_unpriv("9: (95) exit")
+__msg_unpriv("8: (79) r1 = *(u64 *)(r10 -8)")
+__msg_unpriv("invalid read from stack off -8+2 size 8")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_imprecise_scalar_vs_cur_stack_misc(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure STACK_{MISC,INVALID} at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u16*)(r10 - 8) = r0;"
+ "*(u16*)(r10 - 4) = r0;"
+"2:"
+ /* read fp-8, should be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* stacksafe(): check that stack spill of a precise scalar in old state
+ * is not considered equivalent to STACK_MISC in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+/* verifier should visit 'if r1 == 0x2a ...' twice:
+ * - once for path entry - label 2;
+ * - once for path entry - label 1 - label 2.
+ */
+__msg("if r1 == 0x2a goto pc+0")
+__msg("if r1 == 0x2a goto pc+0")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_precise_scalar_vs_cur_stack_misc(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure STACK_MISC at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u64*)(r10 - 8) = r0;"
+ "*(u32*)(r10 - 4) = r0;"
+"2:"
+ /* read fp-8, should not be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ /* use r1 in precise context */
+ "if r1 == 42 goto +0;"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
+/* stacksafe(): check if STACK_MISC in old state is considered
+ * equivalent to stack spill of a scalar in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("8: (79) r0 = *(u64 *)(r10 -8)")
+__msg("8: safe")
+__msg("processed 11 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_stack_misc_vs_cur_scalar(void)
+{
+ asm volatile(
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure STACK_{MISC,INVALID} at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u16*)(r10 - 8) = r0;"
+ "*(u16*)(r10 - 4) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure scalar at fp-8 */
+ "r0 = 42;"
+ "*(u64*)(r10 - 8) = r0;"
+"2:"
+ /* read fp-8, should be considered safe on second visit */
+ "r0 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
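+
+/* Informal summary of the stacksafe() cases exercised by this group of
+ * tests (one more follows below), distilled from the tests rather than
+ * from the verifier source:
+ * old spilled scalar (imprecise) vs cur STACK_MISC/INVALID -> equivalent
+ * old spilled scalar (precise) vs cur STACK_MISC -> not equivalent
+ * old STACK_MISC vs cur spilled scalar -> equivalent
+ * old STACK_MISC vs cur spilled pointer -> not equivalent
+ */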
+
+/* stacksafe(): check that STACK_MISC in old state is not considered
+ * equivalent to stack spill of a non-scalar in cur state.
+ */
+SEC("socket")
+__success __log_level(2)
+/* verifier should process exit instructions twice:
+ * - once for path entry - label 2;
+ * - once for path entry - label 1 - label 2.
+ */
+__msg("r1 = *(u64 *)(r10 -8)")
+__msg("exit")
+__msg("r1 = *(u64 *)(r10 -8)")
+__msg("exit")
+__msg("processed 11 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void old_stack_misc_vs_cur_ctx_ptr(void)
+{
+ asm volatile(
+ /* remember context pointer in r9 */
+ "r9 = r1;"
+ /* get a random value for branching */
+ "call %[bpf_ktime_get_ns];"
+ "if r0 == 0 goto 1f;"
+ /* conjure STACK_MISC at fp-8 */
+ "call %[bpf_ktime_get_ns];"
+ "*(u64*)(r10 - 8) = r0;"
+ "*(u32*)(r10 - 4) = r0;"
+ "goto 2f;"
+"1:"
+ /* conjure context pointer in fp-8 */
+ "*(u64*)(r10 - 8) = r9;"
+"2:"
+ /* read fp-8, should not be considered safe on second visit */
+ "r1 = *(u64*)(r10 - 8);"
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+ : __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
index 9c1aa69650..fb316c080c 100644
--- a/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/verifier_spin_lock.c
@@ -330,7 +330,7 @@ l1_%=: r7 = r0; \
 
 SEC("cgroup/skb")
 __description("spin_lock: test10 lock in subprog without unlock")
-__failure __msg("unlock is missing")
+__success __failure_unpriv __msg_unpriv("")
 __naked void lock_in_subprog_without_unlock(void)
 {
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
index 518329c666..7ea9785738 100644
--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -7,6 +7,8 @@
 #include <bpf/bpf_endian.h>
 #include <asm/errno.h>
 
+#include "bpf_compiler.h"
+
 #define TC_ACT_OK 0
 #define TC_ACT_SHOT 2
 
@@ -151,11 +153,11 @@ static __always_inline __u16 csum_ipv6_magic(const struct in6_addr *saddr,
 	__u64 sum = csum;
 	int i;
 
-#pragma unroll
+	__pragma_loop_unroll
 	for (i = 0; i < 4; i++)
 		sum += (__u32)saddr->in6_u.u6_addr32[i];
 
-#pragma unroll
+	__pragma_loop_unroll
 	for (i = 0; i < 4; i++)
 		sum += (__u32)daddr->in6_u.u6_addr32[i];
 
diff --git a/tools/testing/selftests/bpf/progs/xdping_kern.c b/tools/testing/selftests/bpf/progs/xdping_kern.c
index 54cf176511..44e2b0ef23 100644
--- a/tools/testing/selftests/bpf/progs/xdping_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdping_kern.c
@@ -15,6 +15,7 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_endian.h>
 
+#include "bpf_compiler.h"
 #include "xdping.h"
 
 struct {
@@ -116,7 +117,7 @@ int xdping_client(struct xdp_md *ctx)
 		return XDP_PASS;
 
 	if (pinginfo->start) {
-#pragma clang loop unroll(full)
+		__pragma_loop_unroll_full
 		for (i = 0; i < XDPING_MAX_COUNT; i++) {
 			if (pinginfo->times[i] == 0)
 				break;
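
Note on the last two hunks: they swap raw #pragma directives for wrapper macros pulled in via the new "bpf_compiler.h" include. The header itself is outside this excerpt; the sketch below is a hypothetical approximation of such a shim (macro names are taken from the hunks, the pragma spellings are the standard clang and GCC ones, and the unroll counts are assumed), shown only to illustrate why the indirection helps: the same source then builds under both clang and the GCC BPF backend, which a bare `#pragma clang loop ...` would break.

	/* Hypothetical approximation of bpf_compiler.h, for illustration only. */
	#ifndef __BPF_COMPILER_H__
	#define __BPF_COMPILER_H__

	#define DO_PRAGMA_(X) _Pragma(#X)	/* stringize X, emit it as a pragma */

	#ifdef __clang__
	#define __pragma_loop_unroll		DO_PRAGMA_(clang loop unroll(enable))
	#define __pragma_loop_unroll_full	DO_PRAGMA_(clang loop unroll(full))
	#else
	#define __pragma_loop_unroll		DO_PRAGMA_(GCC unroll 8)	/* count assumed */
	#define __pragma_loop_unroll_full	DO_PRAGMA_(GCC unroll 65534)	/* count assumed */
	#endif

	#endif /* __BPF_COMPILER_H__ */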