Diffstat (limited to 'tools/testing/selftests/bpf/progs')
48 files changed, 2944 insertions, 86 deletions
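Most of the new selftests below exercise the experimental BPF exceptions API: bpf_throw() plus the bpf_assert*() wrappers, optionally paired with an __exception_cb() callback. A minimal sketch of that pattern, assuming the selftests' bpf_misc.h and bpf_experimental.h headers (this sketch is illustrative only and not part of the diff):

// Minimal sketch (not part of the diff): bpf_throw() with a custom
// exception callback, as exercised by progs/exceptions*.c below.
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"

/* Runs after bpf_throw() unwinds all frames; its scalar return
 * value becomes the program's return value. */
__noinline int my_exception_cb(u64 cookie)
{
	return cookie + 1;
}

SEC("tc")
__exception_cb(my_exception_cb)
int throwing_prog(struct __sk_buff *ctx)
{
	if (ctx->len > 1500)
		bpf_throw(64); /* never returns */
	return 0;
}

char _license[] SEC("license") = "GPL";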
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c index dd923dc637..dd923dc637 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_vmas.c diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c index 96131b9a1c..96131b9a1c 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_task.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c diff --git a/tools/testing/selftests/bpf/progs/connect_unix_prog.c b/tools/testing/selftests/bpf/progs/connect_unix_prog.c new file mode 100644 index 0000000000..ca8aa2f116 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/connect_unix_prog.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" + +#include <string.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_kfuncs.h" + +__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite"; + +SEC("cgroup/connect_unix") +int connect_unix_prog(struct bpf_sock_addr *ctx) +{ + struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx); + struct sockaddr_un *sa_kern_unaddr; + __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) + + sizeof(SERVUN_REWRITE_ADDRESS) - 1; + int ret; + + /* Rewrite destination. */ + ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS, + sizeof(SERVUN_REWRITE_ADDRESS) - 1); + if (ret) + return 0; + + if (sa_kern->uaddrlen != unaddrlen) + return 0; + + sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, + bpf_core_type_id_kernel(struct sockaddr_un)); + if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, + sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) + return 0; + + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/exceptions.c b/tools/testing/selftests/bpf/progs/exceptions.c new file mode 100644 index 0000000000..2811ee842b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/exceptions.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include <bpf/bpf_endian.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +#ifndef ETH_P_IP +#define ETH_P_IP 0x0800 +#endif + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 4); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +static __noinline int static_func(u64 i) +{ + bpf_throw(32); + return i; +} + +__noinline int global2static_simple(u64 i) +{ + static_func(i + 2); + return i - 1; +} + +__noinline int global2static(u64 i) +{ + if (i == ETH_P_IP) + bpf_throw(16); + return static_func(i); +} + +static __noinline int static2global(u64 i) +{ + return global2static(i) + i; +} + +SEC("tc") +int exception_throw_always_1(struct __sk_buff *ctx) +{ + bpf_throw(64); + return 0; +} + +/* In this case, the global func will never be seen executing after call to + * static subprog, hence verifier will DCE the remaining instructions. Ensure we + * are resilient to that. 
+ */ +SEC("tc") +int exception_throw_always_2(struct __sk_buff *ctx) +{ + return global2static_simple(ctx->protocol); +} + +SEC("tc") +int exception_throw_unwind_1(struct __sk_buff *ctx) +{ + return static2global(bpf_ntohs(ctx->protocol)); +} + +SEC("tc") +int exception_throw_unwind_2(struct __sk_buff *ctx) +{ + return static2global(bpf_ntohs(ctx->protocol) - 1); +} + +SEC("tc") +int exception_throw_default(struct __sk_buff *ctx) +{ + bpf_throw(0); + return 1; +} + +SEC("tc") +int exception_throw_default_value(struct __sk_buff *ctx) +{ + bpf_throw(5); + return 1; +} + +SEC("tc") +int exception_tail_call_target(struct __sk_buff *ctx) +{ + bpf_throw(16); + return 0; +} + +static __noinline +int exception_tail_call_subprog(struct __sk_buff *ctx) +{ + volatile int ret = 10; + + bpf_tail_call_static(ctx, &jmp_table, 0); + return ret; +} + +SEC("tc") +int exception_tail_call(struct __sk_buff *ctx) { + volatile int ret = 0; + + ret = exception_tail_call_subprog(ctx); + return ret + 8; +} + +__noinline int exception_ext_global(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + return ret; +} + +static __noinline int exception_ext_static(struct __sk_buff *ctx) +{ + return exception_ext_global(ctx); +} + +SEC("tc") +int exception_ext(struct __sk_buff *ctx) +{ + return exception_ext_static(ctx); +} + +__noinline int exception_cb_mod_global(u64 cookie) +{ + volatile int ret = 0; + + return ret; +} + +/* Example of how the exception callback supplied during verification can still + * introduce extensions by calling to dummy global functions, and alter runtime + * behavior. + * + * Right now we don't allow freplace attachment to exception callback itself, + * but if the need arises this restriction is technically feasible to relax in + * the future. + */ +__noinline int exception_cb_mod(u64 cookie) +{ + return exception_cb_mod_global(cookie) + cookie + 10; +} + +SEC("tc") +__exception_cb(exception_cb_mod) +int exception_ext_mod_cb_runtime(struct __sk_buff *ctx) +{ + bpf_throw(25); + return 0; +} + +__noinline static int subprog(struct __sk_buff *ctx) +{ + return bpf_ktime_get_ns(); +} + +__noinline static int throwing_subprog(struct __sk_buff *ctx) +{ + if (ctx->tstamp) + bpf_throw(0); + return bpf_ktime_get_ns(); +} + +__noinline int global_subprog(struct __sk_buff *ctx) +{ + return bpf_ktime_get_ns(); +} + +__noinline int throwing_global_subprog(struct __sk_buff *ctx) +{ + if (ctx->tstamp) + bpf_throw(0); + return bpf_ktime_get_ns(); +} + +SEC("tc") +int exception_throw_subprog(struct __sk_buff *ctx) +{ + switch (ctx->protocol) { + case 1: + return subprog(ctx); + case 2: + return global_subprog(ctx); + case 3: + return throwing_subprog(ctx); + case 4: + return throwing_global_subprog(ctx); + default: + break; + } + bpf_throw(1); + return 0; +} + +__noinline int assert_nz_gfunc(u64 c) +{ + volatile u64 cookie = c; + + bpf_assert(cookie != 0); + return 0; +} + +__noinline int assert_zero_gfunc(u64 c) +{ + volatile u64 cookie = c; + + bpf_assert_eq(cookie, 0); + return 0; +} + +__noinline int assert_neg_gfunc(s64 c) +{ + volatile s64 cookie = c; + + bpf_assert_lt(cookie, 0); + return 0; +} + +__noinline int assert_pos_gfunc(s64 c) +{ + volatile s64 cookie = c; + + bpf_assert_gt(cookie, 0); + return 0; +} + +__noinline int assert_negeq_gfunc(s64 c) +{ + volatile s64 cookie = c; + + bpf_assert_le(cookie, -1); + return 0; +} + +__noinline int assert_poseq_gfunc(s64 c) +{ + volatile s64 cookie = c; + + bpf_assert_ge(cookie, 1); + return 0; +} + +__noinline int assert_nz_gfunc_with(u64 c) +{ + volatile 
u64 cookie = c; + + bpf_assert_with(cookie != 0, cookie + 100); + return 0; +} + +__noinline int assert_zero_gfunc_with(u64 c) +{ + volatile u64 cookie = c; + + bpf_assert_eq_with(cookie, 0, cookie + 100); + return 0; +} + +__noinline int assert_neg_gfunc_with(s64 c) +{ + volatile s64 cookie = c; + + bpf_assert_lt_with(cookie, 0, cookie + 100); + return 0; +} + +__noinline int assert_pos_gfunc_with(s64 c) +{ + volatile s64 cookie = c; + + bpf_assert_gt_with(cookie, 0, cookie + 100); + return 0; +} + +__noinline int assert_negeq_gfunc_with(s64 c) +{ + volatile s64 cookie = c; + + bpf_assert_le_with(cookie, -1, cookie + 100); + return 0; +} + +__noinline int assert_poseq_gfunc_with(s64 c) +{ + volatile s64 cookie = c; + + bpf_assert_ge_with(cookie, 1, cookie + 100); + return 0; +} + +#define check_assert(name, cookie, tag) \ +SEC("tc") \ +int exception##tag##name(struct __sk_buff *ctx) \ +{ \ + return name(cookie) + 1; \ +} + +check_assert(assert_nz_gfunc, 5, _); +check_assert(assert_zero_gfunc, 0, _); +check_assert(assert_neg_gfunc, -100, _); +check_assert(assert_pos_gfunc, 100, _); +check_assert(assert_negeq_gfunc, -1, _); +check_assert(assert_poseq_gfunc, 1, _); + +check_assert(assert_nz_gfunc_with, 5, _); +check_assert(assert_zero_gfunc_with, 0, _); +check_assert(assert_neg_gfunc_with, -100, _); +check_assert(assert_pos_gfunc_with, 100, _); +check_assert(assert_negeq_gfunc_with, -1, _); +check_assert(assert_poseq_gfunc_with, 1, _); + +check_assert(assert_nz_gfunc, 0, _bad_); +check_assert(assert_zero_gfunc, 5, _bad_); +check_assert(assert_neg_gfunc, 100, _bad_); +check_assert(assert_pos_gfunc, -100, _bad_); +check_assert(assert_negeq_gfunc, 1, _bad_); +check_assert(assert_poseq_gfunc, -1, _bad_); + +check_assert(assert_nz_gfunc_with, 0, _bad_); +check_assert(assert_zero_gfunc_with, 5, _bad_); +check_assert(assert_neg_gfunc_with, 100, _bad_); +check_assert(assert_pos_gfunc_with, -100, _bad_); +check_assert(assert_negeq_gfunc_with, 1, _bad_); +check_assert(assert_poseq_gfunc_with, -1, _bad_); + +SEC("tc") +int exception_assert_range(struct __sk_buff *ctx) +{ + u64 time = bpf_ktime_get_ns(); + + bpf_assert_range(time, 0, ~0ULL); + return 1; +} + +SEC("tc") +int exception_assert_range_with(struct __sk_buff *ctx) +{ + u64 time = bpf_ktime_get_ns(); + + bpf_assert_range_with(time, 0, ~0ULL, 10); + return 1; +} + +SEC("tc") +int exception_bad_assert_range(struct __sk_buff *ctx) +{ + u64 time = bpf_ktime_get_ns(); + + bpf_assert_range(time, -100, 100); + return 1; +} + +SEC("tc") +int exception_bad_assert_range_with(struct __sk_buff *ctx) +{ + u64 time = bpf_ktime_get_ns(); + + bpf_assert_range_with(time, -1000, 1000, 10); + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c new file mode 100644 index 0000000000..e1e5c54a6a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <vmlinux.h> +#include <limits.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include <bpf/bpf_endian.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +#define check_assert(type, op, name, value) \ + SEC("?tc") \ + __log_level(2) __failure \ + int check_assert_##op##_##name(void *ctx) \ + { \ + type num = bpf_ktime_get_ns(); \ + bpf_assert_##op(num, value); \ + return *(u64 *)num; \ + } + +__msg(": R0_w=-2147483648 R10=fp0") +check_assert(s64, eq, int_min, 
INT_MIN); +__msg(": R0_w=2147483647 R10=fp0") +check_assert(s64, eq, int_max, INT_MAX); +__msg(": R0_w=0 R10=fp0") +check_assert(s64, eq, zero, 0); +__msg(": R0_w=-9223372036854775808 R1_w=-9223372036854775808 R10=fp0") +check_assert(s64, eq, llong_min, LLONG_MIN); +__msg(": R0_w=9223372036854775807 R1_w=9223372036854775807 R10=fp0") +check_assert(s64, eq, llong_max, LLONG_MAX); + +__msg(": R0_w=scalar(smax=2147483646) R10=fp0") +check_assert(s64, lt, pos, INT_MAX); +__msg(": R0_w=scalar(smax=-1,umin=9223372036854775808,var_off=(0x8000000000000000; 0x7fffffffffffffff))") +check_assert(s64, lt, zero, 0); +__msg(": R0_w=scalar(smax=-2147483649,umin=9223372036854775808,umax=18446744071562067967,var_off=(0x8000000000000000; 0x7fffffffffffffff))") +check_assert(s64, lt, neg, INT_MIN); + +__msg(": R0_w=scalar(smax=2147483647) R10=fp0") +check_assert(s64, le, pos, INT_MAX); +__msg(": R0_w=scalar(smax=0) R10=fp0") +check_assert(s64, le, zero, 0); +__msg(": R0_w=scalar(smax=-2147483648,umin=9223372036854775808,umax=18446744071562067968,var_off=(0x8000000000000000; 0x7fffffffffffffff))") +check_assert(s64, le, neg, INT_MIN); + +__msg(": R0_w=scalar(smin=umin=2147483648,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))") +check_assert(s64, gt, pos, INT_MAX); +__msg(": R0_w=scalar(smin=umin=1,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))") +check_assert(s64, gt, zero, 0); +__msg(": R0_w=scalar(smin=-2147483647) R10=fp0") +check_assert(s64, gt, neg, INT_MIN); + +__msg(": R0_w=scalar(smin=umin=2147483647,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))") +check_assert(s64, ge, pos, INT_MAX); +__msg(": R0_w=scalar(smin=0,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0") +check_assert(s64, ge, zero, 0); +__msg(": R0_w=scalar(smin=-2147483648) R10=fp0") +check_assert(s64, ge, neg, INT_MIN); + +SEC("?tc") +__log_level(2) __failure +__msg(": R0=0 R1=ctx(off=0,imm=0) R2=scalar(smin=smin32=-2147483646,smax=smax32=2147483645) R10=fp0") +int check_assert_range_s64(struct __sk_buff *ctx) +{ + struct bpf_sock *sk = ctx->sk; + s64 num; + + _Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match"); + if (!sk) + return 0; + num = sk->rx_queue_mapping; + bpf_assert_range(num, INT_MIN + 2, INT_MAX - 2); + return *((u8 *)ctx + num); +} + +SEC("?tc") +__log_level(2) __failure +__msg(": R1=ctx(off=0,imm=0) R2=scalar(smin=umin=smin32=umin32=4096,smax=umax=smax32=umax32=8192,var_off=(0x0; 0x3fff))") +int check_assert_range_u64(struct __sk_buff *ctx) +{ + u64 num = ctx->len; + + bpf_assert_range(num, 4096, 8192); + return *((u8 *)ctx + num); +} + +SEC("?tc") +__log_level(2) __failure +__msg(": R0=0 R1=ctx(off=0,imm=0) R2=4096 R10=fp0") +int check_assert_single_range_s64(struct __sk_buff *ctx) +{ + struct bpf_sock *sk = ctx->sk; + s64 num; + + _Static_assert(_Generic((sk->rx_queue_mapping), s32: 1, default: 0), "type match"); + if (!sk) + return 0; + num = sk->rx_queue_mapping; + + bpf_assert_range(num, 4096, 4096); + return *((u8 *)ctx + num); +} + +SEC("?tc") +__log_level(2) __failure +__msg(": R1=ctx(off=0,imm=0) R2=4096 R10=fp0") +int check_assert_single_range_u64(struct __sk_buff *ctx) +{ + u64 num = ctx->len; + + bpf_assert_range(num, 4096, 4096); + return *((u8 *)ctx + num); +} + +SEC("?tc") +__log_level(2) __failure +__msg(": R1=pkt(off=64,r=64,imm=0) R2=pkt_end(off=0,imm=0) R6=pkt(off=0,r=64,imm=0) R10=fp0") +int check_assert_generic(struct __sk_buff *ctx) +{ + u8 *data_end = (void *)(long)ctx->data_end; + u8 *data = (void 
*)(long)ctx->data; + + bpf_assert(data + 64 <= data_end); + return data[128]; +} + +SEC("?fentry/bpf_check") +__failure __msg("At program exit the register R0 has value (0x40; 0x0)") +int check_assert_with_return(void *ctx) +{ + bpf_assert_with(!ctx, 64); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/exceptions_ext.c b/tools/testing/selftests/bpf/progs/exceptions_ext.c new file mode 100644 index 0000000000..743c05185d --- /dev/null +++ b/tools/testing/selftests/bpf/progs/exceptions_ext.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#include "bpf_experimental.h" + +SEC("?fentry") +int pfentry(void *ctx) +{ + return 0; +} + +SEC("?fentry") +int throwing_fentry(void *ctx) +{ + bpf_throw(0); + return 0; +} + +__noinline int exception_cb(u64 cookie) +{ + return cookie + 64; +} + +SEC("?freplace") +int extension(struct __sk_buff *ctx) +{ + return 0; +} + +SEC("?freplace") +__exception_cb(exception_cb) +int throwing_exception_cb_extension(u64 cookie) +{ + bpf_throw(32); + return 0; +} + +SEC("?freplace") +__exception_cb(exception_cb) +int throwing_extension(struct __sk_buff *ctx) +{ + bpf_throw(64); + return 0; +} + +SEC("?fexit") +int pfexit(void *ctx) +{ + return 0; +} + +SEC("?fexit") +int throwing_fexit(void *ctx) +{ + bpf_throw(0); + return 0; +} + +SEC("?fmod_ret") +int pfmod_ret(void *ctx) +{ + return 0; +} + +SEC("?fmod_ret") +int throwing_fmod_ret(void *ctx) +{ + bpf_throw(0); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/exceptions_fail.c b/tools/testing/selftests/bpf/progs/exceptions_fail.c new file mode 100644 index 0000000000..8c0ef27422 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/exceptions_fail.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> + +#include "bpf_misc.h" +#include "bpf_experimental.h" + +extern void bpf_rcu_read_lock(void) __ksym; + +#define private(name) SEC(".bss." 
#name) __hidden __attribute__((aligned(8))) + +struct foo { + struct bpf_rb_node node; +}; + +struct hmap_elem { + struct bpf_timer timer; +}; + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 64); + __type(key, int); + __type(value, struct hmap_elem); +} hmap SEC(".maps"); + +private(A) struct bpf_spin_lock lock; +private(A) struct bpf_rb_root rbtree __contains(foo, node); + +__noinline void *exception_cb_bad_ret_type(u64 cookie) +{ + return NULL; +} + +__noinline int exception_cb_bad_arg_0(void) +{ + return 0; +} + +__noinline int exception_cb_bad_arg_2(int a, int b) +{ + return 0; +} + +__noinline int exception_cb_ok_arg_small(int a) +{ + return 0; +} + +SEC("?tc") +__exception_cb(exception_cb_bad_ret_type) +__failure __msg("Global function exception_cb_bad_ret_type() doesn't return scalar.") +int reject_exception_cb_type_1(struct __sk_buff *ctx) +{ + bpf_throw(0); + return 0; +} + +SEC("?tc") +__exception_cb(exception_cb_bad_arg_0) +__failure __msg("exception cb only supports single integer argument") +int reject_exception_cb_type_2(struct __sk_buff *ctx) +{ + bpf_throw(0); + return 0; +} + +SEC("?tc") +__exception_cb(exception_cb_bad_arg_2) +__failure __msg("exception cb only supports single integer argument") +int reject_exception_cb_type_3(struct __sk_buff *ctx) +{ + bpf_throw(0); + return 0; +} + +SEC("?tc") +__exception_cb(exception_cb_ok_arg_small) +__success +int reject_exception_cb_type_4(struct __sk_buff *ctx) +{ + bpf_throw(0); + return 0; +} + +__noinline +static int timer_cb(void *map, int *key, struct bpf_timer *timer) +{ + bpf_throw(0); + return 0; +} + +SEC("?tc") +__failure __msg("cannot be called from callback subprog") +int reject_async_callback_throw(struct __sk_buff *ctx) +{ + struct hmap_elem *elem; + + elem = bpf_map_lookup_elem(&hmap, &(int){0}); + if (!elem) + return 0; + return bpf_timer_set_callback(&elem->timer, timer_cb); +} + +__noinline static int subprog_lock(struct __sk_buff *ctx) +{ + volatile int ret = 0; + + bpf_spin_lock(&lock); + if (ctx->len) + bpf_throw(0); + return ret; +} + +SEC("?tc") +__failure __msg("function calls are not allowed while holding a lock") +int reject_with_lock(void *ctx) +{ + bpf_spin_lock(&lock); + bpf_throw(0); + return 0; +} + +SEC("?tc") +__failure __msg("function calls are not allowed while holding a lock") +int reject_subprog_with_lock(void *ctx) +{ + return subprog_lock(ctx); +} + +SEC("?tc") +__failure __msg("bpf_rcu_read_unlock is missing") +int reject_with_rcu_read_lock(void *ctx) +{ + bpf_rcu_read_lock(); + bpf_throw(0); + return 0; +} + +__noinline static int throwing_subprog(struct __sk_buff *ctx) +{ + if (ctx->len) + bpf_throw(0); + return 0; +} + +SEC("?tc") +__failure __msg("bpf_rcu_read_unlock is missing") +int reject_subprog_with_rcu_read_lock(void *ctx) +{ + bpf_rcu_read_lock(); + return throwing_subprog(ctx); +} + +static bool rbless(struct bpf_rb_node *n1, const struct bpf_rb_node *n2) +{ + bpf_throw(0); + return true; +} + +SEC("?tc") +__failure __msg("function calls are not allowed while holding a lock") +int reject_with_rbtree_add_throw(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_spin_lock(&lock); + bpf_rbtree_add(&rbtree, &f->node, rbless); + bpf_spin_unlock(&lock); + return 0; +} + +SEC("?tc") +__failure __msg("Unreleased reference") +int reject_with_reference(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_throw(0); + return 0; +} + +__noinline static int subprog_ref(struct __sk_buff *ctx) +{ 
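/* [annotation, not part of the patch] Same leak as reject_with_reference()
 * above, but from a static subprog; reject_with_subprog_reference() below
 * checks that the verifier still reports the unreleased reference when
 * bpf_throw() unwinds through this frame.
 */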
+ struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_throw(0); + return 0; +} + +__noinline static int subprog_cb_ref(u32 i, void *ctx) +{ + bpf_throw(0); + return 0; +} + +SEC("?tc") +__failure __msg("Unreleased reference") +int reject_with_cb_reference(void *ctx) +{ + struct foo *f; + + f = bpf_obj_new(typeof(*f)); + if (!f) + return 0; + bpf_loop(5, subprog_cb_ref, NULL, 0); + bpf_obj_drop(f); + return 0; +} + +SEC("?tc") +__failure __msg("cannot be called from callback") +int reject_with_cb(void *ctx) +{ + bpf_loop(5, subprog_cb_ref, NULL, 0); + return 0; +} + +SEC("?tc") +__failure __msg("Unreleased reference") +int reject_with_subprog_reference(void *ctx) +{ + return subprog_ref(ctx) + 1; +} + +__noinline int throwing_exception_cb(u64 c) +{ + bpf_throw(0); + return c; +} + +__noinline int exception_cb1(u64 c) +{ + return c; +} + +__noinline int exception_cb2(u64 c) +{ + return c; +} + +static __noinline int static_func(struct __sk_buff *ctx) +{ + return exception_cb1(ctx->tstamp); +} + +__noinline int global_func(struct __sk_buff *ctx) +{ + return exception_cb1(ctx->tstamp); +} + +SEC("?tc") +__exception_cb(throwing_exception_cb) +__failure __msg("cannot be called from callback subprog") +int reject_throwing_exception_cb(struct __sk_buff *ctx) +{ + return 0; +} + +SEC("?tc") +__exception_cb(exception_cb1) +__failure __msg("cannot call exception cb directly") +int reject_exception_cb_call_global_func(struct __sk_buff *ctx) +{ + return global_func(ctx); +} + +SEC("?tc") +__exception_cb(exception_cb1) +__failure __msg("cannot call exception cb directly") +int reject_exception_cb_call_static_func(struct __sk_buff *ctx) +{ + return static_func(ctx); +} + +SEC("?tc") +__exception_cb(exception_cb1) +__exception_cb(exception_cb2) +__failure __msg("multiple exception callback tags for main subprog") +int reject_multiple_exception_cb(struct __sk_buff *ctx) +{ + bpf_throw(0); + return 16; +} + +__noinline int exception_cb_bad_ret(u64 c) +{ + return c; +} + +SEC("?fentry/bpf_check") +__exception_cb(exception_cb_bad_ret) +__failure __msg("At program exit the register R0 has unknown scalar value should") +int reject_set_exception_cb_bad_ret1(void *ctx) +{ + return 0; +} + +SEC("?fentry/bpf_check") +__failure __msg("At program exit the register R0 has value (0x40; 0x0) should") +int reject_set_exception_cb_bad_ret2(void *ctx) +{ + bpf_throw(64); + return 0; +} + +__noinline static int loop_cb1(u32 index, int *ctx) +{ + bpf_throw(0); + return 0; +} + +__noinline static int loop_cb2(u32 index, int *ctx) +{ + bpf_throw(0); + return 0; +} + +SEC("?tc") +__failure __msg("cannot be called from callback") +int reject_exception_throw_cb(struct __sk_buff *ctx) +{ + bpf_loop(5, loop_cb1, NULL, 0); + return 0; +} + +SEC("?tc") +__failure __msg("cannot be called from callback") +int reject_exception_throw_cb_diff(struct __sk_buff *ctx) +{ + if (ctx->protocol) + bpf_loop(5, loop_cb1, NULL, 0); + else + bpf_loop(5, loop_cb2, NULL, 0); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c new file mode 100644 index 0000000000..9c078f34bb --- /dev/null +++ b/tools/testing/selftests/bpf/progs/getpeername_unix_prog.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include "vmlinux.h" + +#include <string.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_kfuncs.h" + +__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite"; + +SEC("cgroup/getpeername_unix") +int getpeername_unix_prog(struct bpf_sock_addr *ctx) +{ + struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx); + struct sockaddr_un *sa_kern_unaddr; + __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) + + sizeof(SERVUN_REWRITE_ADDRESS) - 1; + int ret; + + ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS, + sizeof(SERVUN_REWRITE_ADDRESS) - 1); + if (ret) + return 1; + + if (sa_kern->uaddrlen != unaddrlen) + return 1; + + sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, + bpf_core_type_id_kernel(struct sockaddr_un)); + if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, + sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) + return 1; + + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c new file mode 100644 index 0000000000..ac71451114 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/getsockname_unix_prog.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" + +#include <string.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_kfuncs.h" + +__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite"; + +SEC("cgroup/getsockname_unix") +int getsockname_unix_prog(struct bpf_sock_addr *ctx) +{ + struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx); + struct sockaddr_un *sa_kern_unaddr; + __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) + + sizeof(SERVUN_REWRITE_ADDRESS) - 1; + int ret; + + ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS, + sizeof(SERVUN_REWRITE_ADDRESS) - 1); + if (ret) + return 1; + + if (sa_kern->uaddrlen != unaddrlen) + return 1; + + sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, + bpf_core_type_id_kernel(struct sockaddr_un)); + if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, + sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) + return 1; + + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index c20c4e38b7..844d968c27 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -846,7 +846,7 @@ __naked int delayed_precision_mark(void) "call %[bpf_iter_num_next];" "if r0 == 0 goto 2f;" "if r6 != 42 goto 3f;" - "r7 = -32;" + "r7 = -33;" "call %[bpf_get_prandom_u32];" "r6 = r0;" "goto 1b;\n" diff --git a/tools/testing/selftests/bpf/progs/iters_css.c b/tools/testing/selftests/bpf/progs/iters_css.c new file mode 100644 index 0000000000..ec1f6c2f59 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_css.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +char _license[] SEC("license") = "GPL"; + +pid_t target_pid; +u64 root_cg_id, leaf_cg_id; +u64 first_cg_id, last_cg_id; + +int pre_order_cnt, post_order_cnt, tree_high; + +struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; +void bpf_cgroup_release(struct cgroup *p) __ksym; +void 
bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; + +SEC("fentry.s/" SYS_PREFIX "sys_getpgid") +int iter_css_for_each(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct cgroup_subsys_state *root_css, *leaf_css, *pos; + struct cgroup *root_cgrp, *leaf_cgrp, *cur_cgrp; + + if (cur_task->pid != target_pid) + return 0; + + root_cgrp = bpf_cgroup_from_id(root_cg_id); + + if (!root_cgrp) + return 0; + + leaf_cgrp = bpf_cgroup_from_id(leaf_cg_id); + + if (!leaf_cgrp) { + bpf_cgroup_release(root_cgrp); + return 0; + } + root_css = &root_cgrp->self; + leaf_css = &leaf_cgrp->self; + pre_order_cnt = post_order_cnt = tree_high = 0; + first_cg_id = last_cg_id = 0; + + bpf_rcu_read_lock(); + bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) { + cur_cgrp = pos->cgroup; + post_order_cnt++; + last_cg_id = cur_cgrp->kn->id; + } + + bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_PRE) { + cur_cgrp = pos->cgroup; + pre_order_cnt++; + if (!first_cg_id) + first_cg_id = cur_cgrp->kn->id; + } + + bpf_for_each(css, pos, leaf_css, BPF_CGROUP_ITER_ANCESTORS_UP) + tree_high++; + + bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_ANCESTORS_UP) + tree_high--; + bpf_rcu_read_unlock(); + bpf_cgroup_release(root_cgrp); + bpf_cgroup_release(leaf_cgrp); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/iters_css_task.c b/tools/testing/selftests/bpf/progs/iters_css_task.c new file mode 100644 index 0000000000..9ac758649c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_css_task.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */ + +#include "vmlinux.h" +#include <errno.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +char _license[] SEC("license") = "GPL"; + +struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym; +struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; +void bpf_cgroup_release(struct cgroup *p) __ksym; + +pid_t target_pid; +int css_task_cnt; +u64 cg_id; + +SEC("lsm/file_mprotect") +int BPF_PROG(iter_css_task_for_each, struct vm_area_struct *vma, + unsigned long reqprot, unsigned long prot, int ret) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct cgroup_subsys_state *css; + struct task_struct *task; + struct cgroup *cgrp; + + if (cur_task->pid != target_pid) + return ret; + + cgrp = bpf_cgroup_from_id(cg_id); + + if (!cgrp) + return -EPERM; + + css = &cgrp->self; + css_task_cnt = 0; + + bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) + if (task->pid == target_pid) + css_task_cnt++; + + bpf_cgroup_release(cgrp); + + return -EPERM; +} + +static inline u64 cgroup_id(struct cgroup *cgrp) +{ + return cgrp->kn->id; +} + +SEC("?iter/cgroup") +int cgroup_id_printer(struct bpf_iter__cgroup *ctx) +{ + struct seq_file *seq = ctx->meta->seq; + struct cgroup *cgrp = ctx->cgroup; + struct cgroup_subsys_state *css; + struct task_struct *task; + + /* epilogue */ + if (cgrp == NULL) { + BPF_SEQ_PRINTF(seq, "epilogue\n"); + return 0; + } + + /* prologue */ + if (ctx->meta->seq_num == 0) + BPF_SEQ_PRINTF(seq, "prologue\n"); + + BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp)); + + css = &cgrp->self; + css_task_cnt = 0; + bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) { + if (task->pid == target_pid) + css_task_cnt++; + } + + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +int BPF_PROG(iter_css_task_for_each_sleep) +{ + u64 
cgrp_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp = bpf_cgroup_from_id(cgrp_id); + struct cgroup_subsys_state *css; + struct task_struct *task; + + if (cgrp == NULL) + return 0; + css = &cgrp->self; + + bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) { + + } + bpf_cgroup_release(cgrp); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/iters_task.c b/tools/testing/selftests/bpf/progs/iters_task.c new file mode 100644 index 0000000000..c9b4055cd4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_task.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +char _license[] SEC("license") = "GPL"; + +pid_t target_pid; +int procs_cnt, threads_cnt, proc_threads_cnt; + +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; + +SEC("fentry.s/" SYS_PREFIX "sys_getpgid") +int iter_task_for_each_sleep(void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct task_struct *pos; + + if (cur_task->pid != target_pid) + return 0; + procs_cnt = threads_cnt = proc_threads_cnt = 0; + + bpf_rcu_read_lock(); + bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) + if (pos->pid == target_pid) + procs_cnt++; + + bpf_for_each(task, pos, cur_task, BPF_TASK_ITER_PROC_THREADS) + proc_threads_cnt++; + + bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_THREADS) + if (pos->tgid == target_pid) + threads_cnt++; + bpf_rcu_read_unlock(); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/iters_task_failure.c b/tools/testing/selftests/bpf/progs/iters_task_failure.c new file mode 100644 index 0000000000..6b1588d706 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_task_failure.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Chuyi Zhou <zhouchuyi@bytedance.com> */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "bpf_misc.h" +#include "bpf_experimental.h" + +char _license[] SEC("license") = "GPL"; + +struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; +void bpf_cgroup_release(struct cgroup *p) __ksym; +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("expected an RCU CS when using bpf_iter_task_next") +int BPF_PROG(iter_tasks_without_lock) +{ + struct task_struct *pos; + + bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) { + + } + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("expected an RCU CS when using bpf_iter_css_next") +int BPF_PROG(iter_css_without_lock) +{ + u64 cg_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp = bpf_cgroup_from_id(cg_id); + struct cgroup_subsys_state *root_css, *pos; + + if (!cgrp) + return 0; + root_css = &cgrp->self; + + bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) { + + } + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("expected an RCU CS when using bpf_iter_task_next") +int BPF_PROG(iter_tasks_lock_and_unlock) +{ + struct task_struct *pos; + + bpf_rcu_read_lock(); + bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) { + bpf_rcu_read_unlock(); + + bpf_rcu_read_lock(); + } + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("expected an RCU CS when 
using bpf_iter_css_next") +int BPF_PROG(iter_css_lock_and_unlock) +{ + u64 cg_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp = bpf_cgroup_from_id(cg_id); + struct cgroup_subsys_state *root_css, *pos; + + if (!cgrp) + return 0; + root_css = &cgrp->self; + + bpf_rcu_read_lock(); + bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) { + bpf_rcu_read_unlock(); + + bpf_rcu_read_lock(); + } + bpf_rcu_read_unlock(); + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("?fentry/" SYS_PREFIX "sys_getpgid") +__failure __msg("css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs") +int BPF_PROG(iter_css_task_for_each) +{ + u64 cg_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp = bpf_cgroup_from_id(cg_id); + struct cgroup_subsys_state *css; + struct task_struct *task; + + if (cgrp == NULL) + return 0; + css = &cgrp->self; + + bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) { + + } + bpf_cgroup_release(cgrp); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/iters_task_vma.c b/tools/testing/selftests/bpf/progs/iters_task_vma.c new file mode 100644 index 0000000000..e085a51d15 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_task_vma.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" +#include "bpf_experimental.h" +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +pid_t target_pid = 0; +unsigned int vmas_seen = 0; + +struct { + __u64 vm_start; + __u64 vm_end; +} vm_ranges[1000]; + +SEC("raw_tp/sys_enter") +int iter_task_vma_for_each(const void *ctx) +{ + struct task_struct *task = bpf_get_current_task_btf(); + struct vm_area_struct *vma; + unsigned int seen = 0; + + if (task->pid != target_pid) + return 0; + + if (vmas_seen) + return 0; + + bpf_for_each(task_vma, vma, task, 0) { + if (seen >= 1000) + break; + barrier_var(seen); + + vm_ranges[seen].vm_start = vma->vm_start; + vm_ranges[seen].vm_end = vma->vm_end; + seen++; + } + + vmas_seen = seen; + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe.c b/tools/testing/selftests/bpf/progs/missed_kprobe.c new file mode 100644 index 0000000000..7f9ef701f5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/missed_kprobe.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +/* + * No tests in here, just to trigger 'bpf_fentry_test*' + * through tracing test_run + */ +SEC("fentry/bpf_modify_return_test") +int BPF_PROG(trigger) +{ + return 0; +} + +SEC("kprobe/bpf_fentry_test1") +int test1(struct pt_regs *ctx) +{ + bpf_kfunc_common_test(); + return 0; +} + +SEC("kprobe/bpf_kfunc_common_test") +int test2(struct pt_regs *ctx) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c new file mode 100644 index 0000000000..8ea71cbd6c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/missed_kprobe_recursion.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +char _license[] SEC("license") = "GPL"; + +/* + * No tests in here, just to trigger 'bpf_fentry_test*' + * through tracing test_run + */ 
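/* [annotation, not part of the patch] test1 below is itself a kprobe
 * program and calls bpf_kfunc_common_test(), so the kprobes test2..test5
 * attached to that kfunc fire while a BPF program is already running on
 * the CPU; recursion protection skips them and bumps their missed
 * counters, which the userspace half of the test presumably reads back
 * (e.g. via bpf_prog_info.recursion_misses).
 */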
+SEC("fentry/bpf_modify_return_test") +int BPF_PROG(trigger) +{ + return 0; +} + +SEC("kprobe.multi/bpf_fentry_test1") +int test1(struct pt_regs *ctx) +{ + bpf_kfunc_common_test(); + return 0; +} + +SEC("kprobe/bpf_kfunc_common_test") +int test2(struct pt_regs *ctx) +{ + return 0; +} + +SEC("kprobe/bpf_kfunc_common_test") +int test3(struct pt_regs *ctx) +{ + return 0; +} + +SEC("kprobe/bpf_kfunc_common_test") +int test4(struct pt_regs *ctx) +{ + return 0; +} + +SEC("kprobe.multi/bpf_kfunc_common_test") +int test5(struct pt_regs *ctx) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/missed_tp_recursion.c b/tools/testing/selftests/bpf/progs/missed_tp_recursion.c new file mode 100644 index 0000000000..762385f827 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/missed_tp_recursion.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +/* + * No tests in here, just to trigger 'bpf_fentry_test*' + * through tracing test_run + */ +SEC("fentry/bpf_modify_return_test") +int BPF_PROG(trigger) +{ + return 0; +} + +SEC("kprobe/bpf_fentry_test1") +int test1(struct pt_regs *ctx) +{ + bpf_printk("test"); + return 0; +} + +SEC("tp/bpf_trace/bpf_trace_printk") +int test2(struct pt_regs *ctx) +{ + return 0; +} + +SEC("tp/bpf_trace/bpf_trace_printk") +int test3(struct pt_regs *ctx) +{ + return 0; +} + +SEC("tp/bpf_trace/bpf_trace_printk") +int test4(struct pt_regs *ctx) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_array.c b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c new file mode 100644 index 0000000000..37c2d2608e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/percpu_alloc_array.c @@ -0,0 +1,190 @@ +#include "bpf_experimental.h" + +struct val_t { + long b, c, d; +}; + +struct elem { + long sum; + struct val_t __percpu_kptr *pc; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct elem); +} array SEC(".maps"); + +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; + +const volatile int nr_cpus; + +/* Initialize the percpu object */ +SEC("?fentry/bpf_fentry_test1") +int BPF_PROG(test_array_map_1) +{ + struct val_t __percpu_kptr *p; + struct elem *e; + int index = 0; + + e = bpf_map_lookup_elem(&array, &index); + if (!e) + return 0; + + p = bpf_percpu_obj_new(struct val_t); + if (!p) + return 0; + + p = bpf_kptr_xchg(&e->pc, p); + if (p) + bpf_percpu_obj_drop(p); + + return 0; +} + +/* Update percpu data */ +SEC("?fentry/bpf_fentry_test2") +int BPF_PROG(test_array_map_2) +{ + struct val_t __percpu_kptr *p; + struct val_t *v; + struct elem *e; + int index = 0; + + e = bpf_map_lookup_elem(&array, &index); + if (!e) + return 0; + + p = e->pc; + if (!p) + return 0; + + v = bpf_per_cpu_ptr(p, 0); + if (!v) + return 0; + v->c = 1; + v->d = 2; + + return 0; +} + +int cpu0_field_d, sum_field_c; +int my_pid; + +/* Summarize percpu data */ +SEC("?fentry/bpf_fentry_test3") +int BPF_PROG(test_array_map_3) +{ + struct val_t __percpu_kptr *p; + int i, index = 0; + struct val_t *v; + struct elem *e; + + if ((bpf_get_current_pid_tgid() >> 32) != my_pid) + return 0; + + e = bpf_map_lookup_elem(&array, &index); + if (!e) + return 0; + + p = e->pc; + if (!p) + return 0; + + bpf_for(i, 0, nr_cpus) { + v = bpf_per_cpu_ptr(p, i); + if (v) { + if (i == 0) + cpu0_field_d = v->d; + sum_field_c += v->c; + } + } + + return 0; +} + +/* Explicitly 
free allocated percpu data */ +SEC("?fentry/bpf_fentry_test4") +int BPF_PROG(test_array_map_4) +{ + struct val_t __percpu_kptr *p; + struct elem *e; + int index = 0; + + e = bpf_map_lookup_elem(&array, &index); + if (!e) + return 0; + + /* delete */ + p = bpf_kptr_xchg(&e->pc, NULL); + if (p) { + bpf_percpu_obj_drop(p); + } + + return 0; +} + +SEC("?fentry.s/bpf_fentry_test1") +int BPF_PROG(test_array_map_10) +{ + struct val_t __percpu_kptr *p, *p1; + int i, index = 0; + struct val_t *v; + struct elem *e; + + if ((bpf_get_current_pid_tgid() >> 32) != my_pid) + return 0; + + e = bpf_map_lookup_elem(&array, &index); + if (!e) + return 0; + + bpf_rcu_read_lock(); + p = e->pc; + if (!p) { + p = bpf_percpu_obj_new(struct val_t); + if (!p) + goto out; + + p1 = bpf_kptr_xchg(&e->pc, p); + if (p1) { + /* race condition */ + bpf_percpu_obj_drop(p1); + } + } + + v = bpf_this_cpu_ptr(p); + v->c = 3; + v = bpf_this_cpu_ptr(p); + v->c = 0; + + v = bpf_per_cpu_ptr(p, 0); + if (!v) + goto out; + v->c = 1; + v->d = 2; + + /* delete */ + p1 = bpf_kptr_xchg(&e->pc, NULL); + if (!p1) + goto out; + + bpf_for(i, 0, nr_cpus) { + v = bpf_per_cpu_ptr(p, i); + if (v) { + if (i == 0) + cpu0_field_d = v->d; + sum_field_c += v->c; + } + } + + /* finally release p */ + bpf_percpu_obj_drop(p1); +out: + bpf_rcu_read_unlock(); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c b/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c new file mode 100644 index 0000000000..a2acf9aa6c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/percpu_alloc_cgrp_local_storage.c @@ -0,0 +1,109 @@ +#include "bpf_experimental.h" + +struct val_t { + long b, c, d; +}; + +struct elem { + long sum; + struct val_t __percpu_kptr *pc; +}; + +struct { + __uint(type, BPF_MAP_TYPE_CGRP_STORAGE); + __uint(map_flags, BPF_F_NO_PREALLOC); + __type(key, int); + __type(value, struct elem); +} cgrp SEC(".maps"); + +const volatile int nr_cpus; + +/* Initialize the percpu object */ +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(test_cgrp_local_storage_1) +{ + struct task_struct *task; + struct val_t __percpu_kptr *p; + struct elem *e; + + task = bpf_get_current_task_btf(); + e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, + BPF_LOCAL_STORAGE_GET_F_CREATE); + if (!e) + return 0; + + p = bpf_percpu_obj_new(struct val_t); + if (!p) + return 0; + + p = bpf_kptr_xchg(&e->pc, p); + if (p) + bpf_percpu_obj_drop(p); + + return 0; +} + +/* Percpu data collection */ +SEC("fentry/bpf_fentry_test2") +int BPF_PROG(test_cgrp_local_storage_2) +{ + struct task_struct *task; + struct val_t __percpu_kptr *p; + struct val_t *v; + struct elem *e; + + task = bpf_get_current_task_btf(); + e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0); + if (!e) + return 0; + + p = e->pc; + if (!p) + return 0; + + v = bpf_per_cpu_ptr(p, 0); + if (!v) + return 0; + v->c = 1; + v->d = 2; + return 0; +} + +int cpu0_field_d, sum_field_c; +int my_pid; + +/* Summarize percpu data collection */ +SEC("fentry/bpf_fentry_test3") +int BPF_PROG(test_cgrp_local_storage_3) +{ + struct task_struct *task; + struct val_t __percpu_kptr *p; + struct val_t *v; + struct elem *e; + int i; + + if ((bpf_get_current_pid_tgid() >> 32) != my_pid) + return 0; + + task = bpf_get_current_task_btf(); + e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0); + if (!e) + return 0; + + p = e->pc; + if (!p) + return 0; + + bpf_for(i, 0, nr_cpus) { + v = bpf_per_cpu_ptr(p, i); + if (v) { + if 
(i == 0) + cpu0_field_d = v->d; + sum_field_c += v->c; + } + } + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c new file mode 100644 index 0000000000..1a891d30f1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/percpu_alloc_fail.c @@ -0,0 +1,164 @@ +#include "bpf_experimental.h" +#include "bpf_misc.h" + +struct val_t { + long b, c, d; +}; + +struct val2_t { + long b; +}; + +struct val_with_ptr_t { + char *p; +}; + +struct val_with_rb_root_t { + struct bpf_spin_lock lock; +}; + +struct elem { + long sum; + struct val_t __percpu_kptr *pc; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct elem); +} array SEC(".maps"); + +long ret; + +SEC("?fentry/bpf_fentry_test1") +__failure __msg("store to referenced kptr disallowed") +int BPF_PROG(test_array_map_1) +{ + struct val_t __percpu_kptr *p; + struct elem *e; + int index = 0; + + e = bpf_map_lookup_elem(&array, &index); + if (!e) + return 0; + + p = bpf_percpu_obj_new(struct val_t); + if (!p) + return 0; + + p = bpf_kptr_xchg(&e->pc, p); + if (p) + bpf_percpu_obj_drop(p); + + e->pc = (struct val_t __percpu_kptr *)ret; + return 0; +} + +SEC("?fentry/bpf_fentry_test1") +__failure __msg("invalid kptr access, R2 type=percpu_ptr_val2_t expected=ptr_val_t") +int BPF_PROG(test_array_map_2) +{ + struct val2_t __percpu_kptr *p2; + struct val_t __percpu_kptr *p; + struct elem *e; + int index = 0; + + e = bpf_map_lookup_elem(&array, &index); + if (!e) + return 0; + + p2 = bpf_percpu_obj_new(struct val2_t); + if (!p2) + return 0; + + p = bpf_kptr_xchg(&e->pc, p2); + if (p) + bpf_percpu_obj_drop(p); + + return 0; +} + +SEC("?fentry.s/bpf_fentry_test1") +__failure __msg("R1 type=scalar expected=percpu_ptr_, percpu_rcu_ptr_, percpu_trusted_ptr_") +int BPF_PROG(test_array_map_3) +{ + struct val_t __percpu_kptr *p, *p1; + struct val_t *v; + struct elem *e; + int index = 0; + + e = bpf_map_lookup_elem(&array, &index); + if (!e) + return 0; + + p = bpf_percpu_obj_new(struct val_t); + if (!p) + return 0; + + p1 = bpf_kptr_xchg(&e->pc, p); + if (p1) + bpf_percpu_obj_drop(p1); + + v = bpf_this_cpu_ptr(p); + ret = v->b; + return 0; +} + +SEC("?fentry.s/bpf_fentry_test1") +__failure __msg("arg#0 expected for bpf_percpu_obj_drop_impl()") +int BPF_PROG(test_array_map_4) +{ + struct val_t __percpu_kptr *p; + + p = bpf_percpu_obj_new(struct val_t); + if (!p) + return 0; + + bpf_obj_drop(p); + return 0; +} + +SEC("?fentry.s/bpf_fentry_test1") +__failure __msg("arg#0 expected for bpf_obj_drop_impl()") +int BPF_PROG(test_array_map_5) +{ + struct val_t *p; + + p = bpf_obj_new(struct val_t); + if (!p) + return 0; + + bpf_percpu_obj_drop(p); + return 0; +} + +SEC("?fentry.s/bpf_fentry_test1") +__failure __msg("bpf_percpu_obj_new type ID argument must be of a struct of scalars") +int BPF_PROG(test_array_map_6) +{ + struct val_with_ptr_t __percpu_kptr *p; + + p = bpf_percpu_obj_new(struct val_with_ptr_t); + if (!p) + return 0; + + bpf_percpu_obj_drop(p); + return 0; +} + +SEC("?fentry.s/bpf_fentry_test1") +__failure __msg("bpf_percpu_obj_new type ID argument must not contain special fields") +int BPF_PROG(test_array_map_7) +{ + struct val_with_rb_root_t __percpu_kptr *p; + + p = bpf_percpu_obj_new(struct val_with_rb_root_t); + if (!p) + return 0; + + bpf_percpu_obj_drop(p); + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git 
a/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c b/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c new file mode 100644 index 0000000000..55907ef961 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/preempted_bpf_ma_op.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023. Huawei Technologies Co., Ltd */ +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#include "bpf_experimental.h" + +struct bin_data { + char data[256]; + struct bpf_spin_lock lock; +}; + +struct map_value { + struct bin_data __kptr * data; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, int); + __type(value, struct map_value); + __uint(max_entries, 2048); +} array SEC(".maps"); + +char _license[] SEC("license") = "GPL"; + +bool nomem_err = false; + +static int del_array(unsigned int i, int *from) +{ + struct map_value *value; + struct bin_data *old; + + value = bpf_map_lookup_elem(&array, from); + if (!value) + return 1; + + old = bpf_kptr_xchg(&value->data, NULL); + if (old) + bpf_obj_drop(old); + + (*from)++; + return 0; +} + +static int add_array(unsigned int i, int *from) +{ + struct bin_data *old, *new; + struct map_value *value; + + value = bpf_map_lookup_elem(&array, from); + if (!value) + return 1; + + new = bpf_obj_new(typeof(*new)); + if (!new) { + nomem_err = true; + return 1; + } + + old = bpf_kptr_xchg(&value->data, new); + if (old) + bpf_obj_drop(old); + + (*from)++; + return 0; +} + +static void del_then_add_array(int from) +{ + int i; + + i = from; + bpf_loop(512, del_array, &i, 0); + + i = from; + bpf_loop(512, add_array, &i, 0); +} + +SEC("fentry/bpf_fentry_test1") +int BPF_PROG2(test0, int, a) +{ + del_then_add_array(0); + return 0; +} + +SEC("fentry/bpf_fentry_test2") +int BPF_PROG2(test1, int, a, u64, b) +{ + del_then_add_array(512); + return 0; +} + +SEC("fentry/bpf_fentry_test3") +int BPF_PROG2(test2, char, a, int, b, u64, c) +{ + del_then_add_array(1024); + return 0; +} + +SEC("fentry/bpf_fentry_test4") +int BPF_PROG2(test3, void *, a, char, b, int, c, u64, d) +{ + del_then_add_array(1536); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h index f799d87e87..897061930c 100644 --- a/tools/testing/selftests/bpf/progs/profiler.inc.h +++ b/tools/testing/selftests/bpf/progs/profiler.inc.h @@ -609,7 +609,7 @@ out: } SEC("tracepoint/syscalls/sys_enter_kill") -int tracepoint__syscalls__sys_enter_kill(struct trace_event_raw_sys_enter* ctx) +int tracepoint__syscalls__sys_enter_kill(struct syscall_trace_enter* ctx) { struct bpf_func_stats_ctx stats_ctx; diff --git a/tools/testing/selftests/bpf/progs/pyperf180.c b/tools/testing/selftests/bpf/progs/pyperf180.c index c39f559d31..42c4a8b62e 100644 --- a/tools/testing/selftests/bpf/progs/pyperf180.c +++ b/tools/testing/selftests/bpf/progs/pyperf180.c @@ -1,4 +1,26 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook #define STACK_MAX_LEN 180 + +/* llvm upstream commit at clang18 + * https://github.com/llvm/llvm-project/commit/1a2e77cf9e11dbf56b5720c607313a566eebb16e + * changed inlining behavior and caused compilation failure as some branch + * target distance exceeded 16bit representation which is the maximum for + * cpu v1/v2/v3. Macro __BPF_CPU_VERSION__ is later implemented in clang18 + * to specify which cpu version is used for compilation. 
So a smaller + * unroll_count can be set if __BPF_CPU_VERSION__ is less than 4, which + * reduced some branch target distances and resolved the compilation failure. + * + * To capture the case where a developer/ci uses clang18 but the corresponding + * repo checkpoint does not have __BPF_CPU_VERSION__, a smaller unroll_count + * will be set as well to prevent potential compilation failures. + */ +#ifdef __BPF_CPU_VERSION__ +#if __BPF_CPU_VERSION__ < 4 +#define UNROLL_COUNT 90 +#endif +#elif __clang_major__ == 18 +#define UNROLL_COUNT 90 +#endif + #include "pyperf.h" diff --git a/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c new file mode 100644 index 0000000000..4dfbc85525 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/recvmsg_unix_prog.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" + +#include <string.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_kfuncs.h" + +__u8 SERVUN_ADDRESS[] = "\0bpf_cgroup_unix_test"; + +SEC("cgroup/recvmsg_unix") +int recvmsg_unix_prog(struct bpf_sock_addr *ctx) +{ + struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx); + struct sockaddr_un *sa_kern_unaddr; + __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) + + sizeof(SERVUN_ADDRESS) - 1; + int ret; + + ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_ADDRESS, + sizeof(SERVUN_ADDRESS) - 1); + if (ret) + return 1; + + if (sa_kern->uaddrlen != unaddrlen) + return 1; + + sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, + bpf_core_type_id_kernel(struct sockaddr_un)); + if (memcmp(sa_kern_unaddr->sun_path, SERVUN_ADDRESS, + sizeof(SERVUN_ADDRESS) - 1) != 0) + return 1; + + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c new file mode 100644 index 0000000000..1f67e83266 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/sendmsg_unix_prog.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ + +#include "vmlinux.h" + +#include <string.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_core_read.h> +#include "bpf_kfuncs.h" + +__u8 SERVUN_REWRITE_ADDRESS[] = "\0bpf_cgroup_unix_test_rewrite"; + +SEC("cgroup/sendmsg_unix") +int sendmsg_unix_prog(struct bpf_sock_addr *ctx) +{ + struct bpf_sock_addr_kern *sa_kern = bpf_cast_to_kern_ctx(ctx); + struct sockaddr_un *sa_kern_unaddr; + __u32 unaddrlen = offsetof(struct sockaddr_un, sun_path) + + sizeof(SERVUN_REWRITE_ADDRESS) - 1; + int ret; + + /* Rewrite destination. 
*/ + ret = bpf_sock_addr_set_sun_path(sa_kern, SERVUN_REWRITE_ADDRESS, + sizeof(SERVUN_REWRITE_ADDRESS) - 1); + if (ret) + return 0; + + if (sa_kern->uaddrlen != unaddrlen) + return 0; + + sa_kern_unaddr = bpf_rdonly_cast(sa_kern->uaddr, + bpf_core_type_id_kernel(struct sockaddr_un)); + if (memcmp(sa_kern_unaddr->sun_path, SERVUN_REWRITE_ADDRESS, + sizeof(SERVUN_REWRITE_ADDRESS) - 1) != 0) + return 0; + + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c new file mode 100644 index 0000000000..8436c67291 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fentry.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Leon Hwang */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +int count = 0; + +SEC("fentry/subprog_tail") +int BPF_PROG(fentry, struct sk_buff *skb) +{ + count++; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c new file mode 100644 index 0000000000..fe16412c6e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_fexit.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright Leon Hwang */ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +int count = 0; + +SEC("fexit/subprog_tail") +int BPF_PROG(fexit, struct sk_buff *skb) +{ + count++; + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tailcall_poke.c b/tools/testing/selftests/bpf/progs/tailcall_poke.c new file mode 100644 index 0000000000..c78b94b75e --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tailcall_poke.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_tracing.h> + +char _license[] SEC("license") = "GPL"; + +struct { + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); + __uint(max_entries, 1); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(__u32)); +} jmp_table SEC(".maps"); + +SEC("?fentry/bpf_fentry_test1") +int BPF_PROG(test, int a) +{ + bpf_tail_call_static(ctx, &jmp_table, 0); + return 0; +} + +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(call1, int a) +{ + return 0; +} + +SEC("fentry/bpf_fentry_test1") +int BPF_PROG(call2, int a) +{ + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c index ecde41ae0f..b685a4aba6 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c @@ -37,10 +37,20 @@ int pid = 0; __type(key, int); \ __type(value, struct map_value_##_size); \ __uint(max_entries, 128); \ - } array_##_size SEC(".maps"); + } array_##_size SEC(".maps") -static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int batch, - unsigned int idx) +#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \ + struct map_value_percpu_##_size { \ + struct bin_data_##_size __percpu_kptr * data; \ + }; \ + struct { \ + __uint(type, BPF_MAP_TYPE_ARRAY); \ + __type(key, int); \ + __type(value, struct map_value_percpu_##_size); \ + __uint(max_entries, 128); \ + } array_percpu_##_size SEC(".maps") + +static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx) { struct generic_map_value *value; 
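/* [annotation, not part of the patch] batch_alloc() keeps only the
 * allocation half of the old batch_alloc_free(); batch_free() below is
 * split out so the *_through_map_free tests can allocate without freeing
 * and let map destruction reclaim the kptrs instead.
 */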
unsigned int i, key; @@ -65,6 +75,14 @@ static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int b return; } } +} + +static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx) +{ + struct generic_map_value *value; + unsigned int i, key; + void *old; + for (i = 0; i < batch; i++) { key = i; value = bpf_map_lookup_elem(map, &key); @@ -81,8 +99,72 @@ static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int b } } +static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch, + unsigned int idx) +{ + struct generic_map_value *value; + unsigned int i, key; + void *old, *new; + + for (i = 0; i < batch; i++) { + key = i; + value = bpf_map_lookup_elem(map, &key); + if (!value) { + err = 1; + return; + } + /* per-cpu allocator may not be able to refill in time */ + new = bpf_percpu_obj_new_impl(data_btf_ids[idx], NULL); + if (!new) + continue; + + old = bpf_kptr_xchg(&value->data, new); + if (old) { + bpf_percpu_obj_drop(old); + err = 2; + return; + } + } +} + +static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch, + unsigned int idx) +{ + struct generic_map_value *value; + unsigned int i, key; + void *old; + + for (i = 0; i < batch; i++) { + key = i; + value = bpf_map_lookup_elem(map, &key); + if (!value) { + err = 3; + return; + } + old = bpf_kptr_xchg(&value->data, NULL); + if (!old) + continue; + bpf_percpu_obj_drop(old); + } +} + +#define CALL_BATCH_ALLOC(size, batch, idx) \ + batch_alloc((struct bpf_map *)(&array_##size), batch, idx) + #define CALL_BATCH_ALLOC_FREE(size, batch, idx) \ - batch_alloc_free((struct bpf_map *)(&array_##size), batch, idx) + do { \ + batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \ + batch_free((struct bpf_map *)(&array_##size), batch, idx); \ + } while (0) + +#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \ + batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx) + +#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \ + do { \ + batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \ + batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \ + } while (0) DEFINE_ARRAY_WITH_KPTR(8); DEFINE_ARRAY_WITH_KPTR(16); @@ -97,8 +179,21 @@ DEFINE_ARRAY_WITH_KPTR(1024); DEFINE_ARRAY_WITH_KPTR(2048); DEFINE_ARRAY_WITH_KPTR(4096); -SEC("fentry/" SYS_PREFIX "sys_nanosleep") -int test_bpf_mem_alloc_free(void *ctx) +/* per-cpu kptr doesn't support bin_data_8 which is a zero-sized array */ +DEFINE_ARRAY_WITH_PERCPU_KPTR(16); +DEFINE_ARRAY_WITH_PERCPU_KPTR(32); +DEFINE_ARRAY_WITH_PERCPU_KPTR(64); +DEFINE_ARRAY_WITH_PERCPU_KPTR(96); +DEFINE_ARRAY_WITH_PERCPU_KPTR(128); +DEFINE_ARRAY_WITH_PERCPU_KPTR(192); +DEFINE_ARRAY_WITH_PERCPU_KPTR(256); +DEFINE_ARRAY_WITH_PERCPU_KPTR(512); +DEFINE_ARRAY_WITH_PERCPU_KPTR(1024); +DEFINE_ARRAY_WITH_PERCPU_KPTR(2048); +DEFINE_ARRAY_WITH_PERCPU_KPTR(4096); + +SEC("?fentry/" SYS_PREFIX "sys_nanosleep") +int test_batch_alloc_free(void *ctx) { if ((u32)bpf_get_current_pid_tgid() != pid) return 0; @@ -121,3 +216,76 @@ int test_bpf_mem_alloc_free(void *ctx) return 0; } + +SEC("?fentry/" SYS_PREFIX "sys_nanosleep") +int test_free_through_map_free(void *ctx) +{ + if ((u32)bpf_get_current_pid_tgid() != pid) + return 0; + + /* Alloc 128 8-bytes objects in batch to trigger refilling, + * then free these objects through map free. 
+	 */
+	CALL_BATCH_ALLOC(8, 128, 0);
+	CALL_BATCH_ALLOC(16, 128, 1);
+	CALL_BATCH_ALLOC(32, 128, 2);
+	CALL_BATCH_ALLOC(64, 128, 3);
+	CALL_BATCH_ALLOC(96, 128, 4);
+	CALL_BATCH_ALLOC(128, 128, 5);
+	CALL_BATCH_ALLOC(192, 128, 6);
+	CALL_BATCH_ALLOC(256, 128, 7);
+	CALL_BATCH_ALLOC(512, 64, 8);
+	CALL_BATCH_ALLOC(1024, 32, 9);
+	CALL_BATCH_ALLOC(2048, 16, 10);
+	CALL_BATCH_ALLOC(4096, 8, 11);
+
+	return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_batch_percpu_alloc_free(void *ctx)
+{
+	if ((u32)bpf_get_current_pid_tgid() != pid)
+		return 0;
+
+	/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
+	 * then free 128 16-bytes per-cpu objects in batch to trigger freeing.
+	 */
+	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
+	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
+	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
+	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
+	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
+	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
+	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
+	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
+	CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9);
+	CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10);
+	CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11);
+
+	return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
+int test_percpu_free_through_map_free(void *ctx)
+{
+	if ((u32)bpf_get_current_pid_tgid() != pid)
+		return 0;
+
+	/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
+	 * then free these objects through map free.
+	 */
+	CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
+	CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
+	CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
+	CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
+	CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
+	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
+	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
+	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
+	CALL_BATCH_PERCPU_ALLOC(1024, 32, 9);
+	CALL_BATCH_PERCPU_ALLOC(2048, 16, 10);
+	CALL_BATCH_PERCPU_ALLOC(4096, 8, 11);
+
+	return 0;
+}
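All three entry points above are ?fentry programs, i.e. they load with autoload disabled, so a runner enables one flavor at a time before loading. A hedged driver sketch, assuming the generated skeleton test_bpf_ma.skel.h and the usual selftests flow:

#include <unistd.h>
#include "test_bpf_ma.skel.h"	/* assumed generated skeleton */

static int run_bpf_ma_subtest(void)
{
	struct test_bpf_ma *skel;
	int ret = -1;

	skel = test_bpf_ma__open();
	if (!skel)
		return -1;
	/* ?fentry sections default to autoload off; enable one flavor */
	bpf_program__set_autoload(skel->progs.test_batch_alloc_free, true);
	if (test_bpf_ma__load(skel))
		goto out;
	skel->bss->pid = getpid();
	if (!test_bpf_ma__attach(skel)) {
		usleep(1000);		/* nanosleep syscall fires the fentry */
		ret = skel->bss->err;	/* 0 means every batch passed */
	}
out:
	test_bpf_ma__destroy(skel);
	return ret;
}

diff --git a/tools/testing/selftests/bpf/progs/test_global_func17.c b/tools/testing/selftests/bpf/progs/test_global_func17.c
index a32e11c7d9..5de44b09e8 100644
--- a/tools/testing/selftests/bpf/progs/test_global_func17.c
+++ b/tools/testing/selftests/bpf/progs/test_global_func17.c
@@ -5,6 +5,7 @@ __noinline int foo(int *p)
 {
+	barrier_var(p);
 	return p ? 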
(*p = 42) : 0; } diff --git a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c index 67c14ba1e8..2a2a942737 100644 --- a/tools/testing/selftests/bpf/progs/test_ldsx_insn.c +++ b/tools/testing/selftests/bpf/progs/test_ldsx_insn.c @@ -6,7 +6,9 @@ #include <bpf/bpf_tracing.h> #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ + defined(__TARGET_ARCH_s390) || defined(__TARGET_ARCH_loongarch)) && \ + __clang_major__ >= 18 const volatile int skip = 0; #else const volatile int skip = 1; @@ -104,7 +106,11 @@ int _tc(volatile struct __sk_buff *skb) "%[tmp_mark] = r1" : [tmp_mark]"=r"(tmp_mark) : [ctx]"r"(skb), - [off_mark]"i"(offsetof(struct __sk_buff, mark)) + [off_mark]"i"(offsetof(struct __sk_buff, mark) +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + + sizeof(skb->mark) - 1 +#endif + ) : "r1"); #else tmp_mark = (char)skb->mark; diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c index 464d35bd57..b7250eb9c3 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_listen.c +++ b/tools/testing/selftests/bpf/progs/test_sockmap_listen.c @@ -15,6 +15,13 @@ struct { } sock_map SEC(".maps"); struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __uint(max_entries, 2); + __type(key, __u32); + __type(value, __u64); +} nop_map SEC(".maps"); + +struct { __uint(type, BPF_MAP_TYPE_SOCKHASH); __uint(max_entries, 2); __type(key, __u32); diff --git a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c index 56cdc0a553..7e750309ce 100644 --- a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c +++ b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c @@ -18,7 +18,7 @@ const volatile __u64 cgid; int remote_pid; SEC("tp_btf/task_newtask") -int BPF_PROG(handle__task_newtask, struct task_struct *task, u64 clone_flags) +int BPF_PROG(tp_btf_run, struct task_struct *task, u64 clone_flags) { struct cgroup *cgrp = NULL; struct task_struct *acquired; @@ -48,4 +48,30 @@ out: return 0; } +SEC("lsm.s/bpf") +int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size) +{ + struct cgroup *cgrp = NULL; + struct task_struct *task; + int ret = 0; + + task = bpf_get_current_task_btf(); + if (local_pid != task->pid) + return 0; + + if (cmd != BPF_LINK_CREATE) + return 0; + + /* 1 is the root cgroup */ + cgrp = bpf_cgroup_from_id(1); + if (!cgrp) + goto out; + if (!bpf_task_under_cgroup(task, cgrp)) + ret = -1; + bpf_cgroup_release(cgrp); + +out: + return ret; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_tc_link.c b/tools/testing/selftests/bpf/progs/test_tc_link.c index 30e7124c49..992400acb9 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_link.c +++ b/tools/testing/selftests/bpf/progs/test_tc_link.c @@ -1,7 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2023 Isovalent */ #include <stdbool.h> + #include <linux/bpf.h> +#include <linux/if_ether.h> + +#include <bpf/bpf_endian.h> #include <bpf/bpf_helpers.h> char LICENSE[] SEC("license") = "GPL"; @@ -12,10 +16,19 @@ bool seen_tc3; bool seen_tc4; bool seen_tc5; bool seen_tc6; +bool seen_eth; SEC("tc/ingress") int tc1(struct __sk_buff *skb) { + struct ethhdr eth = {}; + + if (skb->protocol != __bpf_constant_htons(ETH_P_IP)) + goto out; + if 
(bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)))
+		goto out;
+	seen_eth = eth.h_proto == bpf_htons(ETH_P_IP);
+out:
 	seen_tc1 = true;
 	return TCX_NEXT;
 }
diff --git a/tools/testing/selftests/bpf/progs/test_uprobe.c b/tools/testing/selftests/bpf/progs/test_uprobe.c
new file mode 100644
index 0000000000..896c88a496
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_uprobe.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Hengqi Chen */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+pid_t my_pid = 0;
+
+int test1_result = 0;
+int test2_result = 0;
+int test3_result = 0;
+int test4_result = 0;
+
+SEC("uprobe/./liburandom_read.so:urandlib_api_sameoffset")
+int BPF_UPROBE(test1)
+{
+	pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+	if (pid != my_pid)
+		return 0;
+
+	test1_result = 1;
+	return 0;
+}
+
+SEC("uprobe/./liburandom_read.so:urandlib_api_sameoffset@LIBURANDOM_READ_1.0.0")
+int BPF_UPROBE(test2)
+{
+	pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+	if (pid != my_pid)
+		return 0;
+
+	test2_result = 1;
+	return 0;
+}
+
+SEC("uretprobe/./liburandom_read.so:urandlib_api_sameoffset@@LIBURANDOM_READ_2.0.0")
+int BPF_URETPROBE(test3, int ret)
+{
+	pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+	if (pid != my_pid)
+		return 0;
+
+	test3_result = ret;
+	return 0;
+}
+
+SEC("uprobe")
+int BPF_UPROBE(test4)
+{
+	pid_t pid = bpf_get_current_pid_tgid() >> 32;
+
+	if (pid != my_pid)
+		return 0;
+
+	test4_result = 1;
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
index 4b8e37f7fd..78b23934d9 100644
--- a/tools/testing/selftests/bpf/progs/test_vmlinux.c
+++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
@@ -16,12 +16,12 @@ bool kprobe_called = false;
 bool fentry_called = false;
 
 SEC("tp/syscalls/sys_enter_nanosleep")
-int handle__tp(struct trace_event_raw_sys_enter *args)
+int handle__tp(struct syscall_trace_enter *args)
 {
 	struct __kernel_timespec *ts;
 	long tv_nsec;
 
-	if (args->id != __NR_nanosleep)
+	if (args->nr != __NR_nanosleep)
 		return 0;
 
 	ts = (void *)args->args[0];
diff --git a/tools/testing/selftests/bpf/progs/timer.c b/tools/testing/selftests/bpf/progs/timer.c
index 9a16d95213..8b946c8188 100644
--- a/tools/testing/selftests/bpf/progs/timer.c
+++ b/tools/testing/selftests/bpf/progs/timer.c
@@ -51,7 +51,7 @@ struct {
 	__uint(max_entries, 1);
 	__type(key, int);
 	__type(value, struct elem);
-} abs_timer SEC(".maps");
+} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps");
 
 __u64 bss_data;
 __u64 abs_data;
@@ -59,6 +59,8 @@ __u64 err;
 __u64 ok;
 __u64 callback_check = 52;
 __u64 callback2_check = 52;
+__u64 pinned_callback_check;
+__s32 pinned_cpu;
 
 #define ARRAY 1
 #define HTAB 2
@@ -329,3 +331,62 @@ int BPF_PROG2(test3, int, a)
 
 	return 0;
 }
+
+/* callback for pinned timer */
+static int timer_cb_pinned(void *map, int *key, struct bpf_timer *timer)
+{
+	__s32 cpu = bpf_get_smp_processor_id();
+
+	if (cpu != pinned_cpu)
+		err |= 16384;
+
+	pinned_callback_check++;
+	return 0;
+}
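timer_cb_pinned() accumulates a bit in err whenever the callback runs on a CPU other than the one recorded when the timer was armed. A hedged userspace check of that contract (the skeleton name timer.skel.h and the expected count of 2, one per pinned-timer flavor, are assumptions):

#include "timer.skel.h"	/* assumed generated skeleton */

static int check_pinned_timers(struct timer *skel)
{
	/* timer_cb_pinned() sets bit 16384 in err on a wrong-CPU callback */
	if (skel->bss->err & 16384)
		return -1;
	/* two increments expected: one per pinned flavor (assumption) */
	return skel->bss->pinned_callback_check == 2 ? 0 : -1;
}

+
+static void test_pinned_timer(bool soft)
+{
+	int key = 0;
+	void *map;
+	struct bpf_timer *timer;
+	__u64 flags = BPF_F_TIMER_CPU_PIN;
+	__u64 start_time;
+
+	if (soft) {
+		map = &soft_timer_pinned;
+		start_time = 0;
+	} else {
+		map = &abs_timer_pinned;
+		start_time = bpf_ktime_get_boot_ns();
+		flags |= BPF_F_TIMER_ABS;
+	}
+
+	timer = bpf_map_lookup_elem(map, &key);
+	if (timer) {
+		if (bpf_timer_init(timer, map, CLOCK_BOOTTIME) != 0)
+			err 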
|= 4096; + bpf_timer_set_callback(timer, timer_cb_pinned); + pinned_cpu = bpf_get_smp_processor_id(); + bpf_timer_start(timer, start_time + 1000, flags); + } else { + err |= 8192; + } +} + +SEC("fentry/bpf_fentry_test4") +int BPF_PROG2(test4, int, a) +{ + bpf_printk("test4"); + test_pinned_timer(true); + + return 0; +} + +SEC("fentry/bpf_fentry_test5") +int BPF_PROG2(test5, int, a) +{ + bpf_printk("test5"); + test_pinned_timer(false); + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/verifier_bswap.c b/tools/testing/selftests/bpf/progs/verifier_bswap.c index 8893094725..e61755656e 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bswap.c +++ b/tools/testing/selftests/bpf/progs/verifier_bswap.c @@ -5,7 +5,10 @@ #include "bpf_misc.h" #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \ + defined(__TARGET_ARCH_loongarch)) && \ + __clang_major__ >= 18 SEC("socket") __description("BSWAP, 16") diff --git a/tools/testing/selftests/bpf/progs/verifier_cfg.c b/tools/testing/selftests/bpf/progs/verifier_cfg.c index df7697b940..c1f55e1d80 100644 --- a/tools/testing/selftests/bpf/progs/verifier_cfg.c +++ b/tools/testing/selftests/bpf/progs/verifier_cfg.c @@ -97,4 +97,66 @@ l0_%=: r2 = r0; \ " ::: __clobber_all); } +SEC("socket") +__description("conditional loop (2)") +__success +__failure_unpriv __msg_unpriv("back-edge from insn 10 to 11") +__naked void conditional_loop2(void) +{ + asm volatile (" \ + r9 = 2 ll; \ + r3 = 0x20 ll; \ + r4 = 0x35 ll; \ + r8 = r4; \ + goto l1_%=; \ +l0_%=: r9 -= r3; \ + r9 -= r4; \ + r9 -= r8; \ +l1_%=: r8 += r4; \ + if r8 < 0x64 goto l0_%=; \ + r0 = r9; \ + exit; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unconditional loop after conditional jump") +__failure __msg("infinite loop detected") +__failure_unpriv __msg_unpriv("back-edge from insn 3 to 2") +__naked void uncond_loop_after_cond_jmp(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 > 0 goto l1_%=; \ +l0_%=: r0 = 1; \ + goto l0_%=; \ +l1_%=: exit; \ +" ::: __clobber_all); +} + + +__naked __noinline __used +static unsigned long never_ending_subprog() +{ + asm volatile (" \ + r0 = r1; \ + goto -1; \ +" ::: __clobber_all); +} + +SEC("socket") +__description("unconditional loop after conditional jump") +/* infinite loop is detected *after* check_cfg() */ +__failure __msg("infinite loop detected") +__naked void uncond_loop_in_subprog_after_cond_jmp(void) +{ + asm volatile (" \ + r0 = 0; \ + if r0 > 0 goto l1_%=; \ +l0_%=: r0 += 1; \ + call never_ending_subprog; \ +l1_%=: exit; \ +" ::: __clobber_all); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_gotol.c b/tools/testing/selftests/bpf/progs/verifier_gotol.c index 2dae5322a1..d1edbcff9a 100644 --- a/tools/testing/selftests/bpf/progs/verifier_gotol.c +++ b/tools/testing/selftests/bpf/progs/verifier_gotol.c @@ -5,7 +5,10 @@ #include "bpf_misc.h" #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \ + defined(__TARGET_ARCH_loongarch)) && \ + __clang_major__ >= 18 SEC("socket") __description("gotol, small_imm") diff --git 
a/tools/testing/selftests/bpf/progs/verifier_ldsx.c b/tools/testing/selftests/bpf/progs/verifier_ldsx.c index 0c638f45aa..d4427d8e12 100644 --- a/tools/testing/selftests/bpf/progs/verifier_ldsx.c +++ b/tools/testing/selftests/bpf/progs/verifier_ldsx.c @@ -5,19 +5,26 @@ #include "bpf_misc.h" #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \ + defined(__TARGET_ARCH_loongarch)) && \ + __clang_major__ >= 18 SEC("socket") __description("LDSX, S8") __success __success_unpriv __retval(-2) __naked void ldsx_s8(void) { - asm volatile (" \ - r1 = 0x3fe; \ - *(u64 *)(r10 - 8) = r1; \ - r0 = *(s8 *)(r10 - 8); \ - exit; \ -" ::: __clobber_all); + asm volatile ( + "r1 = 0x3fe;" + "*(u64 *)(r10 - 8) = r1;" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r0 = *(s8 *)(r10 - 8);" +#else + "r0 = *(s8 *)(r10 - 1);" +#endif + "exit;" + ::: __clobber_all); } SEC("socket") @@ -25,12 +32,16 @@ __description("LDSX, S16") __success __success_unpriv __retval(-2) __naked void ldsx_s16(void) { - asm volatile (" \ - r1 = 0x3fffe; \ - *(u64 *)(r10 - 8) = r1; \ - r0 = *(s16 *)(r10 - 8); \ - exit; \ -" ::: __clobber_all); + asm volatile ( + "r1 = 0x3fffe;" + "*(u64 *)(r10 - 8) = r1;" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r0 = *(s16 *)(r10 - 8);" +#else + "r0 = *(s16 *)(r10 - 2);" +#endif + "exit;" + ::: __clobber_all); } SEC("socket") @@ -38,35 +49,43 @@ __description("LDSX, S32") __success __success_unpriv __retval(-1) __naked void ldsx_s32(void) { - asm volatile (" \ - r1 = 0xfffffffe; \ - *(u64 *)(r10 - 8) = r1; \ - r0 = *(s32 *)(r10 - 8); \ - r0 >>= 1; \ - exit; \ -" ::: __clobber_all); + asm volatile ( + "r1 = 0xfffffffe;" + "*(u64 *)(r10 - 8) = r1;" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r0 = *(s32 *)(r10 - 8);" +#else + "r0 = *(s32 *)(r10 - 4);" +#endif + "r0 >>= 1;" + "exit;" + ::: __clobber_all); } SEC("socket") __description("LDSX, S8 range checking, privileged") __log_level(2) __success __retval(1) -__msg("R1_w=scalar(smin=-128,smax=127)") +__msg("R1_w=scalar(smin=smin32=-128,smax=smax32=127)") __naked void ldsx_s8_range_priv(void) { - asm volatile (" \ - call %[bpf_get_prandom_u32]; \ - *(u64 *)(r10 - 8) = r0; \ - r1 = *(s8 *)(r10 - 8); \ - /* r1 with s8 range */ \ - if r1 s> 0x7f goto l0_%=; \ - if r1 s< -0x80 goto l0_%=; \ - r0 = 1; \ -l1_%=: \ - exit; \ -l0_%=: \ - r0 = 2; \ - goto l1_%=; \ -" : + asm volatile ( + "call %[bpf_get_prandom_u32];" + "*(u64 *)(r10 - 8) = r0;" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r1 = *(s8 *)(r10 - 8);" +#else + "r1 = *(s8 *)(r10 - 1);" +#endif + /* r1 with s8 range */ + "if r1 s> 0x7f goto l0_%=;" + "if r1 s< -0x80 goto l0_%=;" + "r0 = 1;" +"l1_%=:" + "exit;" +"l0_%=:" + "r0 = 2;" + "goto l1_%=;" + : : __imm(bpf_get_prandom_u32) : __clobber_all); } @@ -76,20 +95,24 @@ __description("LDSX, S16 range checking") __success __success_unpriv __retval(1) __naked void ldsx_s16_range(void) { - asm volatile (" \ - call %[bpf_get_prandom_u32]; \ - *(u64 *)(r10 - 8) = r0; \ - r1 = *(s16 *)(r10 - 8); \ - /* r1 with s16 range */ \ - if r1 s> 0x7fff goto l0_%=; \ - if r1 s< -0x8000 goto l0_%=; \ - r0 = 1; \ -l1_%=: \ - exit; \ -l0_%=: \ - r0 = 2; \ - goto l1_%=; \ -" : + asm volatile ( + "call %[bpf_get_prandom_u32];" + "*(u64 *)(r10 - 8) = r0;" +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + "r1 = *(s16 *)(r10 - 8);" +#else + "r1 = 
*(s16 *)(r10 - 2);"
+#endif
+	/* r1 with s16 range */
+	"if r1 s> 0x7fff goto l0_%=;"
+	"if r1 s< -0x8000 goto l0_%=;"
+	"r0 = 1;"
+"l1_%=:"
+	"exit;"
+"l0_%=:"
+	"r0 = 2;"
+	"goto l1_%=;"
+	:
 	: __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
@@ -99,20 +122,24 @@ __description("LDSX, S32 range checking")
 __success __success_unpriv __retval(1)
 __naked void ldsx_s32_range(void)
 {
-	asm volatile ("				\
-	call %[bpf_get_prandom_u32];		\
-	*(u64 *)(r10 - 8) = r0;			\
-	r1 = *(s32 *)(r10 - 8);			\
-	/* r1 with s16 range */			\
-	if r1 s> 0x7fffFFFF goto l0_%=;		\
-	if r1 s< -0x80000000 goto l0_%=;	\
-	r0 = 1;					\
-l1_%=:						\
-	exit;					\
-l0_%=:						\
-	r0 = 2;					\
-	goto l1_%=;				\
-"	:
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"*(u64 *)(r10 - 8) = r0;"
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	"r1 = *(s32 *)(r10 - 8);"
+#else
+	"r1 = *(s32 *)(r10 - 4);"
+#endif
+	/* r1 with s32 range */
+	"if r1 s> 0x7fffFFFF goto l0_%=;"
+	"if r1 s< -0x80000000 goto l0_%=;"
+	"r0 = 1;"
+"l1_%=:"
+	"exit;"
+"l0_%=:"
+	"r0 = 2;"
+	"goto l1_%=;"
+	:
 	: __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
diff --git a/tools/testing/selftests/bpf/progs/verifier_movsx.c b/tools/testing/selftests/bpf/progs/verifier_movsx.c
index 3c8ac2c57b..cbb9d6714f 100644
--- a/tools/testing/selftests/bpf/progs/verifier_movsx.c
+++ b/tools/testing/selftests/bpf/progs/verifier_movsx.c
@@ -5,7 +5,10 @@
 #include "bpf_misc.h"
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
-	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18
+	(defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+	defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+	defined(__TARGET_ARCH_loongarch)) && \
+	__clang_major__ >= 18
 
 SEC("socket")
 __description("MOV32SX, S8")
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
new file mode 100644
index 0000000000..6b564d4c09
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 SUSE LLC */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
+__naked int bpf_neg(void)
+{
+	asm volatile (
+		"r2 = 8;"
+		"r2 = -r2;"
+		"if r2 != -8 goto 1f;"
+		"r1 = r10;"
+		"r1 += r2;"
+		"1:"
+		"r0 = 0;"
+		"exit;"
+		::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_to_le(void)
+{
+	asm volatile (
+		"r2 = 0;"
+		"r2 = le16 r2;"
+		"if r2 != 0 goto 1f;"
+		"r1 = r10;"
+		"r1 += r2;"
+		"1:"
+		"r0 = 0;"
+		"exit;"
+		::: __clobber_all);
+}
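The __msg() annotations are ordered substring patterns matched against the verifier log captured at __log_level(2). The prog_tests glue that drives such files is not part of this hunk; conventionally it is little more than the following (include and function name follow selftests convention and are assumptions):

#include <test_progs.h>
#include "verifier_precision.skel.h"	/* assumed generated skeleton */

void test_verifier_precision(void)
{
	/* loads each prog, checks verdict and every __msg pattern in order */
	RUN_TESTS(verifier_precision);
}

+
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
+__msg("mark_precise: 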
frame0: regs=r2 stack= before 0: (b7) r2 = 0") +__naked int bpf_end_to_be(void) +{ + asm volatile ( + "r2 = 0;" + "r2 = be16 r2;" + "if r2 != 0 goto 1f;" + "r1 = r10;" + "r1 += r2;" + "1:" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \ + __clang_major__ >= 18 + +SEC("?raw_tp") +__success __log_level(2) +__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10") +__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2") +__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2") +__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0") +__naked int bpf_end_bswap(void) +{ + asm volatile ( + "r2 = 0;" + "r2 = bswap16 r2;" + "if r2 != 0 goto 1f;" + "r1 = r10;" + "r1 += r2;" + "1:" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +#endif /* v4 instruction */ + +SEC("?raw_tp") +__success __log_level(2) +/* + * Without the bug fix there will be no history between "last_idx 3 first_idx 3" + * and "parent state regs=" lines. "R0_w=6" parts are here to help anchor + * expected log messages to the one specific mark_chain_precision operation. + * + * This is quite fragile: if verifier checkpointing heuristic changes, this + * might need adjusting. + */ +__msg("2: (07) r0 += 1 ; R0_w=6") +__msg("3: (35) if r0 >= 0xa goto pc+1") +__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1") +__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1") +__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1") +__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4") +__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1") +__msg("mark_precise: frame0: parent state regs= stack=: R0_rw=P4") +__msg("3: R0_w=6") +__naked int state_loop_first_last_equal(void) +{ + asm volatile ( + "r0 = 0;" + "l0_%=:" + "r0 += 1;" + "r0 += 1;" + /* every few iterations we'll have a checkpoint here with + * first_idx == last_idx, potentially confusing precision + * backtracking logic + */ + "if r0 >= 10 goto l1_%=;" /* checkpoint + mark_precise */ + "goto l0_%=;" + "l1_%=:" + "exit;" + ::: __clobber_common + ); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_sdiv.c b/tools/testing/selftests/bpf/progs/verifier_sdiv.c index 0990f88256..2a2271cf02 100644 --- a/tools/testing/selftests/bpf/progs/verifier_sdiv.c +++ b/tools/testing/selftests/bpf/progs/verifier_sdiv.c @@ -5,7 +5,10 @@ #include "bpf_misc.h" #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \ - (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64)) && __clang_major__ >= 18 + (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \ + defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \ + defined(__TARGET_ARCH_loongarch)) && \ + __clang_major__ >= 18 SEC("socket") __description("SDIV32, non-zero imm divisor, check 1") diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c index b2dfd7066c..f6d1cc9ad8 100644 --- a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c @@ -21,7 +21,7 @@ extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash, enum xdp_rss_hash_type 
*rss_type) __ksym;
 
-SEC("xdp")
+SEC("xdp.frags")
 int rx(struct xdp_md *ctx)
 {
 	void *data, *data_meta, *data_end;
diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
index e4c729768b..518329c666 100644
--- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
+++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c
@@ -179,7 +179,7 @@ static __always_inline __u32 tcp_ns_to_ts(__u64 ns)
 	return ns / (NSEC_PER_SEC / TCP_TS_HZ);
 }
 
-static __always_inline __u32 tcp_time_stamp_raw(void)
+static __always_inline __u32 tcp_clock_ms(void)
 {
 	return tcp_ns_to_ts(tcp_clock_ns());
 }
@@ -294,7 +294,7 @@ static __always_inline bool tscookie_init(struct tcphdr *tcp_header,
 	if (!loop_ctx.option_timestamp)
 		return false;
 
-	cookie = tcp_time_stamp_raw() & ~TSMASK;
+	cookie = tcp_clock_ms() & ~TSMASK;
 	cookie |= loop_ctx.wscale & TS_OPT_WSCALE_MASK;
 	if (loop_ctx.option_sack)
 		cookie |= TS_OPT_SACK;
diff --git a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
index 24369f2428..ccde6a4c63 100644
--- a/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
+++ b/tools/testing/selftests/bpf/progs/xsk_xdp_progs.c
@@ -3,11 +3,12 @@
 
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
-#include "xsk_xdp_metadata.h"
+#include <linux/if_ether.h>
+#include "xsk_xdp_common.h"
 
 struct {
 	__uint(type, BPF_MAP_TYPE_XSKMAP);
-	__uint(max_entries, 1);
+	__uint(max_entries, 2);
 	__uint(key_size, sizeof(int));
 	__uint(value_size, sizeof(int));
 } xsk SEC(".maps");
@@ -52,4 +53,21 @@ SEC("xdp.frags") int xsk_xdp_populate_metadata(struct xdp_md *xdp)
 	return bpf_redirect_map(&xsk, 0, XDP_DROP);
 }
 
+SEC("xdp") int xsk_xdp_shared_umem(struct xdp_md *xdp)
+{
+	void *data = (void *)(long)xdp->data;
+	void *data_end = (void *)(long)xdp->data_end;
+	struct ethhdr *eth = data;
+
+	if (eth + 1 > data_end)
+		return XDP_DROP;
+
+	/* Redirecting packets based on the destination MAC address */
+	idx = ((unsigned int)(eth->h_dest[5])) / 2;
+	if (idx > MAX_SOCKETS)
+		return XDP_DROP;
+
+	return bpf_redirect_map(&xsk, idx, XDP_DROP);
+}
+
 char _license[] SEC("license") = "GPL";
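With max_entries raised to 2, xsk_xdp_shared_umem() expects userspace to have installed both AF_XDP sockets in the map before traffic flows. A hedged sketch of that setup step (socket creation elided; the two-socket layout mirroring MAX_SOCKETS is an assumption taken from xsk_xdp_common.h):

#include <bpf/bpf.h>

static int populate_xsk_map(int map_fd, int xsk_fd0, int xsk_fd1)
{
	__u32 key = 0;

	/* slot 0 and slot 1 back the two redirect targets */
	if (bpf_map_update_elem(map_fd, &key, &xsk_fd0, BPF_ANY))
		return -1;
	key = 1;
	return bpf_map_update_elem(map_fd, &key, &xsk_fd1, BPF_ANY);
}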